Commit f7aa454c authored by Josh Bleecher Snyder

runtime: mask shifts in map implementation on x86

This slightly improves the generated code on x86 architectures,
including on many hot paths.

It is a no-op on other architectures.

Change-Id: I86336fd846bc5805a27bbec572e8c73dcbd0d567
Reviewed-on: https://go-review.googlesource.com/57411
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
parent 5df1fe52
...@@ -170,6 +170,19 @@ type hiter struct { ...@@ -170,6 +170,19 @@ type hiter struct {
checkBucket uintptr checkBucket uintptr
} }
// bucketShift returns 1<<b, optimized for code generation.
//
// On the x86 architectures (amd64, amd64p32, 386) the shift amount is
// first masked to the word width (sys.PtrSize*8 - 1) so the compiler
// can omit its shift-overflow checks; per the commit message this is a
// no-op on other architectures.
// NOTE(review): the sys.Goarch* values are presumably 0-or-1 build
// constants, so OR-ing them keeps the condition a compile-time constant
// (dead branch eliminated off-x86) and keeps this function cheap enough
// to inline — TestIntendedInlining asserts it is inlined.
func bucketShift(b uint8) uintptr {
	if sys.GoarchAmd64|sys.GoarchAmd64p32|sys.Goarch386 != 0 {
		b &= sys.PtrSize*8 - 1 // help x86 archs remove shift overflow checks
	}
	return uintptr(1) << b
}
// bucketMask returns 1<<b - 1, optimized for code generation.
//
// This is the mask used throughout the map code to reduce a hash to a
// bucket index for a table of 1<<b buckets (hash & bucketMask(h.B)).
// It delegates to bucketShift and therefore inherits its x86 shift
// masking; it is also expected to inline (see TestIntendedInlining).
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}
// tophash calculates the tophash value for hash. // tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 { func tophash(hash uintptr) uint8 {
top := uint8(hash >> (sys.PtrSize*8 - 8)) top := uint8(hash >> (sys.PtrSize*8 - 8))
...@@ -374,7 +387,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { ...@@ -374,7 +387,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
} }
alg := t.key.alg alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0)) hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1 m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
...@@ -429,7 +442,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) ...@@ -429,7 +442,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
} }
alg := t.key.alg alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0)) hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1 m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
...@@ -473,7 +486,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe ...@@ -473,7 +486,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
} }
alg := t.key.alg alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0)) hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1 m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
...@@ -555,7 +568,7 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { ...@@ -555,7 +568,7 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
} }
again: again:
bucket := hash & (uintptr(1)<<h.B - 1) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
growWork(t, h, bucket) growWork(t, h, bucket)
} }
...@@ -662,7 +675,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { ...@@ -662,7 +675,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
// in which case we have not actually done a write (delete). // in which case we have not actually done a write (delete).
h.flags |= hashWriting h.flags |= hashWriting
bucket := hash & (uintptr(1)<<h.B - 1) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
growWork(t, h, bucket) growWork(t, h, bucket)
} }
...@@ -758,7 +771,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) { ...@@ -758,7 +771,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
if h.B > 31-bucketCntBits { if h.B > 31-bucketCntBits {
r += uintptr(fastrand()) << 31 r += uintptr(fastrand()) << 31
} }
it.startBucket = r & (uintptr(1)<<h.B - 1) it.startBucket = r & bucketMask(h.B)
it.offset = uint8(r >> h.B & (bucketCnt - 1)) it.offset = uint8(r >> h.B & (bucketCnt - 1))
// iterator state // iterator state
...@@ -817,7 +830,7 @@ next: ...@@ -817,7 +830,7 @@ next:
checkBucket = noCheck checkBucket = noCheck
} }
bucket++ bucket++
if bucket == uintptr(1)<<it.B { if bucket == bucketShift(it.B) {
bucket = 0 bucket = 0
it.wrapped = true it.wrapped = true
} }
...@@ -845,7 +858,7 @@ next: ...@@ -845,7 +858,7 @@ next:
// If the item in the oldbucket is not destined for // If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it. // the current new bucket in the iteration, skip it.
hash := alg.hash(k, uintptr(h.hash0)) hash := alg.hash(k, uintptr(h.hash0))
if hash&(uintptr(1)<<it.B-1) != checkBucket { if hash&bucketMask(it.B) != checkBucket {
continue continue
} }
} else { } else {
...@@ -901,7 +914,7 @@ next: ...@@ -901,7 +914,7 @@ next:
} }
func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow *bmap) { func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow *bmap) {
base := uintptr(1 << b) base := bucketShift(b)
nbuckets := base nbuckets := base
// For small b, overflow buckets are unlikely. // For small b, overflow buckets are unlikely.
// Avoid the overhead of the calculation. // Avoid the overhead of the calculation.
...@@ -909,7 +922,7 @@ func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow ...@@ -909,7 +922,7 @@ func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow
// Add on the estimated number of overflow buckets // Add on the estimated number of overflow buckets
// required to insert the median number of elements // required to insert the median number of elements
// used with this value of b. // used with this value of b.
nbuckets += 1 << (b - 4) nbuckets += bucketShift(b - 4)
sz := t.bucket.size * nbuckets sz := t.bucket.size * nbuckets
up := roundupsize(sz) up := roundupsize(sz)
if up != sz { if up != sz {
...@@ -975,7 +988,7 @@ func hashGrow(t *maptype, h *hmap) { ...@@ -975,7 +988,7 @@ func hashGrow(t *maptype, h *hmap) {
// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor. // overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool { func overLoadFactor(count int, B uint8) bool {
return count >= bucketCnt && uint64(count) >= loadFactorNum*((uint64(1)<<B)/loadFactorDen) return count >= bucketCnt && uintptr(count) >= loadFactorNum*(bucketShift(B)/loadFactorDen)
} }
// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets. // tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
...@@ -1009,7 +1022,7 @@ func (h *hmap) noldbuckets() uintptr { ...@@ -1009,7 +1022,7 @@ func (h *hmap) noldbuckets() uintptr {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
oldB-- oldB--
} }
return uintptr(1) << oldB return bucketShift(oldB)
} }
// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets(). // oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
......
...@@ -26,7 +26,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { ...@@ -26,7 +26,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
b = (*bmap)(h.buckets) b = (*bmap)(h.buckets)
} else { } else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1 m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
...@@ -69,7 +69,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { ...@@ -69,7 +69,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
b = (*bmap)(h.buckets) b = (*bmap)(h.buckets)
} else { } else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1 m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
...@@ -112,7 +112,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { ...@@ -112,7 +112,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
b = (*bmap)(h.buckets) b = (*bmap)(h.buckets)
} else { } else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1 m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
...@@ -155,7 +155,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { ...@@ -155,7 +155,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
b = (*bmap)(h.buckets) b = (*bmap)(h.buckets)
} else { } else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1 m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
...@@ -243,7 +243,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { ...@@ -243,7 +243,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
} }
dohash: dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1 m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
...@@ -335,7 +335,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { ...@@ -335,7 +335,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
} }
dohash: dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1 m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil { if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() { if !h.sameSizeGrow() {
...@@ -386,7 +386,7 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { ...@@ -386,7 +386,7 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
} }
again: again:
bucket := hash & (uintptr(1)<<h.B - 1) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
growWork(t, h, bucket) growWork(t, h, bucket)
} }
...@@ -471,7 +471,7 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { ...@@ -471,7 +471,7 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
} }
again: again:
bucket := hash & (uintptr(1)<<h.B - 1) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
growWork(t, h, bucket) growWork(t, h, bucket)
} }
...@@ -557,7 +557,7 @@ func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { ...@@ -557,7 +557,7 @@ func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
} }
again: again:
bucket := hash & (uintptr(1)<<h.B - 1) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
growWork(t, h, bucket) growWork(t, h, bucket)
} }
...@@ -642,7 +642,7 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) { ...@@ -642,7 +642,7 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
// Set hashWriting after calling alg.hash for consistency with mapdelete // Set hashWriting after calling alg.hash for consistency with mapdelete
h.flags |= hashWriting h.flags |= hashWriting
bucket := hash & (uintptr(1)<<h.B - 1) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
growWork(t, h, bucket) growWork(t, h, bucket)
} }
...@@ -695,7 +695,7 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) { ...@@ -695,7 +695,7 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
// Set hashWriting after calling alg.hash for consistency with mapdelete // Set hashWriting after calling alg.hash for consistency with mapdelete
h.flags |= hashWriting h.flags |= hashWriting
bucket := hash & (uintptr(1)<<h.B - 1) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
growWork(t, h, bucket) growWork(t, h, bucket)
} }
...@@ -749,7 +749,7 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) { ...@@ -749,7 +749,7 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
// Set hashWriting after calling alg.hash for consistency with mapdelete // Set hashWriting after calling alg.hash for consistency with mapdelete
h.flags |= hashWriting h.flags |= hashWriting
bucket := hash & (uintptr(1)<<h.B - 1) bucket := hash & bucketMask(h.B)
if h.growing() { if h.growing() {
growWork(t, h, bucket) growWork(t, h, bucket)
} }
......
...@@ -369,7 +369,7 @@ func TestIntendedInlining(t *testing.T) { ...@@ -369,7 +369,7 @@ func TestIntendedInlining(t *testing.T) {
t.Parallel() t.Parallel()
// want is the list of function names that should be inlined. // want is the list of function names that should be inlined.
want := []string{"tophash", "add", "(*bmap).keys"} want := []string{"tophash", "add", "(*bmap).keys", "bucketShift", "bucketMask"}
m := make(map[string]bool, len(want)) m := make(map[string]bool, len(want))
for _, s := range want { for _, s := range want {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment