Commit 3c182a12 authored by Josh Bleecher Snyder

runtime: replace t.keysize with fixed key size in evacuate_fastX

Change-Id: I89c3c3b21d7a4acbc49b14a52ac8d9a5861c0c39
Reviewed-on: https://go-review.googlesource.com/59131
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Martin Möhrmann <moehrmann@google.com>
parent 4cf7f909
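
The fast evacuation variants below are only reached for key types whose size is fixed at compile time, so the load of t.keysize can become an immediate: 4 bytes in evacuate_fast32, 8 in evacuate_fast64, and two words (a string header) in evacuate_faststr. A quick sanity check of those constants, runnable outside the runtime:

	package main

	import (
		"fmt"
		"unsafe"
	)

	func main() {
		fmt.Println(unsafe.Sizeof(uint32(0))) // 4: key size hard-coded in evacuate_fast32
		fmt.Println(unsafe.Sizeof(uint64(0))) // 8: key size hard-coded in evacuate_fast64
		fmt.Println(unsafe.Sizeof(""))        // 2*sys.PtrSize (16 on 64-bit): string header, evacuate_faststr
	}
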
@@ -795,7 +795,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 		x := &xy[0]
 		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
 		x.k = add(unsafe.Pointer(x.b), dataOffset)
-		x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+		x.v = add(x.k, bucketCnt*4)
 		if !h.sameSizeGrow() {
 			// Only calculate y pointers if we're growing bigger.
@@ -803,13 +803,13 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 			y := &xy[1]
 			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
 			y.k = add(unsafe.Pointer(y.b), dataOffset)
-			y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+			y.v = add(y.k, bucketCnt*4)
 		}
 		for ; b != nil; b = b.overflow(t) {
 			k := add(unsafe.Pointer(b), dataOffset)
-			v := add(k, bucketCnt*uintptr(t.keysize))
-			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+			v := add(k, bucketCnt*4)
+			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 4), add(v, uintptr(t.valuesize)) {
 				top := b.tophash[i]
 				if top == empty {
 					b.tophash[i] = evacuatedEmpty
@@ -855,7 +855,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 					dst.b = h.newoverflow(t, dst.b)
 					dst.i = 0
 					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-					dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+					dst.v = add(dst.k, bucketCnt*4)
 				}
 				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
 				if t.indirectkey {
@@ -873,7 +873,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 				// key or value arrays. That's ok, as we have the overflow pointer
 				// at the end of the bucket to protect against pointing past the
 				// end of the bucket.
-				dst.k = add(dst.k, uintptr(t.keysize))
+				dst.k = add(dst.k, 4)
 				dst.v = add(dst.v, uintptr(t.valuesize))
 			}
 		}
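
Why the value base is always bucketCnt*keysize past the key base, in this hunk and the fast64/faststr hunks below: a bucket stores its eight tophash bytes, then eight keys back-to-back, then eight values. A hypothetical struct mirroring that layout (the runtime does not declare buckets this way; it computes offsets from the maptype):

	package main

	import (
		"fmt"
		"unsafe"
	)

	const bucketCnt = 8

	// bucketFast32 mimics the layout evacuate_fast32 walks: tophash bytes,
	// then the inline key array, then the inline value array. (Real buckets
	// also end with an overflow pointer.)
	type bucketFast32 struct {
		tophash [bucketCnt]uint8
		keys    [bucketCnt]uint32
		values  [bucketCnt]uint64 // stand-in value type
	}

	func main() {
		var b bucketFast32
		fmt.Println(unsafe.Offsetof(b.keys))   // 8: the runtime's dataOffset
		fmt.Println(unsafe.Offsetof(b.values)) // 40 = dataOffset + bucketCnt*4
	}
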
@@ -916,7 +916,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 		x := &xy[0]
 		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
 		x.k = add(unsafe.Pointer(x.b), dataOffset)
-		x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+		x.v = add(x.k, bucketCnt*8)
 		if !h.sameSizeGrow() {
 			// Only calculate y pointers if we're growing bigger.
@@ -924,13 +924,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 			y := &xy[1]
 			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
 			y.k = add(unsafe.Pointer(y.b), dataOffset)
-			y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+			y.v = add(y.k, bucketCnt*8)
 		}
 		for ; b != nil; b = b.overflow(t) {
 			k := add(unsafe.Pointer(b), dataOffset)
-			v := add(k, bucketCnt*uintptr(t.keysize))
-			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+			v := add(k, bucketCnt*8)
+			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
 				top := b.tophash[i]
 				if top == empty {
 					b.tophash[i] = evacuatedEmpty
@@ -976,7 +976,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 					dst.b = h.newoverflow(t, dst.b)
 					dst.i = 0
 					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-					dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+					dst.v = add(dst.k, bucketCnt*8)
 				}
 				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
 				if t.indirectkey {
@@ -994,7 +994,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 				// key or value arrays. That's ok, as we have the overflow pointer
 				// at the end of the bucket to protect against pointing past the
 				// end of the bucket.
-				dst.k = add(dst.k, uintptr(t.keysize))
+				dst.k = add(dst.k, 8)
 				dst.v = add(dst.v, uintptr(t.valuesize))
 			}
 		}
@@ -1037,7 +1037,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 		x := &xy[0]
 		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
 		x.k = add(unsafe.Pointer(x.b), dataOffset)
-		x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+		x.v = add(x.k, bucketCnt*2*sys.PtrSize)
 		if !h.sameSizeGrow() {
 			// Only calculate y pointers if we're growing bigger.
@@ -1045,13 +1045,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 			y := &xy[1]
 			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
 			y.k = add(unsafe.Pointer(y.b), dataOffset)
-			y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+			y.v = add(y.k, bucketCnt*2*sys.PtrSize)
 		}
 		for ; b != nil; b = b.overflow(t) {
 			k := add(unsafe.Pointer(b), dataOffset)
-			v := add(k, bucketCnt*uintptr(t.keysize))
-			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+			v := add(k, bucketCnt*2*sys.PtrSize)
+			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) {
 				top := b.tophash[i]
 				if top == empty {
 					b.tophash[i] = evacuatedEmpty
@@ -1097,7 +1097,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 					dst.b = h.newoverflow(t, dst.b)
 					dst.i = 0
 					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-					dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+					dst.v = add(dst.k, bucketCnt*2*sys.PtrSize)
 				}
 				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
 				if t.indirectkey {
@@ -1115,7 +1115,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 				// key or value arrays. That's ok, as we have the overflow pointer
 				// at the end of the bucket to protect against pointing past the
 				// end of the bucket.
-				dst.k = add(dst.k, uintptr(t.keysize))
+				dst.k = add(dst.k, 2*sys.PtrSize)
 				dst.v = add(dst.v, uintptr(t.valuesize))
 			}
 		}
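
The likely payoff, reading the diff rather than anything the commit states: with a constant stride, the per-iteration pointer bump needs no load from the type descriptor, and bucketCnt*4 (or *8, or *2*sys.PtrSize) folds into an immediate. A minimal sketch of the same constant-stride walk, using a copy of the runtime's unexported add helper:

	package main

	import (
		"fmt"
		"unsafe"
	)

	// add copies the runtime's pointer-arithmetic helper.
	func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
		return unsafe.Pointer(uintptr(p) + x)
	}

	func main() {
		const bucketCnt = 8
		keys := [bucketCnt]uint32{1, 2, 3, 4, 5, 6, 7, 8}
		k := unsafe.Pointer(&keys)
		// Constant stride, as in the patched loops: add(k, 4),
		// not add(k, uintptr(t.keysize)).
		for i := 0; i < bucketCnt; i, k = i+1, add(k, 4) {
			fmt.Print(*(*uint32)(k), " ")
		}
		fmt.Println()
	}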