Commit 42c12147 authored by Austin Clements

runtime: eliminate write barriers from alloc/mark bitmaps

This introduces a new type, *gcBits, to use for alloc/mark bitmap
allocations instead of *uint8. This type is marked go:notinheap, so
uses of it correctly eliminate write barriers. Since we now have a
type, this also extracts some common operations to methods both for
convenience and to avoid (*uint8) casts at most use sites.

For #19325.

Change-Id: Id51f734fb2e96b8b7715caa348c8dcd4aef0696a
Reviewed-on: https://go-review.googlesource.com/38580
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 9d1b2f88
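
Background for the write-barrier claim in the message: the compiler omits pointer write barriers for stores of pointers to go:notinheap types, because such objects can never live in the garbage-collected heap. The sketch below is a minimal illustration of that pattern, not code from this CL; the names offHeapBits, span, and setBits are made up, and the go:notinheap pragma is only honored inside runtime packages, so treat this as illustrative rather than a standalone buildable program.

// A minimal sketch, assuming hypothetical names (offHeapBits, span, setBits).
// go:notinheap marks a type whose values are never allocated in the GC'd
// heap, so the compiler emits no write barrier when storing pointers to it.
//
//go:notinheap
type offHeapBits uint8

type span struct {
	bits *offHeapBits
}

func (s *span) setBits(b *offHeapBits) {
	s.bits = b // compiled to a plain store; no gcWriteBarrier call
}

This is exactly what the CL does for allocBits and gcmarkBits: replacing *uint8 with a pointer to a notinheap-marked gcBits type lets the compiler drop the barriers at every assignment site.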
......@@ -182,10 +182,8 @@ type markBits struct {
//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
- whichByte := allocBitIndex / 8
- whichBit := allocBitIndex % 8
- bytePtr := addb(s.allocBits, whichByte)
- return markBits{bytePtr, uint8(1 << whichBit), allocBitIndex}
+ bytep, mask := s.allocBits.bitp(allocBitIndex)
+ return markBits{bytep, mask, allocBitIndex}
}
// refillaCache takes 8 bytes s.allocBits starting at whichByte
......@@ -193,7 +191,7 @@ func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uintptr) {
- bytes := (*[8]uint8)(unsafe.Pointer(addb(s.allocBits, whichByte)))
+ bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
aCache := uint64(0)
aCache |= uint64(bytes[0])
aCache |= uint64(bytes[1]) << (1 * 8)
......@@ -265,10 +263,8 @@ func (s *mspan) isFree(index uintptr) bool {
if index < s.freeindex {
return false
}
- whichByte := index / 8
- whichBit := index % 8
- byteVal := *addb(s.allocBits, whichByte)
- return byteVal&uint8(1<<whichBit) == 0
+ bytep, mask := s.allocBits.bitp(index)
+ return *bytep&mask == 0
}
func (s *mspan) objIndex(p uintptr) uintptr {
......@@ -290,14 +286,12 @@ func markBitsForAddr(p uintptr) markBits {
}
func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
- whichByte := objIndex / 8
- bitMask := uint8(1 << (objIndex % 8)) // low 3 bits hold the bit index
- bytePtr := addb(s.gcmarkBits, whichByte)
- return markBits{bytePtr, bitMask, objIndex}
+ bytep, mask := s.gcmarkBits.bitp(objIndex)
+ return markBits{bytep, mask, objIndex}
}
func (s *mspan) markBitsForBase() markBits {
- return markBits{s.gcmarkBits, uint8(1), 0}
+ return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
}
// isMarked reports whether mark bit m is set.
......@@ -827,11 +821,11 @@ func (s *mspan) countAlloc() int {
count := 0
maxIndex := s.nelems / 8
for i := uintptr(0); i < maxIndex; i++ {
- mrkBits := *addb(s.gcmarkBits, i)
+ mrkBits := *s.gcmarkBits.bytep(i)
count += int(oneBitCount[mrkBits])
}
if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
- mrkBits := *addb(s.gcmarkBits, maxIndex)
+ mrkBits := *s.gcmarkBits.bytep(maxIndex)
mask := uint8((1 << bitsInLastByte) - 1)
bits := mrkBits & mask
count += int(oneBitCount[bits])
......
......@@ -229,8 +229,8 @@ type mspan struct {
// The sweep will free the old allocBits and set allocBits to the
// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
// out memory.
- allocBits *uint8
- gcmarkBits *uint8
+ allocBits *gcBits
+ gcmarkBits *gcBits
// sweep generation:
// if sweepgen == h->sweepgen - 2, the span needs sweeping
......@@ -1417,6 +1417,22 @@ func freespecial(s *special, p unsafe.Pointer, size uintptr) {
}
}
+ // gcBits is an alloc/mark bitmap. This is always used as *gcBits.
+ //
+ //go:notinheap
+ type gcBits uint8
+ // bytep returns a pointer to the n'th byte of b.
+ func (b *gcBits) bytep(n uintptr) *uint8 {
+ return addb((*uint8)(b), n)
+ }
+ // bitp returns a pointer to the byte containing bit n and a mask for
+ // selecting that bit from *bytep.
+ func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
+ return b.bytep(n / 8), 1 << (n % 8)
+ }
const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
......@@ -1430,7 +1446,7 @@ type gcBitsArena struct {
// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
free uintptr // free is the index into bits of the next free byte; read/write atomically
next *gcBitsArena
- bits [gcBitsChunkBytes - gcBitsHeaderBytes]uint8
+ bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
}
var gcBitsArenas struct {
......@@ -1443,7 +1459,7 @@ var gcBitsArenas struct {
// tryAlloc allocates from b or returns nil if b does not have enough room.
// This is safe to call concurrently.
- func (b *gcBitsArena) tryAlloc(bytes uintptr) *uint8 {
+ func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
return nil
}
......@@ -1459,7 +1475,7 @@ func (b *gcBitsArena) tryAlloc(bytes uintptr) *uint8 {
// newMarkBits returns a pointer to 8 byte aligned bytes
// to be used for a span's mark bits.
- func newMarkBits(nelems uintptr) *uint8 {
+ func newMarkBits(nelems uintptr) *gcBits {
blocksNeeded := uintptr((nelems + 63) / 64)
bytesNeeded := blocksNeeded * 8
......@@ -1515,7 +1531,7 @@ func newMarkBits(nelems uintptr) *uint8 {
// allocation bits. For spans not being initialized the
// the mark bits are repurposed as allocation bits when
// the span is swept.
- func newAllocBits(nelems uintptr) *uint8 {
+ func newAllocBits(nelems uintptr) *gcBits {
return newMarkBits(nelems)
}
......
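
As a worked illustration of the byte/mask split that the new bitp helper performs (bit n lives in byte n/8 under mask 1<<(n%8)), here is a small self-contained sketch. The function bitPosition and the 4-byte bitmap are hypothetical names invented for this example; only the arithmetic mirrors the runtime code above.

package main

import "fmt"

// bitPosition mirrors the arithmetic of gcBits.bitp: bit n is found in
// byte n/8, selected by the mask 1 << (n%8).
func bitPosition(n uintptr) (byteIndex uintptr, mask uint8) {
	return n / 8, 1 << (n % 8)
}

func main() {
	bits := make([]uint8, 4) // a 32-bit bitmap
	for _, n := range []uintptr{0, 3, 8, 21} {
		i, m := bitPosition(n)
		bits[i] |= m // set bit n, e.g. "mark object n"
	}
	fmt.Printf("%08b\n", bits) // [00001001 00000001 00100000 00000000]
}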