Commit 24531088 authored by Austin Clements

runtime: eliminate all writebarrierptr* calls

Calls to writebarrierptr can simply be actual pointer writes. Calls to
writebarrierptr_prewrite need to go through the write barrier buffer.
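
In outline: where the old code called writebarrierptr(dst, src), the new
code performs the plain store *dst = src and relies on the
compiler-emitted barrier; where it called writebarrierptr_prewrite, it
enqueues the old and new pointer values in the per-P write barrier
buffer via wbBuf.putFast, calling wbBufFlush when the buffer fills.
Below is a minimal, self-contained model of that buffer's fast path;
the wbBuf type here is a stand-in for illustration, not the runtime's.

```go
package main

import "fmt"

// A minimal model of the per-P write barrier buffer this commit routes
// pointer writes through (illustrative stand-in; the real wbBuf lives
// in the runtime and is drained into the GC work queues by wbBufFlush).
type wbBuf struct {
	next int
	buf  [512]uintptr
}

// putFast records the overwritten and newly written pointer values,
// reporting false when the buffer is full and must be flushed.
func (b *wbBuf) putFast(old, new uintptr) bool {
	if b.next+2 > len(b.buf) {
		return false
	}
	b.buf[b.next] = old
	b.buf[b.next+1] = new
	b.next += 2
	return true
}

// flush stands in for wbBufFlush: hand the records to the garbage
// collector for marking, then reset the buffer.
func (b *wbBuf) flush() {
	b.next = 0
}

func main() {
	var b wbBuf
	if !b.putFast(0x1000, 0x2000) { // the fast path atomicwb uses below
		b.flush()
	}
	fmt.Println("buffered pointer pairs:", b.next/2)
}
```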

Updates #22460.

Change-Id: I92cee4da98c5baa499f1977563757c76f95bf0ca
Reviewed-on: https://go-review.googlesource.com/92704
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 2ae1e1ae
@@ -16,11 +16,24 @@ import (
 // Instead, these are wrappers around the actual atomics (casp1 and so on)
 // that use noescape to convey which arguments do not escape.
 
+// atomicwb performs a write barrier before an atomic pointer write.
+// The caller should guard the call with "if writeBarrier.enabled".
+//
+//go:nosplit
+func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer) {
+	slot := (*uintptr)(unsafe.Pointer(ptr))
+	if !getg().m.p.ptr().wbBuf.putFast(*slot, uintptr(new)) {
+		wbBufFlush(slot, uintptr(new))
+	}
+}
+
 // atomicstorep performs *ptr = new atomically and invokes a write barrier.
 //
 //go:nosplit
 func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
-	writebarrierptr_prewrite((*uintptr)(ptr), uintptr(new))
+	if writeBarrier.enabled {
+		atomicwb((*unsafe.Pointer)(ptr), new)
+	}
 	atomic.StorepNoWB(noescape(ptr), new)
 }
@@ -29,7 +42,9 @@ func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
 	// The write barrier is only necessary if the CAS succeeds,
 	// but since it needs to happen before the write becomes
 	// public, we have to do it conservatively all the time.
-	writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+	if writeBarrier.enabled {
+		atomicwb(ptr, new)
+	}
 	return atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new)
 }
@@ -43,7 +58,9 @@ func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)
 //go:linkname sync_atomic_StorePointer sync/atomic.StorePointer
 //go:nosplit
 func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
-	writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+	if writeBarrier.enabled {
+		atomicwb(ptr, new)
+	}
 	sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
 }
@@ -53,7 +70,9 @@ func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr
 //go:linkname sync_atomic_SwapPointer sync/atomic.SwapPointer
 //go:nosplit
 func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
-	writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+	if writeBarrier.enabled {
+		atomicwb(ptr, new)
+	}
 	old := unsafe.Pointer(sync_atomic_SwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(new)))
 	return old
 }
@@ -64,6 +83,8 @@ func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool
 //go:linkname sync_atomic_CompareAndSwapPointer sync/atomic.CompareAndSwapPointer
 //go:nosplit
 func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
-	writebarrierptr_prewrite((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+	if writeBarrier.enabled {
+		atomicwb(ptr, new)
+	}
 	return sync_atomic_CompareAndSwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(old), uintptr(new))
 }
@@ -310,6 +310,8 @@ func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
 	// So make sure that no preemption points can happen between read & use.
 	dst := sg.elem
 	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
+	// No need for cgo write barrier checks because dst is always
+	// Go memory.
 	memmove(dst, src, t.size)
 }
...
@@ -1002,7 +1002,8 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 			// Copy key.
 			if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
-				writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
+				// Write with a write barrier.
+				*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
 			} else {
 				*(*uint32)(dst.k) = *(*uint32)(k)
 			}
@@ -1103,7 +1104,8 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 			// Copy key.
 			if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
 				if sys.PtrSize == 8 {
-					writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
+					// Write with a write barrier.
+					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
 				} else {
 					// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
 					// Give up and call typedmemmove.
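
Both evacuate hunks rely on the same fact: the compiler emits a write
barrier for an ordinary pointer-typed store, so the explicit
writebarrierptr call is unnecessary. The same mechanism covers user
code, as in this sketch:

```go
package main

import "fmt"

type entry struct{ key *string }

// copyKey performs a pointer-typed store; the compiler inserts the
// write barrier check automatically when the GC is in its marking
// phase, which is exactly what the plain assignment above relies on.
func copyKey(dst, src *entry) {
	dst.key = src.key
}

func main() {
	s := "hello"
	src := &entry{key: &s}
	dst := &entry{}
	copyKey(dst, src)
	fmt.Println(*dst.key)
}
```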
...
@@ -550,6 +550,8 @@ func (h heapBits) setCheckmarked(size uintptr) {
 // make sure the underlying allocation contains pointers, usually
 // by checking typ.kind&kindNoPointers.
 //
+// Callers must perform cgo checks if writeBarrier.cgo.
+//
 //go:nosplit
 func bulkBarrierPreWrite(dst, src, size uintptr) {
 	if (dst|src|size)&(sys.PtrSize-1) != 0 {
@@ -649,7 +651,7 @@ func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
 	}
 }
 
-// typeBitsBulkBarrier executes writebarrierptr_prewrite for every
+// typeBitsBulkBarrier executes a write barrier for every
 // pointer that would be copied from [src, src+size) to [dst,
 // dst+size) by a memmove using the type bitmap to locate those
 // pointer slots.
@@ -663,6 +665,8 @@ func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
 // Must not be preempted because it typically runs right before memmove,
 // and the GC must observe them as an atomic action.
 //
+// Callers must perform cgo checks if writeBarrier.cgo.
+//
 //go:nosplit
 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
 	if typ == nil {
@@ -680,6 +684,7 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
 		return
 	}
 	ptrmask := typ.gcdata
+	buf := &getg().m.p.ptr().wbBuf
 	var bits uint32
 	for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
 		if i&(sys.PtrSize*8-1) == 0 {
if bits&1 != 0 { if bits&1 != 0 {
dstx := (*uintptr)(unsafe.Pointer(dst + i)) dstx := (*uintptr)(unsafe.Pointer(dst + i))
srcx := (*uintptr)(unsafe.Pointer(src + i)) srcx := (*uintptr)(unsafe.Pointer(src + i))
writebarrierptr_prewrite(dstx, *srcx) if !buf.putFast(*dstx, *srcx) {
wbBufFlush(nil, 0)
}
} }
} }
} }
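
The loop now feeds each pointer slot straight into the per-P buffer. A
simplified, self-contained model of the bitmap walk it performs
(walkPtrMask and barrier are illustrative names, not runtime API):

```go
package main

import "fmt"

// walkPtrMask scans a one-bit-per-word pointer mask and invokes a
// barrier for each word that holds a pointer, mirroring how
// typeBitsBulkBarrier consults typ.gcdata before calling buf.putFast.
func walkPtrMask(ptrmask []byte, words int, barrier func(word int)) {
	for i := 0; i < words; i++ {
		bits := ptrmask[i/8] >> (uint(i) % 8)
		if bits&1 != 0 {
			barrier(i) // this word is a pointer slot
		}
	}
}

func main() {
	// Mask 0b00000101: words 0 and 2 contain pointers.
	walkPtrMask([]byte{0x05}, 3, func(w int) {
		fmt.Println("write barrier for word", w)
	})
}
```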
...