Commit f5d494bb authored by Austin Clements

runtime: ensure GC sees type-safe memory on weak machines

Currently it's possible for the garbage collector to observe
uninitialized memory or stale heap bitmap bits on weakly ordered
architectures such as ARM and PPC. On such architectures, the stores
that zero newly allocated memory and initialize its heap bitmap may
move after a store in user code that makes the allocated object
observable by the garbage collector.

To fix this, add a "publication barrier" (also known as an "export
barrier") before returning from mallocgc. This is a store/store
barrier that ensures any write done by user code that makes the
returned object observable to the garbage collector will be ordered
after the initialization performed by mallocgc. No barrier is
necessary on the reading side because of the data dependency between
loading the pointer and loading the contents of the object.
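
For illustration, here is a minimal sketch (not part of this change) of the allocate-then-publish pattern the barrier protects; the object type, the shared variable, and the publish function are made-up names for this example:

package main

// object, shared, and publish are illustrative names only; this is not
// runtime code, just a sketch of the pattern described above.
type object struct {
	buf [64]byte
}

// shared is a heap pointer the garbage collector (or another goroutine)
// can reach and follow.
var shared *object

func publish() {
	// new(object) calls mallocgc, which zeroes the memory, writes the
	// heap bitmap for x, and (with this change) executes
	// publicationBarrier() before returning.
	x := new(object)

	// This is the "publication" store: once it is visible, the garbage
	// collector can follow shared to x. The store/store barrier inside
	// mallocgc orders the zeroing and bitmap stores before it.
	shared = x
}

func main() {
	publish()
}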

Fixes one of the issues raised in #9984.

Change-Id: Ia3d96ad9c5fc7f4d342f5e05ec0ceae700cd17c8
Reviewed-on: https://go-review.googlesource.com/11083
Reviewed-by: Rick Hudson <rlh@golang.org>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Minux Ma <minux@golang.org>
Reviewed-by: Martin Capitanio <capnm9@gmail.com>
Reviewed-by: Russ Cox <rsc@golang.org>
parent 75ce3306
@@ -632,6 +632,11 @@ TEXT runtime·atomicand8(SB), NOSPLIT, $0-5
ANDB BX, (AX)
RET
TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// Stores are already ordered on x86, so this is just a
// compile barrier.
RET
// void jmpdefer(fn, sp);
// called from deferreturn.
// 1. pop the caller
@@ -615,6 +615,11 @@ TEXT runtime·atomicand8(SB), NOSPLIT, $0-9
ANDB BX, (AX)
RET
TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// Stores are already ordered on x86, so this is just a
// compile barrier.
RET
// void jmpdefer(fn, sp);
// called from deferreturn.
// 1. pop the caller
@@ -569,6 +569,11 @@ TEXT runtime·atomicand8(SB), NOSPLIT, $0-5
ANDB AX, 0(BX)
RET
TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
// Stores are already ordered on x86, so this is just a
// compile barrier.
RET
// void jmpdefer(fn, sp);
// called from deferreturn.
// 1. pop the caller
@@ -736,6 +736,17 @@ TEXT runtime·atomicloaduint(SB),NOSPLIT,$0-8
TEXT runtime·atomicstoreuintptr(SB),NOSPLIT,$0-8
B runtime·atomicstore(SB)
// armPublicationBarrier is a native store/store barrier for ARMv7+.
// To implement publicationBarrier in sys_$GOOS_arm.s using the native
// instructions, use:
//
// TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
// B runtime·armPublicationBarrier(SB)
//
TEXT runtime·armPublicationBarrier(SB),NOSPLIT,$-4-0
WORD $0xf57ff05e // DMB ST
RET
// AES hashing not implemented for ARM
TEXT runtime·aeshash(SB),NOSPLIT,$-4-0
MOVW $0, R0
@@ -111,3 +111,7 @@ again:
TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
B runtime·xchg64(SB)
TEXT ·publicationBarrier(SB),NOSPLIT,$-8-0
DMB $0xe // DMB ST
RET
@@ -38,3 +38,10 @@ TEXT ·atomicloadp(SB),NOSPLIT,$-8-16
ISYNC
MOVD R3, ret+8(FP)
RET
TEXT ·publicationBarrier(SB),NOSPLIT,$-8-0
// LWSYNC is the "export" barrier recommended by Power ISA
// v2.07 book II, appendix B.2.2.2.
// LWSYNC is a load/load, load/store, and store/store barrier.
WORD $0x7c2004ac // LWSYNC
RET
@@ -657,6 +657,14 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
} else {
c.local_scan += typ.ptrdata
}
// Ensure that the stores above that initialize x to
// type-safe memory and set the heap bits occur before
// the caller can make x observable to the garbage
// collector. Otherwise, on weakly ordered machines,
// the garbage collector could follow a pointer to x,
// but see uninitialized memory or stale heap bits.
publicationBarrier()
}
// GCmarkterminate allocates black
@@ -167,6 +167,23 @@ func xaddint64(ptr *int64, delta int64) int64 {
return int64(xadd64((*uint64)(unsafe.Pointer(ptr)), delta))
}
// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()
//go:noescape
func setcallerpc(argp unsafe.Pointer, pc uintptr)
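
The read-side claim in the comment above can be illustrated with a short sketch, continuing the made-up object/shared names from the example under the commit message (this is not runtime code):

// consume sketches the reading side: the load of the published pointer
// and the loads through it form a data dependency, which the supported
// architectures order automatically, so no read barrier is needed.
func consume() byte {
	p := shared // load the published pointer
	if p == nil {
		return 0
	}
	return p.buf[0] // dependent load; it cannot be satisfied before p is loaded
}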
@@ -301,6 +301,9 @@ TEXT runtime·cas(SB),NOSPLIT,$0
TEXT runtime·casp1(SB),NOSPLIT,$0
B runtime·cas(SB)
TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
B runtime·armPublicationBarrier(SB)
TEXT runtime·sysctl(SB),NOSPLIT,$0
MOVW mib+0(FP), R0
MOVW miblen+4(FP), R1
@@ -381,6 +381,10 @@ TEXT runtime·casp1(SB),NOSPLIT,$0
TEXT runtime·cas(SB),NOSPLIT,$0
B runtime·armcas(SB)
// TODO: this is only valid for ARMv7+
TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
B runtime·armPublicationBarrier(SB)
// TODO(minux): this only supports ARMv6K+.
TEXT runtime·read_tls_fallback(SB),NOSPLIT,$-4
WORD $0xee1d0f70 // mrc p15, 0, r0, c13, c0, 3
@@ -416,6 +416,22 @@ check:
TEXT runtime·casp1(SB),NOSPLIT,$0
B runtime·cas(SB)
// As for cas, memory barriers are complicated on ARM, but the kernel
// provides a user helper. ARMv5 does not support SMP and has no
// memory barrier instruction at all. ARMv6 added SMP support and has
// a memory barrier, but it requires writing to a coprocessor
// register. ARMv7 introduced the DMB instruction, but it's expensive
// even on single-core devices. The kernel helper takes care of all of
// this for us.
TEXT publicationBarrier<>(SB),NOSPLIT,$0
// void __kuser_memory_barrier(void);
MOVW $0xffff0fa0, R15 // R15 is hardware PC.
TEXT ·publicationBarrier(SB),NOSPLIT,$0
BL publicationBarrier<>(SB)
RET
TEXT runtime·osyield(SB),NOSPLIT,$0
MOVW $SYS_sched_yield, R7
SWI $0
@@ -323,5 +323,9 @@ TEXT runtime·casp1(SB),NOSPLIT,$0
TEXT runtime·cas(SB),NOSPLIT,$0
B runtime·armcas(SB)
// Likewise, this is only valid for ARMv7+, but that's okay.
TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
B runtime·armPublicationBarrier(SB)
TEXT runtime·read_tls_fallback(SB),NOSPLIT,$-4
WORD $0xe7fedef0 // NACL_INSTR_ARM_ABORT_NOW (UDF #0xEDE0)
@@ -349,6 +349,10 @@ TEXT runtime·casp1(SB),NOSPLIT,$0
TEXT runtime·cas(SB),NOSPLIT,$0
B runtime·armcas(SB)
// TODO: this is only valid for ARMv7+
TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
B runtime·armPublicationBarrier(SB)
TEXT runtime·read_tls_fallback(SB),NOSPLIT,$-4
MOVM.WP [R1, R2, R3, R12], (R13)
SWI $0x00a0013c // _lwp_getprivate
@@ -374,6 +374,9 @@ TEXT runtime·casp1(SB),NOSPLIT,$0
TEXT runtime·cas(SB),NOSPLIT,$0
B runtime·armcas(SB)
TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
B runtime·armPublicationBarrier(SB)
// TODO(jsing): Implement.
TEXT runtime·read_tls_fallback(SB),NOSPLIT,$-4
MOVW $5, R0