Commit be1ef467 authored by Ian Lance Taylor

runtime: add optional expensive check for invalid cgo pointer passing

If you set GODEBUG=cgocheck=2 the runtime package will use the write
barrier to detect cases where a Go program writes a Go pointer into
non-Go memory.  In conjunction with the existing cgo checks, and the
not-yet-implemented cgo check for exported functions, this should
reliably detect all cases (that do not import the unsafe package) in
which a Go pointer is incorrectly shared with C code.  This check is
optional because it turns on the write barrier at all times, which is
known to be expensive.
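
As an illustration (not part of this change), a minimal sketch of the
kind of bug the new mode catches: an ordinary pointer store into
C-allocated memory, which now runs the write barrier and throws.

	package main

	// #include <stdlib.h>
	import "C"

	import "unsafe"

	func main() {
		// C.malloc returns non-Go memory.
		p := (*unsafe.Pointer)(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0)))))
		// Storing a Go pointer through p runs the write barrier;
		// with GODEBUG=cgocheck=2 the runtime throws
		// "Go pointer stored into non-Go memory".
		*p = unsafe.Pointer(new(int))
	}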

Update #12416.

Change-Id: I549d8b2956daa76eac853928e9280e615d6365f4
Reviewed-on: https://go-review.googlesource.com/16899
Reviewed-by: Russ Cox <rsc@golang.org>
parent 1860a0fa
......@@ -86,7 +86,7 @@ const runtimeimport = "" +
"func @\"\".chanrecv2 (@\"\".chanType·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any) (? bool)\n" +
"func @\"\".chansend1 (@\"\".chanType·1 *byte, @\"\".hchan·2 chan<- any, @\"\".elem·3 *any)\n" +
"func @\"\".closechan (@\"\".hchan·1 any)\n" +
"var @\"\".writeBarrierEnabled bool\n" +
"var @\"\".writeBarrier struct { @\"\".enabled bool; @\"\".needed bool; @\"\".cgo bool }\n" +
"func @\"\".writebarrierptr (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
"func @\"\".writebarrierstring (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
"func @\"\".writebarrierslice (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
......
......@@ -108,7 +108,11 @@ func chanrecv2(chanType *byte, hchan <-chan any, elem *any) bool
func chansend1(chanType *byte, hchan chan<- any, elem *any)
func closechan(hchan any)
var writeBarrierEnabled bool
var writeBarrier struct {
enabled bool
needed bool
cgo bool
}
func writebarrierptr(dst *any, src any)
func writebarrierstring(dst *any, src any)
......
......@@ -801,7 +801,9 @@ func cgen_wbptr(n, res *Node) {
Cgenr(n, &src, nil)
}
wbEnabled := syslook("writeBarrierEnabled", 0)
wbVar := syslook("writeBarrier", 0)
wbEnabled := Nod(ODOT, wbVar, newname(wbVar.Type.Type.Sym))
wbEnabled = typecheck(&wbEnabled, Erv)
pbr := Thearch.Ginscmp(ONE, Types[TUINT8], wbEnabled, Nodintconst(0), -1)
Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, &dst)
pjmp := Gbranch(obj.AJMP, nil, 0)
......
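
Sketched as ordinary Go rather than generated code (the names below are
stand-ins, not the compiler's actual output), the store sequence
cgen_wbptr emits looks like this; the only change in this hunk is that
the guard loads the one-byte enabled field of the writeBarrier struct
instead of the old global bool:

	package sketch

	import "unsafe"

	// Mirrors the runtime variable; enabled is the first field, so the
	// compiler can test it with a single byte-sized load and compare.
	var writeBarrier struct{ enabled, needed, cgo bool }

	// Stand-in for the runtime's real writebarrierptr.
	func writebarrierptr(dst *unsafe.Pointer, src unsafe.Pointer) { *dst = src }

	func storePtr(dst *unsafe.Pointer, src unsafe.Pointer) {
		if writeBarrier.enabled { // was: if writeBarrierEnabled
			writebarrierptr(dst, src) // runtime call performs the store
		} else {
			*dst = src // plain store, no barrier
		}
	}
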
......@@ -390,7 +390,7 @@ func cgoCheckPointer(ptr interface{}, args ...interface{}) interface{} {
const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer"
// cgoCheckArg is the real work of cgoCheckPointer. The argument p,
// cgoCheckArg is the real work of cgoCheckPointer. The argument p
// is either a pointer to the value (of type t), or the value itself,
// depending on indir. The top parameter is whether we are at the top
// level, where Go pointers are allowed.
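
For context, cgoCheckPointer is the check cgo already wraps around call
arguments under the default cgocheck=1. A minimal sketch of code it
rejects (the C function f here is hypothetical):

	package main

	// void f(void *p) {}
	import "C"

	import "unsafe"

	type S struct{ p *int }

	func main() {
		s := &S{p: new(int)}
		// cgo wraps the argument in cgoCheckPointer, which panics
		// with "cgo argument has Go pointer to Go pointer".
		C.f(unsafe.Pointer(s))
	}
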
......@@ -414,7 +414,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool) {
}
for i := uintptr(0); i < at.len; i++ {
cgoCheckArg(at.elem, p, true, top)
p = unsafe.Pointer(uintptr(p) + at.elem.size)
p = add(p, at.elem.size)
}
case kindChan, kindMap:
// These types contain internal pointers that will
......@@ -440,7 +440,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool) {
if inheap(uintptr(unsafe.Pointer(it))) {
panic(errorString(cgoCheckPointerFail))
}
p = *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + sys.PtrSize))
p = *(*unsafe.Pointer)(add(p, sys.PtrSize))
if !cgoIsGoPointer(p) {
return
}
......@@ -460,7 +460,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool) {
}
for i := 0; i < s.cap; i++ {
cgoCheckArg(st.elem, p, true, false)
p = unsafe.Pointer(uintptr(p) + st.elem.size)
p = add(p, st.elem.size)
}
case kindStruct:
st := (*structtype)(unsafe.Pointer(t))
......@@ -472,7 +472,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool) {
return
}
for _, f := range st.fields {
cgoCheckArg(f.typ, unsafe.Pointer(uintptr(p)+f.offset), true, top)
cgoCheckArg(f.typ, add(p, f.offset), true, top)
}
case kindPtr, kindUnsafePointer:
if indir {
......@@ -539,6 +539,8 @@ func cgoCheckUnknownPointer(p unsafe.Pointer) {
// cgoIsGoPointer returns whether the pointer is a Go pointer--a
// pointer to Go memory. We only care about Go memory that might
// contain pointers.
//go:nosplit
//go:nowritebarrierrec
func cgoIsGoPointer(p unsafe.Pointer) bool {
if p == nil {
return false
......@@ -558,6 +560,8 @@ func cgoIsGoPointer(p unsafe.Pointer) bool {
}
// cgoInRange returns whether p is between start and end.
//go:nosplit
//go:nowritebarrierrec
func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
return start <= uintptr(p) && uintptr(p) < end
}
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code to check that pointer writes follow the cgo rules.
// These functions are invoked via the write barrier when debug.cgocheck > 1.
package runtime
import (
"runtime/internal/sys"
"unsafe"
)
const cgoWriteBarrierFail = "Go pointer stored into non-Go memory"
// cgoCheckWriteBarrier is called whenever a pointer is stored into memory.
// It throws if the program is storing a Go pointer into non-Go memory.
//go:nosplit
//go:nowritebarrier
func cgoCheckWriteBarrier(dst *uintptr, src uintptr) {
if !cgoIsGoPointer(unsafe.Pointer(src)) {
return
}
if cgoIsGoPointer(unsafe.Pointer(dst)) {
return
}
// If we are running on the system stack then dst might be an
// address on the stack, which is OK.
g := getg()
if g == g.m.g0 || g == g.m.gsignal {
return
}
// Allocating memory can write to various mfixalloc structs
// that look like they are non-Go memory.
if g.m.mallocing != 0 {
return
}
systemstack(func() {
println("write of Go pointer", hex(src), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst))))
throw(cgoWriteBarrierFail)
})
}
// cgoCheckMemmove is called when moving a block of memory.
// dst and src point off bytes into the value to copy.
// size is the number of bytes to copy.
// It throws if the program is copying a block that contains a Go pointer
// into non-Go memory.
//go:nosplit
//go:nowritebarrier
func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
if typ.kind&kindNoPointers != 0 {
return
}
if !cgoIsGoPointer(src) {
return
}
if cgoIsGoPointer(dst) {
return
}
cgoCheckTypedBlock(typ, src, off, size)
}
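// An editor's illustration, not part of this file: assuming cmem was
// returned by C.malloc, a typed copy like the following funnels into
// cgoCheckMemmove via typedmemmove and throws under cgocheck=2:
//
//	type T struct{ p *int }
//	t := (*T)(cmem)
//	*t = T{p: new(int)} // copies a Go pointer into non-Go memory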
// cgoCheckSliceCopy is called when copying n elements of a slice from
// src to dst. typ is the element type of the slice.
// It throws if the program is copying slice elements that contain Go pointers
// into non-Go memory.
//go:nosplit
//go:nowritebarrier
func cgoCheckSliceCopy(typ *_type, dst, src slice, n int) {
if typ.kind&kindNoPointers != 0 {
return
}
if !cgoIsGoPointer(src.array) {
return
}
if cgoIsGoPointer(dst.array) {
return
}
p := src.array
for i := 0; i < n; i++ {
cgoCheckTypedBlock(typ, p, 0, typ.size)
p = add(p, typ.size)
}
}
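// An editor's illustration, not part of this file: assuming raw was
// returned by C.malloc, a copy of pointer-typed elements like this one
// reaches cgoCheckSliceCopy via typedslicecopy:
//
//	dst := (*[4]*int)(raw)[:] // a slice backed by non-Go memory
//	src := []*int{new(int)}   // its element is a Go pointer
//	copy(dst, src)            // with cgocheck=2 this throws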
// cgoCheckTypedBlock checks the block of memory at src, for up to size bytes,
// and throws if it finds a Go pointer. The type of the memory is typ,
// and src is off bytes into that type.
//go:nosplit
//go:nowritebarrier
func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
if typ.kind&kindGCProg == 0 {
cgoCheckBits(src, typ.gcdata, off, size)
return
}
// The type has a GC program. Try to find GC bits somewhere else.
for datap := &firstmoduledata; datap != nil; datap = datap.next {
if cgoInRange(src, datap.data, datap.edata) {
doff := uintptr(src) - datap.data
cgoCheckBits(add(src, -doff), datap.gcdatamask.bytedata, off+doff, size)
return
}
if cgoInRange(src, datap.bss, datap.ebss) {
boff := uintptr(src) - datap.bss
cgoCheckBits(add(src, -boff), datap.gcbssmask.bytedata, off+boff, size)
return
}
}
aoff := uintptr(src) - mheap_.arena_start
idx := aoff >> _PageShift
s := h_spans[idx]
if s.state == _MSpanStack {
// There are no heap bits for a value stored on the stack.
// For a channel receive src might be on the stack of some
// other goroutine, so we can't unwind the stack even if
// we wanted to.
// We can't expand the GC program without extra storage
// space we can't easily get.
// Fortunately we have the type information.
systemstack(func() {
cgoCheckUsingType(typ, src, off, size)
})
return
}
// src must be in the regular heap.
hbits := heapBitsForAddr(uintptr(src))
for i := uintptr(0); i < off+size; i += sys.PtrSize {
bits := hbits.bits()
if i >= off && bits&bitPointer != 0 {
v := *(*unsafe.Pointer)(add(src, i))
if cgoIsGoPointer(v) {
systemstack(func() {
throw(cgoWriteBarrierFail)
})
}
}
hbits = hbits.next()
}
}
// cgoCheckBits checks the block of memory at src, for up to size
// bytes, and throws if it finds a Go pointer. The gcbits mark each
// pointer value. The src pointer is off bytes into the gcbits.
//go:nosplit
//go:nowritebarrier
func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
skipMask := off / sys.PtrSize / 8
skipBytes := skipMask * sys.PtrSize * 8
ptrmask := addb(gcbits, skipMask)
src = add(src, skipBytes)
off -= skipBytes
size += off
var bits uint32
for i := uintptr(0); i < size; i += sys.PtrSize {
if i&(sys.PtrSize*8-1) == 0 {
bits = uint32(*ptrmask)
ptrmask = addb(ptrmask, 1)
} else {
bits >>= 1
}
if off > 0 {
off -= sys.PtrSize
} else {
if bits&1 != 0 {
v := *(*unsafe.Pointer)(add(src, i))
if cgoIsGoPointer(v) {
systemstack(func() {
throw(cgoWriteBarrierFail)
})
}
}
}
}
}
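// An editor's sketch, not part of this file, of the walk above with the
// off/size bookkeeping stripped away: gcbits carries one mask bit per
// pointer-sized word, least significant bit first, so a fresh mask byte
// is loaded every eight words:
//
//	var bits uint32
//	for i := uintptr(0); i < nwords; i++ {
//		if i%8 == 0 {
//			bits = uint32(*addb(ptrmask, i/8))
//		} else {
//			bits >>= 1
//		}
//		if bits&1 != 0 {
//			// word i of src may hold a pointer; check it
//		}
//	}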
// cgoCheckUsingType is like cgoCheckTypedBlock, but is a last-ditch
// fallback that looks for pointers in src using the type information.
// We only use this when looking at a value on the stack whose type
// uses a GC program, because otherwise it's more efficient to use the
// GC bits. This is called on the system stack.
//go:nowritebarrier
//go:systemstack
func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
if typ.kind&kindNoPointers != 0 {
return
}
if typ.kind&kindGCProg == 0 {
cgoCheckBits(src, typ.gcdata, off, size)
return
}
switch typ.kind & kindMask {
default:
throw("can't happen")
case kindArray:
at := (*arraytype)(unsafe.Pointer(typ))
for i := uintptr(0); i < at.len; i++ {
if off < at.elem.size {
cgoCheckUsingType(at.elem, src, off, size)
}
src = add(src, at.elem.size)
skipped := off
if skipped > at.elem.size {
skipped = at.elem.size
}
checked := at.elem.size - skipped
off -= skipped
if size <= checked {
return
}
size -= checked
}
case kindStruct:
st := (*structtype)(unsafe.Pointer(typ))
for _, f := range st.fields {
if off < f.typ.size {
cgoCheckUsingType(f.typ, src, off, size)
}
src = add(src, f.typ.size)
skipped := off
if skipped > f.typ.size {
skipped = f.typ.size
}
checked := f.typ.size - skipped
off -= skipped
if size <= checked {
return
}
size -= checked
}
}
}
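
A worked example (editor's illustration) of the bookkeeping in the
struct case above: for fields of sizes 8, 8, and 16 with off=8 and
size=16, the first field is skipped entirely (skipped=8, checked=0, off
drops to 0), the second is checked in full (checked=8, size drops to
8), and only the first 8 bytes of the third are examined before
size <= checked ends the walk.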
......@@ -27,6 +27,13 @@ It is a comma-separated list of name=val pairs setting these named variables:
allocfreetrace: setting allocfreetrace=1 causes every allocation to be
profiled and a stack trace printed on each object's allocation and free.
cgocheck: setting cgocheck=0 disables all checks for packages
using cgo to incorrectly pass Go pointers to non-Go code.
Setting cgocheck=1 (the default) enables relatively cheap
checks that may miss some errors. Setting cgocheck=2 enables
expensive checks that should not miss any errors, but will
cause your program to run slower.
efence: setting efence=1 causes the allocator to run in a mode
where each object is allocated on a unique page and addresses are
never recycled.
......
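
To illustrate the new setting, a hypothetical session with a program
that stores a Go pointer into C memory (addresses invented, traceback
omitted):

	$ GODEBUG=cgocheck=2 go run main.go
	write of Go pointer 0xc820014010 to non-Go memory 0x7f4e6c000a20
	fatal error: Go pointer stored into non-Go memory

Under the default cgocheck=1 only pointers passed at cgo call
boundaries are checked; cgocheck=2 additionally vets every pointer
write that goes through the write barrier.
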
......@@ -39,7 +39,7 @@ import (
// white object dies before it is reached by the
// GC then the object can be collected during this GC cycle
// instead of waiting for the next cycle. Unfortunately the cost of
// ensure that the object holding the slot doesn't concurrently
// ensuring that the object holding the slot doesn't concurrently
// change to black without the mutator noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
......@@ -89,7 +89,7 @@ import (
// stack frames that have not been active.
//go:nowritebarrierrec
func gcmarkwb_m(slot *uintptr, ptr uintptr) {
if writeBarrierEnabled {
if writeBarrier.needed {
if ptr != 0 && inheap(ptr) {
shade(ptr)
}
......@@ -128,7 +128,10 @@ func writebarrierptr_nostore1(dst *uintptr, src uintptr) {
//go:nosplit
func writebarrierptr(dst *uintptr, src uintptr) {
*dst = src
if !writeBarrierEnabled {
if writeBarrier.cgo {
cgoCheckWriteBarrier(dst, src)
}
if !writeBarrier.needed {
return
}
if src != 0 && (src < sys.PhysPageSize || src == poisonStack) {
......@@ -144,7 +147,10 @@ func writebarrierptr(dst *uintptr, src uintptr) {
// Do not reapply.
//go:nosplit
func writebarrierptr_nostore(dst *uintptr, src uintptr) {
if !writeBarrierEnabled {
if writeBarrier.cgo {
cgoCheckWriteBarrier(dst, src)
}
if !writeBarrier.needed {
return
}
if src != 0 && (src < sys.PhysPageSize || src == poisonStack) {
......@@ -182,6 +188,9 @@ func writebarrieriface(dst *[2]uintptr, src [2]uintptr) {
//go:nosplit
func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
memmove(dst, src, typ.size)
if writeBarrier.cgo {
cgoCheckMemmove(typ, dst, src, 0, typ.size)
}
if typ.kind&kindNoPointers != 0 {
return
}
......@@ -198,7 +207,10 @@ func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
memmove(dst, src, size)
if !writeBarrierEnabled || typ.kind&kindNoPointers != 0 || size < sys.PtrSize || !inheap(uintptr(dst)) {
if writeBarrier.cgo {
cgoCheckMemmove(typ, dst, src, off, size)
}
if !writeBarrier.needed || typ.kind&kindNoPointers != 0 || size < sys.PtrSize || !inheap(uintptr(dst)) {
return
}
......@@ -218,7 +230,7 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size
// not to be preempted before the write barriers have been run.
//go:nosplit
func callwritebarrier(typ *_type, frame unsafe.Pointer, framesize, retoffset uintptr) {
if !writeBarrierEnabled || typ == nil || typ.kind&kindNoPointers != 0 || framesize-retoffset < sys.PtrSize || !inheap(uintptr(frame)) {
if !writeBarrier.needed || typ == nil || typ.kind&kindNoPointers != 0 || framesize-retoffset < sys.PtrSize || !inheap(uintptr(frame)) {
return
}
heapBitsBulkBarrier(uintptr(add(frame, retoffset)), framesize-retoffset)
......@@ -249,11 +261,15 @@ func typedslicecopy(typ *_type, dst, src slice) int {
msanread(srcp, uintptr(n)*typ.size)
}
if writeBarrier.cgo {
cgoCheckSliceCopy(typ, dst, src, n)
}
// Note: No point in checking typ.kind&kindNoPointers here:
// compiler only emits calls to typedslicecopy for types with pointers,
// and growslice and reflect_typedslicecopy check for pointers
// before calling typedslicecopy.
if !writeBarrierEnabled {
if !writeBarrier.needed {
memmove(dstp, srcp, uintptr(n)*typ.size)
return n
}
......
......@@ -399,7 +399,7 @@ func heapBitsBulkBarrier(p, size uintptr) {
if (p|size)&(sys.PtrSize-1) != 0 {
throw("heapBitsBulkBarrier: unaligned arguments")
}
if !writeBarrierEnabled {
if !writeBarrier.needed {
return
}
if !inheap(p) {
......@@ -466,7 +466,7 @@ func typeBitsBulkBarrier(typ *_type, p, size uintptr) {
println("runtime: typeBitsBulkBarrier with type ", *typ._string, " with GC prog")
throw("runtime: invalid typeBitsBulkBarrier")
}
if !writeBarrierEnabled {
if !writeBarrier.needed {
return
}
ptrmask := typ.gcdata
......
......@@ -209,7 +209,14 @@ func setGCPercent(in int32) (out int32) {
// Garbage collector phase.
// Indicates to write barrier and synchronization task to perform.
var gcphase uint32
var writeBarrierEnabled bool // compiler emits references to this in write barriers
// The compiler knows about this variable.
// If you change it, you must change the compiler too.
var writeBarrier struct {
enabled bool // compiler emits a check of this before calling write barrier
needed bool // whether we need a write barrier for current GC phase
cgo bool // whether we need a write barrier for a cgo check
}
// gcBlackenEnabled is 1 if mutator assists and background mark
// workers are allowed to blacken objects. This must only be set when
......@@ -240,7 +247,8 @@ const (
//go:nosplit
func setGCPhase(x uint32) {
atomic.Store(&gcphase, x)
writeBarrierEnabled = gcphase == _GCmark || gcphase == _GCmarktermination
writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
writeBarrier.enabled = writeBarrier.needed || writeBarrier.cgo
}
// gcMarkWorkerMode represents the mode that a concurrent mark worker
......
......@@ -780,7 +780,7 @@ const (
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
if !writeBarrierEnabled {
if !writeBarrier.needed {
throw("gcDrain phase incorrect")
}
......@@ -859,7 +859,7 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
// increments. It returns the amount of scan work performed.
//go:nowritebarrier
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
if !writeBarrierEnabled {
if !writeBarrier.needed {
throw("gcDrainN phase incorrect")
}
......
......@@ -401,6 +401,13 @@ func parsedebugvars() {
if debug.gcstackbarrierall > 0 {
firstStackBarrierOffset = 0
}
// For cgocheck > 1, we turn on the write barrier at all times
// and check all pointer writes.
if debug.cgocheck > 1 {
writeBarrier.cgo = true
writeBarrier.enabled = true
}
}
// Poor man's 64-bit division.
......
......@@ -98,7 +98,7 @@ func growslice(t *slicetype, old slice, cap int) slice {
} else {
// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
p = newarray(et, uintptr(newcap))
if !writeBarrierEnabled {
if !writeBarrier.enabled {
memmove(p, old.array, lenmem)
} else {
for i := uintptr(0); i < lenmem; i += et.size {
......
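
Note that growslice tests writeBarrier.enabled rather than
writeBarrier.needed: under cgocheck=2 the barrier is enabled even when
no GC is running, which forces the element-by-element path above and so
routes every pointer copy through writebarrierptr and its cgo check.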