Commit 181e26b9 authored by Russ Cox

runtime: replace func-based write barrier skipping with type-based

This CL revises CL 7504 to use explicit uintptr types for the
struct fields that are going to be updated sometimes without
write barriers. The result is that the fields are now updated *always*
without write barriers.

This approach has two important properties:

1) Now the GC never looks at the field, so if the missing reference
could cause a problem, it will do so all the time, not just when the
write barrier is missed at just the right moment.

2) Now a write barrier never happens for the field, avoiding the
(correct) detection of inconsistent write barriers when GODEBUG=wbshadow=1.

Change-Id: Iebd3962c727c0046495cc08914a8dc0808460e0e
Reviewed-on: https://go-review.googlesource.com/9019
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent c776592a
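
For readers skimming the diff below, here is a minimal, self-contained sketch of the before/after pattern this CL adopts. The puintptr type and its ptr/set accessors mirror the runtime2.go hunk; the package, struct, and field names around them are hypothetical and exist only for illustration.

package main

import "unsafe"

type p struct{ id int32 } // stand-in for the runtime's p

// puintptr mirrors the type added in runtime2.go: a *p stored as a uintptr,
// so the GC never scans this word and no store to it ever gets a write barrier.
type puintptr uintptr

func (pp puintptr) ptr() *p   { return (*p)(unsafe.Pointer(pp)) }
func (pp *puintptr) set(x *p) { *pp = puintptr(unsafe.Pointer(x)) }

// Hypothetical before/after shape of a struct field.
type mOld struct{ p *p }       // real pointer: scanned by the GC, stores get write barriers
type mNew struct{ p puintptr } // uintptr: never scanned, never write-barriered

func main() {
	pp := &p{id: 1}
	var before mOld
	var after mNew
	before.p = pp     // ordinary pointer store; the compiler may emit a write barrier
	after.p.set(pp)   // plain integer store; never a write barrier
	if after.p != 0 { // fields like this are compared against 0, not nil
		println(after.p.ptr().id)
	}
}

The same pattern is applied to guintptr (for *g) and muintptr (for *m) in the diff.
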
@@ -817,8 +817,8 @@ func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
 	mp := acquirem()
 	var persistent *persistentAlloc
-	if mp != nil && mp.p != nil {
-		persistent = &mp.p.palloc
+	if mp != nil && mp.p != 0 {
+		persistent = &mp.p.ptr().palloc
 	} else {
 		lock(&globalAlloc.mutex)
 		persistent = &globalAlloc.persistentAlloc
...
@@ -92,10 +92,6 @@ func needwb() bool {
 // the p associated with an m. We use the fact that m.p == nil to indicate
 // that we are in one these critical section and throw if the write is of
 // a pointer to a heap object.
-// The p, m, and g pointers are the pointers that are used by the scheduler
-// and need to be operated on without write barriers. We use
-// the setPNoWriteBarrier, setMNoWriteBarrier and setGNowriteBarrier to
-// avoid having to do the write barrier.
 //go:nosplit
 func writebarrierptr_nostore1(dst *uintptr, src uintptr) {
 	mp := acquirem()
@@ -104,7 +100,7 @@ func writebarrierptr_nostore1(dst *uintptr, src uintptr) {
 		return
 	}
 	systemstack(func() {
-		if mp.p == nil && memstats.enablegc && !mp.inwb && inheap(src) {
+		if mp.p == 0 && memstats.enablegc && !mp.inwb && inheap(src) {
 			throw("writebarrierptr_nostore1 called with mp.p == nil")
 		}
 		mp.inwb = true
...
@@ -275,24 +275,22 @@ func net_runtime_pollUnblock(pd *pollDesc) {
 // make pd ready, newly runnable goroutines (if any) are returned in rg/wg
 // May run during STW, so write barriers are not allowed.
-// Eliminating WB calls using setGNoWriteBarrier are safe since the gs are
-// reachable through allg.
 //go:nowritebarrier
-func netpollready(gpp **g, pd *pollDesc, mode int32) {
-	var rg, wg *g
+func netpollready(gpp *guintptr, pd *pollDesc, mode int32) {
+	var rg, wg guintptr
 	if mode == 'r' || mode == 'r'+'w' {
-		setGNoWriteBarrier(&rg, netpollunblock(pd, 'r', true))
+		rg.set(netpollunblock(pd, 'r', true))
 	}
 	if mode == 'w' || mode == 'r'+'w' {
-		setGNoWriteBarrier(&wg, netpollunblock(pd, 'w', true))
+		wg.set(netpollunblock(pd, 'w', true))
 	}
-	if rg != nil {
-		setGNoWriteBarrier(&rg.schedlink, *gpp)
-		setGNoWriteBarrier(gpp, rg)
+	if rg != 0 {
+		rg.ptr().schedlink = *gpp
+		*gpp = rg
 	}
-	if wg != nil {
-		setGNoWriteBarrier(&wg.schedlink, *gpp)
-		setGNoWriteBarrier(gpp, wg)
+	if wg != 0 {
+		wg.ptr().schedlink = *gpp
+		*gpp = wg
 	}
 }
...
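
As a companion to the netpollready change above, this hedged, self-contained analog shows how a list headed by a guintptr is built through schedlink with plain uintptr stores and then walked back as real pointers. Only the guintptr accessors match the diff; the node fields and the push helper are hypothetical stand-ins for what netpollready and its callers do.

package main

import "unsafe"

// guintptr as in the runtime2.go hunk: a *g stored as a uintptr.
type guintptr uintptr

type g struct {
	id        int
	schedlink guintptr
}

func (gp guintptr) ptr() *g   { return (*g)(unsafe.Pointer(gp)) }
func (gp *guintptr) set(x *g) { *gp = guintptr(unsafe.Pointer(x)) }

// push mirrors what netpollready does with rg and wg: link x in front of the
// list headed by *gpp using only uintptr stores, never pointer-typed writes.
func push(gpp *guintptr, x *g) {
	var rg guintptr
	rg.set(x)
	if rg != 0 {
		rg.ptr().schedlink = *gpp
		*gpp = rg
	}
}

func main() {
	keep := []*g{{id: 1}, {id: 2}} // real pointers keep the nodes alive for the GC
	var head guintptr
	for _, x := range keep {
		push(&head, x)
	}
	// Walk the list the way a caller of netpoll would, converting back to *g.
	for gp := head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		println(gp.id)
	}
}
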
@@ -55,9 +55,9 @@ func netpollarm(pd *pollDesc, mode int) {
 // polls for ready network connections
 // returns list of goroutines that become runnable
-func netpoll(block bool) (gp *g) {
+func netpoll(block bool) *g {
 	if epfd == -1 {
-		return
+		return nil
 	}
 	waitms := int32(-1)
 	if !block {
@@ -73,6 +73,7 @@ retry:
 		}
 		goto retry
 	}
+	var gp guintptr
 	for i := int32(0); i < n; i++ {
 		ev := &events[i]
 		if ev.events == 0 {
@@ -87,11 +88,12 @@ retry:
 		}
 		if mode != 0 {
 			pd := *(**pollDesc)(unsafe.Pointer(&ev.data))
-			netpollready((**g)(noescape(unsafe.Pointer(&gp))), pd, mode)
+			netpollready(&gp, pd, mode)
 		}
 	}
-	if block && gp == nil {
+	if block && gp == 0 {
 		goto retry
 	}
-	return gp
+	return gp.ptr()
 }
@@ -62,9 +62,9 @@ func netpollarm(pd *pollDesc, mode int) {
 // Polls for ready network connections.
 // Returns list of goroutines that become runnable.
-func netpoll(block bool) (gp *g) {
+func netpoll(block bool) *g {
 	if kq == -1 {
-		return
+		return nil
 	}
 	var tp *timespec
 	var ts timespec
@@ -81,6 +81,7 @@ retry:
 		}
 		goto retry
 	}
+	var gp guintptr
 	for i := 0; i < int(n); i++ {
 		ev := &events[i]
 		var mode int32
@@ -91,11 +92,11 @@ retry:
 			mode += 'w'
 		}
 		if mode != 0 {
-			netpollready((**g)(noescape(unsafe.Pointer(&gp))), (*pollDesc)(unsafe.Pointer(ev.udata)), mode)
+			netpollready(&gp, (*pollDesc)(unsafe.Pointer(ev.udata)), mode)
 		}
 	}
-	if block && gp == nil {
+	if block && gp == 0 {
 		goto retry
 	}
-	return gp
+	return gp.ptr()
 }
@@ -179,9 +179,9 @@ var netpolllasterr int32
 // polls for ready network connections
 // returns list of goroutines that become runnable
-func netpoll(block bool) (gp *g) {
+func netpoll(block bool) *g {
 	if portfd == -1 {
-		return
+		return nil
 	}
 	var wait *timespec
@@ -201,7 +201,7 @@ retry:
 		goto retry
 	}
-	gp = nil
+	var gp guintptr
 	for i := 0; i < int(n); i++ {
 		ev := &events[i]
@@ -232,12 +232,12 @@ retry:
 		}
 		if mode != 0 {
-			netpollready((**g)(noescape(unsafe.Pointer(&gp))), pd, mode)
+			netpollready(&gp, pd, mode)
 		}
 	}
-	if block && gp == nil {
+	if block && gp == 0 {
 		goto retry
 	}
-	return gp
+	return gp.ptr()
 }
@@ -63,14 +63,13 @@ func netpoll(block bool) *g {
 	var wait, qty, key, flags, n, i uint32
 	var errno int32
 	var op *net_op
-	var gp *g
+	var gp guintptr
 	mp := getg().m
 	if iocphandle == _INVALID_HANDLE_VALUE {
 		return nil
 	}
-	gp = nil
 	wait = 0
 	if block {
 		wait = _INFINITE
@@ -125,13 +124,13 @@ retry:
 		mp.blocked = false
 		handlecompletion(&gp, op, errno, qty)
 	}
-	if block && gp == nil {
+	if block && gp == 0 {
 		goto retry
 	}
-	return gp
+	return gp.ptr()
 }

-func handlecompletion(gpp **g, op *net_op, errno int32, qty uint32) {
+func handlecompletion(gpp *guintptr, op *net_op, errno int32, qty uint32) {
 	if op == nil {
 		throw("netpoll: GetQueuedCompletionStatus returned op == nil")
 	}
@@ -142,5 +141,5 @@ func handlecompletion(gpp **g, op *net_op, errno int32, qty uint32) {
 	}
 	op.errno = errno
 	op.qty = qty
-	netpollready((**g)(noescape(unsafe.Pointer(gpp))), op.pd, mode)
+	netpollready(gpp, op.pd, mode)
 }
@@ -363,8 +363,7 @@ func stdcall(fn stdFunction) uintptr {
 	if mp.profilehz != 0 {
 		// leave pc/sp for cpu profiler
-		// gp is on allg, so this WB can be eliminated.
-		setGNoWriteBarrier(&mp.libcallg, gp)
+		mp.libcallg.set(gp)
 		mp.libcallpc = getcallerpc(unsafe.Pointer(&fn))
 		// sp must be the last, because once async cpu profiler finds
 		// all three values to be non-zero, it will use them
...
@@ -81,7 +81,7 @@ func sighandler(_ureg *ureg, note *byte, gp *g) int {
 	}
 Throw:
 	_g_.m.throwing = 1
-	setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+	_g_.m.caughtsig.set(gp)
 	startpanic()
 	print(notestr, "\n")
 	print("PC=", hex(c.pc()), "\n")
...
@@ -165,7 +165,7 @@ func newdefer(siz int32) *_defer {
 	sc := deferclass(uintptr(siz))
 	mp := acquirem()
 	if sc < uintptr(len(p{}.deferpool)) {
-		pp := mp.p
+		pp := mp.p.ptr()
 		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
 			lock(&sched.deferlock)
 			for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
@@ -223,7 +223,7 @@ func freedefer(d *_defer) {
 	sc := deferclass(uintptr(d.siz))
 	if sc < uintptr(len(p{}.deferpool)) {
 		mp := acquirem()
-		pp := mp.p
+		pp := mp.p.ptr()
 		if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
 			// Transfer half of local cache to the central cache.
 			var first, last *_defer
...
@@ -208,7 +208,7 @@ func acquireSudog() *sudog {
 	// The acquirem/releasem increments m.locks during new(sudog),
 	// which keeps the garbage collector from being invoked.
 	mp := acquirem()
-	pp := mp.p
+	pp := mp.p.ptr()
 	if len(pp.sudogcache) == 0 {
 		lock(&sched.sudoglock)
 		// First, try to grab a batch from central cache.
@@ -257,7 +257,7 @@ func releaseSudog(s *sudog) {
 		throw("runtime: releaseSudog with non-nil gp.param")
 	}
 	mp := acquirem() // avoid rescheduling to another P
-	pp := mp.p
+	pp := mp.p.ptr()
 	if len(pp.sudogcache) == cap(pp.sudogcache) {
 		// Transfer half of local cache to the central cache.
 		var first, last *sudog
...
(One file's diff is collapsed and not shown here.)
@@ -87,8 +87,28 @@ type eface struct {
 	data unsafe.Pointer
 }

+// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
+// It is particularly important to avoid write barriers when the current P has
+// been released, because the GC thinks the world is stopped, and an
+// unexpected write barrier would not be synchronized with the GC,
+// which can lead to a half-executed write barrier that has marked the object
+// but not queued it. If the GC skips the object and completes before the
+// queuing can occur, it will incorrectly free the object.
+//
+// We tried using special assignment functions invoked only when not
+// holding a running P, but then some updates to a particular memory
+// word went through write barriers and some did not. This breaks the
+// write barrier shadow checking mode, and it is also scary: better to have
+// a word that is completely ignored by the GC than to have one for which
+// only a few updates are ignored.
+//
+// Gs, Ms, and Ps are always reachable via true pointers in the
+// allgs, allm, and allp lists or (during allocation before they reach those lists)
+// from stack variables.
+
 // A guintptr holds a goroutine pointer, but typed as a uintptr
-// to bypass write barriers. It is used in the Gobuf goroutine state.
+// to bypass write barriers. It is used in the Gobuf goroutine state
+// and in scheduling lists that are manipulated without a P.
 //
 // The Gobuf.g goroutine pointer is almost always updated by assembly code.
 // In one of the few places it is updated by Go code - func save - it must be
@@ -107,41 +127,18 @@ type eface struct {
 // alternate arena. Using guintptr doesn't make that problem any worse.
 type guintptr uintptr

-func (gp guintptr) ptr() *g {
-	return (*g)(unsafe.Pointer(gp))
-}
-
-// ps, ms, gs, and mcache are structures that must be manipulated at a level
-// lower than that of the normal Go language. For example the routine that
-// stops the world removes the p from the m structure informing the GC that
-// this P is stopped and then it moves the g to the global runnable queue.
-// If write barriers were allowed to happen at this point not only does
-// the GC think the thread is stopped but the underlying structures
-// like a p or m are not in a state that is not coherent enough to
-// support the write barrier actions.
-// This is particularly painful since a partially executed write barrier
-// may mark the object but be delinquent in informing the GC that the
-// object needs to be scanned.
-
-// setGNoWriteBarriers does *gdst = gval without a write barrier.
-func setGNoWriteBarrier(gdst **g, gval *g) {
-	*(*uintptr)(unsafe.Pointer(gdst)) = uintptr(unsafe.Pointer(gval))
-}
-
-// setMNoWriteBarriers does *mdst = mval without a write barrier.
-func setMNoWriteBarrier(mdst **m, mval *m) {
-	*(*uintptr)(unsafe.Pointer(mdst)) = uintptr(unsafe.Pointer(mval))
-}
-
-// setPNoWriteBarriers does *pdst = pval without a write barrier.
-func setPNoWriteBarrier(pdst **p, pval *p) {
-	*(*uintptr)(unsafe.Pointer(pdst)) = uintptr(unsafe.Pointer(pval))
-}
-
-// setMcacheNoWriteBarriers does *mcachedst = mcacheval without a write barrier.
-func setMcacheNoWriteBarrier(mcachedst **mcache, mcacheval *mcache) {
-	*(*uintptr)(unsafe.Pointer(mcachedst)) = uintptr(unsafe.Pointer(mcacheval))
-}
+func (gp guintptr) ptr() *g   { return (*g)(unsafe.Pointer(gp)) }
+func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
+
+type puintptr uintptr
+
+func (pp puintptr) ptr() *p   { return (*p)(unsafe.Pointer(pp)) }
+func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
+
+type muintptr uintptr
+
+func (mp muintptr) ptr() *m   { return (*m)(unsafe.Pointer(mp)) }
+func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

 type gobuf struct {
 	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
@@ -224,7 +221,7 @@ type g struct {
 	goid         int64
 	waitsince    int64  // approx time when the g become blocked
 	waitreason   string // if status==gwaiting
-	schedlink    *g
+	schedlink    guintptr
 	preempt      bool // preemption signal, duplicates stackguard0 = stackpreempt
 	paniconfault bool // panic (instead of crash) on unexpected fault address
 	preemptscan  bool // preempted g does scan for gc
@@ -263,11 +260,11 @@ type m struct {
 	procid    uint64     // for debuggers, but offset not hard-coded
 	gsignal   *g         // signal-handling g
 	tls       [4]uintptr // thread-local storage (for x86 extern register)
-	mstartfn  uintptr    // TODO: type as func(); note: this is a non-heap allocated func()
+	mstartfn  func()
 	curg      *g // current running goroutine
-	caughtsig *g // goroutine running during fatal signal
-	p         *p // attached p for executing go code (nil if not executing go code)
-	nextp     *p
+	caughtsig guintptr // goroutine running during fatal signal
+	p         puintptr // attached p for executing go code (nil if not executing go code)
+	nextp     puintptr
 	id        int32
 	mallocing int32
 	throwing  int32
@@ -286,7 +283,7 @@ type m struct {
 	ncgo      int32 // number of cgo calls currently in progress
 	park      note
 	alllink   *m // on allm
-	schedlink *m
+	schedlink muintptr
 	machport  uint32 // return address for mach ipc (os x)
 	mcache    *mcache
 	lockedg   *g
@@ -315,7 +312,7 @@ type m struct {
 	libcall   libcall
 	libcallpc uintptr // for cpu profiler
 	libcallsp uintptr
-	libcallg  *g
+	libcallg  guintptr
 	//#endif
 	//#ifdef GOOS_solaris
 	perrno *int32 // pointer to tls errno
@@ -336,10 +333,10 @@ type p struct {
 	id          int32
 	status      uint32 // one of pidle/prunning/...
-	link        *p
+	link        puintptr
 	schedtick   uint32 // incremented on every scheduler call
 	syscalltick uint32 // incremented on every system call
-	m           *m // back-link to associated m (nil if idle)
+	m           muintptr // back-link to associated m (nil if idle)
 	mcache      *mcache

 	deferpool [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
@@ -379,19 +376,19 @@ type schedt struct {
 	goidgen uint64

-	midle        *m    // idle m's waiting for work
+	midle        muintptr // idle m's waiting for work
 	nmidle       int32 // number of idle m's waiting for work
 	nmidlelocked int32 // number of locked m's waiting for work
 	mcount       int32 // number of m's that have been created
 	maxmcount    int32 // maximum number of m's allowed (or die)

-	pidle      *p // idle p's
+	pidle      puintptr // idle p's
 	npidle     uint32
 	nmspinning uint32

 	// Global runnable queue.
-	runqhead *g
-	runqtail *g
+	runqhead guintptr
+	runqtail guintptr
 	runqsize int32

 	// Global cache of dead G's.
...
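
The comment block added to runtime2.go above stresses that the GC never traces through these uintptr-typed words, so the referenced Gs, Ms, and Ps must stay reachable through true pointers (allgs, allm, allp). A hedged, self-contained illustration of that constraint follows; the worker type, the wuintptr name, and the all slice are hypothetical, and only the accessor pattern matches the diff.

package main

import (
	"runtime"
	"unsafe"
)

type worker struct{ id int } // hypothetical stand-in for a g, m, or p

// wuintptr follows the guintptr/muintptr/puintptr pattern from the diff.
type wuintptr uintptr

func (w wuintptr) ptr() *worker   { return (*worker)(unsafe.Pointer(w)) }
func (w *wuintptr) set(x *worker) { *w = wuintptr(unsafe.Pointer(x)) }

// all plays the role of allgs/allm/allp: true pointers that keep the objects
// reachable, since the uintptr links below are never scanned by the GC.
var all []*worker

func main() {
	w := &worker{id: 7}
	all = append(all, w)

	var link wuintptr
	link.set(w)  // plain uintptr store: no write barrier, and the GC does not trace it
	runtime.GC() // safe only because all still holds a real pointer to w
	println(link != 0, link.ptr().id)
}
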
@@ -100,7 +100,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 	}
 	_g_.m.throwing = 1
-	setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+	_g_.m.caughtsig.set(gp)
 	startpanic()
 	if sig < uint32(len(sigtable)) {
...
@@ -136,7 +136,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 	}
 	_g_.m.throwing = 1
-	setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+	_g_.m.caughtsig.set(gp)
 	if crashing == 0 {
 		startpanic()
...
@@ -95,7 +95,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 	}
 	_g_.m.throwing = 1
-	setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+	_g_.m.caughtsig.set(gp)
 	startpanic()
 	if sig < uint32(len(sigtable)) {
...
@@ -108,7 +108,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 	}
 	_g_.m.throwing = 1
-	setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+	_g_.m.caughtsig.set(gp)
 	startpanic()
 	if sig < uint32(len(sigtable)) {
...
@@ -113,7 +113,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
 	}
 	_g_.m.throwing = 1
-	setGNoWriteBarrier(&_g_.m.caughtsig, gp)
+	_g_.m.caughtsig.set(gp)
 	startpanic()
 	if sig < uint32(len(sigtable)) {
...
@@ -680,7 +680,7 @@ func newstack() {
 	// it needs a lock held by the goroutine), that small preemption turns
 	// into a real deadlock.
 	if preempt {
-		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.status != _Prunning {
+		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
 			// Let the goroutine keep running for now.
 			// gp->preempt is set, so it will be preempted next time.
 			gp.stackguard0 = gp.stack.lo + _StackGuard
@@ -724,7 +724,7 @@ func newstack() {
 		if gp == thisg.m.g0 {
 			throw("runtime: preempt g0")
 		}
-		if thisg.m.p == nil && thisg.m.locks == 0 {
+		if thisg.m.p == 0 && thisg.m.locks == 0 {
 			throw("runtime: g is running but p is not")
 		}
 		if gp.preemptscan {
...
@@ -506,7 +506,7 @@ func traceEvent(ev byte, skip int, args ...uint64) {
 // traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
 func traceAcquireBuffer() (mp *m, pid int32, bufp **traceBuf) {
 	mp = acquirem()
-	if p := mp.p; p != nil {
+	if p := mp.p.ptr(); p != nil {
 		return mp, p.id, &p.tracebuf
 	}
 	lock(&trace.bufLock)
@@ -732,7 +732,7 @@ func traceProcStop(pp *p) {
 	// to handle this we temporary employ the P.
 	mp := acquirem()
 	oldp := mp.p
-	mp.p = pp
+	mp.p.set(pp)
 	traceEvent(traceEvProcStop, -1)
 	mp.p = oldp
 	releasem(mp)
@@ -806,7 +806,7 @@ func traceGoSysBlock(pp *p) {
 	// to handle this we temporary employ the P.
 	mp := acquirem()
 	oldp := mp.p
-	mp.p = pp
+	mp.p.set(pp)
 	traceEvent(traceEvGoSysBlock, -1)
 	mp.p = oldp
 	releasem(mp)
...
@@ -528,7 +528,7 @@ func gcallers(gp *g, skip int, pcbuf []uintptr) int {
 func showframe(f *_func, gp *g) bool {
 	g := getg()
-	if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig) {
+	if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
 		return true
 	}
 	traceback := gotraceback(nil)
...