Commit 5288fadb authored by Dmitry Vyukov

runtime: add tracing of runtime events

Add actual tracing of interesting runtime events.
Part of a larger tracing functionality:
https://docs.google.com/document/u/1/d/1FP5apqzBgr7ahCCgFO-yoVhk4YZrNIDNf9RybngBc14/pub
Full change:
https://codereview.appspot.com/146920043

Change-Id: Icccf54aea54e09350bb698ba6bf11532f9fbe6d3
Reviewed-on: https://go-review.googlesource.com/1451
Reviewed-by: Russ Cox <rsc@golang.org>
parent f30a2b9c
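
The change below is largely mechanical: gopark and goparkunlock grow a traceEv byte parameter, every blocking site passes an event constant describing why the goroutine blocks, and the M records it in waittraceev so the tracer can emit the event when the goroutine is descheduled. A minimal standalone sketch of that pattern (toy names and toy constants, not the runtime's actual code):

// Sketch only: a toy analogue of the gopark change in this commit.
// The real runtime stores the event in mp.waittraceev and emits it when
// the goroutine is descheduled; here we just print it.
package main

import "fmt"

// Toy stand-ins for the traceEvGoBlock* constants referenced in the diff.
const (
	evGoBlock     byte = iota // generic blocking
	evGoBlockSend             // blocked on a channel send
	evGoBlockRecv             // blocked on a channel receive
)

var traceEnabled = true

// park mimics the new gopark signature: callers must now say *why* they block.
func park(reason string, traceEv byte) {
	if traceEnabled {
		fmt.Printf("trace event %d: goroutine blocked: %s\n", traceEv, reason)
	}
	// the real gopark deschedules the calling goroutine here
}

func main() {
	park("chan send", evGoBlockSend)
	park("chan receive", evGoBlockRecv)
}
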
@@ -96,7 +96,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 		if !block {
 			return false
 		}
-		gopark(nil, nil, "chan send (nil chan)")
+		gopark(nil, nil, "chan send (nil chan)", traceEvGoStop)
 		throw("unreachable")
 	}
@@ -178,7 +178,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 	mysg.selectdone = nil
 	gp.param = nil
 	c.sendq.enqueue(mysg)
-	goparkunlock(&c.lock, "chan send")
+	goparkunlock(&c.lock, "chan send", traceEvGoBlockSend)
 	// someone woke us up.
 	if mysg != gp.waiting {
@@ -217,7 +217,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 	mysg.elem = nil
 	mysg.selectdone = nil
 	c.sendq.enqueue(mysg)
-	goparkunlock(&c.lock, "chan send")
+	goparkunlock(&c.lock, "chan send", traceEvGoBlockSend)
 	// someone woke us up - try again
 	if mysg.releasetime > 0 {
@@ -340,7 +340,7 @@ func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, r
 		if !block {
 			return
 		}
-		gopark(nil, nil, "chan receive (nil chan)")
+		gopark(nil, nil, "chan receive (nil chan)", traceEvGoStop)
 		throw("unreachable")
 	}
@@ -414,7 +414,7 @@ func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, r
 	mysg.selectdone = nil
 	gp.param = nil
 	c.recvq.enqueue(mysg)
-	goparkunlock(&c.lock, "chan receive")
+	goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv)
 	// someone woke us up
 	if mysg != gp.waiting {
@@ -471,7 +471,7 @@ func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, r
 	mysg.selectdone = nil
 	c.recvq.enqueue(mysg)
-	goparkunlock(&c.lock, "chan receive")
+	goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv)
 	// someone woke us up - try again
 	if mysg.releasetime > 0 {
......
@@ -368,6 +368,12 @@ func gcwork(force int32) {
 	if force == 0 {
 		gctimer.cycle.sweepterm = nanotime()
 	}
+
+	if trace.enabled {
+		traceGoSched()
+		traceGCStart()
+	}
+
 	// Pick up the remaining unswept/not being swept spans before we STW
 	for gosweepone() != ^uintptr(0) {
 		sweep.nbgsweep++
@@ -423,6 +429,11 @@ func gcwork(force int32) {
 		gccheckmark_m(startTime, eagersweep)
 	})
+
+	if trace.enabled {
+		traceGCDone()
+		traceGoStart()
+	}
 	// all done
 	mp.gcing = 0
......
@@ -71,6 +71,9 @@ func purgecachedstats(c *mcache) {
 	h := &mheap_
 	memstats.heap_alloc += uint64(c.local_cachealloc)
 	c.local_cachealloc = 0
+	if trace.enabled {
+		traceHeapAlloc()
+	}
 	memstats.tinyallocs += uint64(c.local_tinyallocs)
 	c.local_tinyallocs = 0
 	memstats.nlookup += uint64(c.local_nlookup)
......
@@ -127,7 +127,7 @@ func runfinq() {
 			fing = gp
 			fingwait = true
 			gp.issystem = true
-			goparkunlock(&finlock, "finalizer wait")
+			goparkunlock(&finlock, "finalizer wait", traceEvGoBlock)
 			gp.issystem = false
 			continue
 		}
......
@@ -1003,6 +1003,11 @@ func mSpan_Sweep(s *mspan, preserve bool) bool {
 		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
 		throw("MSpan_Sweep: bad span state")
 	}
+
+	if trace.enabled {
+		traceGCSweepStart()
+	}
+
 	cl := s.sizeclass
 	size := s.elemsize
 	res := false
@@ -1132,6 +1137,10 @@ func mSpan_Sweep(s *mspan, preserve bool) bool {
 		res = mCentral_FreeSpan(&mheap_.central[cl].mcentral, s, int32(nfree), head, end, preserve)
 		// MCentral_FreeSpan updates sweepgen
 	}
+	if trace.enabled {
+		traceGCSweepDone()
+		traceNextGC()
+	}
 	return res
 }
@@ -1212,12 +1221,20 @@ func gchelper() {
 	_g_.m.traceback = 2
 	gchelperstart()
+
+	if trace.enabled {
+		traceGCScanStart()
+	}
 	// parallel mark for over GC roots
 	parfordo(work.markfor)
 	if gcphase != _GCscan {
 		scanblock(0, 0, nil) // blocks in getfull
 	}
+
+	if trace.enabled {
+		traceGCScanDone()
+	}
 	nproc := work.nproc // work.nproc can change right after we increment work.ndone
 	if xadd(&work.ndone, +1) == nproc-1 {
 		notewakeup(&work.alldone)
@@ -1540,6 +1557,10 @@ func gc(start_time int64, eagersweep bool) {
 		gp.gcworkdone = false // set to true in gcphasework
 	}
+
+	if trace.enabled {
+		traceGCScanStart()
+	}
 	parforsetup(work.markfor, work.nproc, uint32(_RootCount+allglen), nil, false, markroot)
 	if work.nproc > 1 {
 		noteclear(&work.alldone)
@@ -1572,6 +1593,10 @@ func gc(start_time int64, eagersweep bool) {
 		notesleep(&work.alldone)
 	}
+
+	if trace.enabled {
+		traceGCScanDone()
+	}
 	shrinkfinish()
 	cachestats()
@@ -1581,6 +1606,9 @@ func gc(start_time int64, eagersweep bool) {
 	// conservatively set next_gc to high value assuming that everything is live
 	// concurrent/lazy sweep will reduce this number while discovering new garbage
 	memstats.next_gc = memstats.heap_alloc + memstats.heap_alloc*uint64(gcpercent)/100
+	if trace.enabled {
+		traceNextGC()
+	}

 	t4 := nanotime()
 	atomicstore64(&memstats.last_gc, uint64(unixnanotime())) // must be Unix time to make sense to user
......
@@ -67,7 +67,7 @@ func backgroundgc() {
 		gcwork(0)
 		lock(&bggc.lock)
 		bggc.working = 0
-		goparkunlock(&bggc.lock, "Concurrent GC wait")
+		goparkunlock(&bggc.lock, "Concurrent GC wait", traceEvGoBlock)
 	}
 }
@@ -88,6 +88,6 @@ func bgsweep() {
 			continue
 		}
 		sweep.parked = true
-		goparkunlock(&gclock, "GC sweep wait")
+		goparkunlock(&gclock, "GC sweep wait", traceEvGoBlock)
 	}
 }
@@ -217,6 +217,9 @@ func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan
 			}
 		}
 	}
+	if trace.enabled {
+		traceHeapAlloc()
+	}
 	unlock(&h.lock)
 	return s
 }
@@ -440,6 +443,9 @@ func mHeap_Free(h *mheap, s *mspan, acct int32) {
 			memstats.heap_objects--
 		}
 		mHeap_FreeSpanLocked(h, s, true, true)
+		if trace.enabled {
+			traceHeapAlloc()
+		}
 		unlock(&h.lock)
 	})
 }
......
@@ -71,7 +71,7 @@ type pollCache struct {
 var (
 	netpollInited uint32
 	pollcache     pollCache
 )

 //go:linkname net_runtime_pollServerInit net.runtime_pollServerInit
@@ -333,7 +333,7 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
 	// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
 	// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
 	if waitio || netpollcheckerr(pd, mode) == 0 {
-		gopark(netpollblockcommit, unsafe.Pointer(gpp), "IO wait")
+		gopark(netpollblockcommit, unsafe.Pointer(gpp), "IO wait", traceEvGoBlockNet)
 	}
 	// be careful to not lose concurrent READY notification
 	old := xchguintptr(gpp, 0)
......
@@ -93,7 +93,7 @@ func main() {
 	// let the other goroutine finish printing the panic trace.
 	// Once it does, it will exit. See issue 3934.
 	if panicking != 0 {
-		gopark(nil, nil, "panicwait")
+		gopark(nil, nil, "panicwait", traceEvGoStop)
 	}
 	exit(0)
@@ -117,7 +117,7 @@ func forcegchelper() {
 			throw("forcegc: phase error")
 		}
 		atomicstore(&forcegc.idle, 1)
-		goparkunlock(&forcegc.lock, "force gc (idle)")
+		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock)
 		// this goroutine is explicitly resumed by sysmon
 		if debug.gctrace > 0 {
 			println("GC forced")
@@ -136,7 +136,7 @@ func Gosched() {
 // Puts the current goroutine into a waiting state and calls unlockf.
 // If unlockf returns false, the goroutine is resumed.
-func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string) {
+func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte) {
 	mp := acquirem()
 	gp := mp.curg
 	status := readgstatus(gp)
@@ -146,6 +146,7 @@ func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason s
 	mp.waitlock = lock
 	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
 	gp.waitreason = reason
+	mp.waittraceev = traceEv
 	releasem(mp)
 	// can't do anything that might move the G between Ms here.
 	mcall(park_m)
@@ -153,8 +154,8 @@ func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason s
 // Puts the current goroutine into a waiting state and unlocks the lock.
 // The goroutine can be made runnable again by calling goready(gp).
-func goparkunlock(lock *mutex, reason string) {
-	gopark(parkunlock_c, unsafe.Pointer(lock), reason)
+func goparkunlock(lock *mutex, reason string, traceEv byte) {
+	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv)
 }

 func goready(gp *g) {
......
[diff for one file collapsed]
@@ -277,6 +277,8 @@ type m struct {
 	traceback   uint8
 	waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
 	waitlock    unsafe.Pointer
+	waittraceev byte
+	syscalltick uint32
 //#ifdef GOOS_windows
 	thread uintptr // thread handle
 // these are here because they are too large to be on the stack
......
@@ -173,7 +173,7 @@ func selparkcommit(gp *g, sel unsafe.Pointer) bool {
 }

 func block() {
-	gopark(nil, nil, "select (no cases)") // forever
+	gopark(nil, nil, "select (no cases)", traceEvGoStop) // forever
 }

 // overwrites return pc on stack to signal which case of the select
@@ -363,7 +363,7 @@ loop:
 	// wait for someone to wake us up
 	gp.param = nil
-	gopark(selparkcommit, unsafe.Pointer(sel), "select")
+	gopark(selparkcommit, unsafe.Pointer(sel), "select", traceEvGoBlockSelect)
 	// someone woke us up
 	sellock(sel)
......
@@ -97,7 +97,7 @@ func semacquire(addr *uint32, profile bool) {
 		// Any semrelease after the cansemacquire knows we're waiting
 		// (we set nwait above), so go to sleep.
 		root.queue(addr, s)
-		goparkunlock(&root.lock, "semacquire")
+		goparkunlock(&root.lock, "semacquire", traceEvGoBlockSync)
 		if cansemacquire(addr) {
 			break
 		}
@@ -234,7 +234,7 @@ func syncsemacquire(s *syncSema) {
 			s.tail.next = w
 		}
 		s.tail = w
-		goparkunlock(&s.lock, "semacquire")
+		goparkunlock(&s.lock, "semacquire", traceEvGoBlockCond)
 		if t0 != 0 {
 			blockevent(int64(w.releasetime)-t0, 2)
 		}
@@ -273,7 +273,7 @@ func syncsemrelease(s *syncSema, n uint32) {
 			s.tail.next = w
 		}
 		s.tail = w
-		goparkunlock(&s.lock, "semarelease")
+		goparkunlock(&s.lock, "semarelease", traceEvGoBlockCond)
 		releaseSudog(w)
 	} else {
 		unlock(&s.lock)
......
@@ -721,7 +721,7 @@ func newstack() {
 		// Act like goroutine called runtime.Gosched.
 		casgstatus(gp, _Gwaiting, _Grunning)
-		gosched_m(gp) // never return
+		gopreempt_m(gp) // never return
 	}

 	// Allocate a bigger segment and move the stack.
......
@@ -56,7 +56,7 @@ func timeSleep(ns int64) {
 	t.arg = getg()
 	lock(&timers.lock)
 	addtimerLocked(t)
-	goparkunlock(&timers.lock, "sleep")
+	goparkunlock(&timers.lock, "sleep", traceEvGoSleep)
 }

 // startTimer adds t to the timer heap.
@@ -200,7 +200,7 @@ func timerproc() {
 		if delta < 0 || faketime > 0 {
 			// No timers left - put goroutine to sleep.
 			timers.rescheduling = true
-			goparkunlock(&timers.lock, "timer goroutine (idle)")
+			goparkunlock(&timers.lock, "timer goroutine (idle)", traceEvGoBlock)
 			continue
 		}
 		// At least one timer pending. Sleep until then.
......
@@ -292,7 +292,7 @@ func ReadTrace() []byte {
 	// Wait for new data.
 	if trace.fullHead == nil && !trace.shutdown {
 		trace.reader = getg()
-		goparkunlock(&trace.lock, "trace reader (blocked)" /*, traceEvGoBlock*/)
+		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock)
 		lock(&trace.lock)
 	}
 	// Write a buffer.
......
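
The ReadTrace hunk above is the consumer side of the tracer: a reader goroutine parks until a full buffer is available. As a rough usage sketch, a program drains the tracer roughly like this (StartTrace/StopTrace/ReadTrace are the runtime entry points as they later stabilized; the exact API at this commit may differ):

// Hedged sketch: draining the tracer via the runtime entry points.
package main

import (
	"os"
	"runtime"
)

func main() {
	if err := runtime.StartTrace(); err != nil {
		panic(err)
	}
	done := make(chan struct{})
	go func() {
		for {
			// ReadTrace blocks until data is available and returns
			// nil once tracing stops and all buffers are drained.
			data := runtime.ReadTrace()
			if data == nil {
				close(done)
				return
			}
			os.Stdout.Write(data) // binary trace data
		}
	}()
	// ... run the workload being traced ...
	runtime.StopTrace()
	<-done
}
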