Commit 5288fadb authored by Dmitry Vyukov

runtime: add tracing of runtime events

Add actual tracing of interesting runtime events.
Part of a larger tracing functionality:
https://docs.google.com/document/u/1/d/1FP5apqzBgr7ahCCgFO-yoVhk4YZrNIDNf9RybngBc14/pub
Full change:
https://codereview.appspot.com/146920043

Change-Id: Icccf54aea54e09350bb698ba6bf11532f9fbe6d3
Reviewed-on: https://go-review.googlesource.com/1451
Reviewed-by: Russ Cox <rsc@golang.org>
parent f30a2b9c
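
The diff below follows two recurring patterns. First, the blocking primitives gopark and goparkunlock gain a trace event byte, so every park site states in machine-readable form why the goroutine stopped. Second, each emission site is guarded by a check of trace.enabled, keeping the cost of disabled tracing to a load and a branch. A minimal runnable sketch of the guard pattern (traceEnabled, gcCycle, and the emitter body are illustrative stand-ins, not the runtime's internals):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    var traceEnabled int32 // stand-in for the runtime's trace.enabled flag

    // traceGCStart is an illustrative emitter; the real one appends a
    // typed event to a per-P trace buffer.
    func traceGCStart() { fmt.Println("ev: GCStart") }

    func gcCycle() {
    	// Disabled tracing costs only this load and branch.
    	if atomic.LoadInt32(&traceEnabled) != 0 {
    		traceGCStart()
    	}
    	fmt.Println("... GC work ...")
    }

    func main() {
    	gcCycle()                           // no event emitted
    	atomic.StoreInt32(&traceEnabled, 1) // enable tracing
    	gcCycle()                           // now emits GCStart
    }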
@@ -96,7 +96,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
 		if !block {
 			return false
 		}
-		gopark(nil, nil, "chan send (nil chan)")
+		gopark(nil, nil, "chan send (nil chan)", traceEvGoStop)
 		throw("unreachable")
 	}
@@ -178,7 +178,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
 		mysg.selectdone = nil
 		gp.param = nil
 		c.sendq.enqueue(mysg)
-		goparkunlock(&c.lock, "chan send")
+		goparkunlock(&c.lock, "chan send", traceEvGoBlockSend)

 		// someone woke us up.
 		if mysg != gp.waiting {
@@ -217,7 +217,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
 		mysg.elem = nil
 		mysg.selectdone = nil
 		c.sendq.enqueue(mysg)
-		goparkunlock(&c.lock, "chan send")
+		goparkunlock(&c.lock, "chan send", traceEvGoBlockSend)

 		// someone woke us up - try again
 		if mysg.releasetime > 0 {
@@ -340,7 +340,7 @@ func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
 		if !block {
 			return
 		}
-		gopark(nil, nil, "chan receive (nil chan)")
+		gopark(nil, nil, "chan receive (nil chan)", traceEvGoStop)
 		throw("unreachable")
 	}
@@ -414,7 +414,7 @@ func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
 		mysg.selectdone = nil
 		gp.param = nil
 		c.recvq.enqueue(mysg)
-		goparkunlock(&c.lock, "chan receive")
+		goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv)

 		// someone woke us up
 		if mysg != gp.waiting {
@@ -471,7 +471,7 @@ func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
 		mysg.selectdone = nil
 		c.recvq.enqueue(mysg)
-		goparkunlock(&c.lock, "chan receive")
+		goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv)

 		// someone woke us up - try again
 		if mysg.releasetime > 0 {
@@ -368,6 +368,12 @@ func gcwork(force int32) {
 	if force == 0 {
 		gctimer.cycle.sweepterm = nanotime()
 	}
+
+	if trace.enabled {
+		traceGoSched()
+		traceGCStart()
+	}
+
 	// Pick up the remaining unswept/not being swept spans before we STW
 	for gosweepone() != ^uintptr(0) {
 		sweep.nbgsweep++
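
The two emissions above open the stop-the-world window in the trace: traceGoSched records that the current goroutine stopped running user code, and traceGCStart opens the GC interval; the traceGCDone/traceGoStart pair in the next hunk closes both. A hedged sketch of this start/done bracketing (the names mirror the commit's emitters, the bodies are illustrative):

    package main

    import "fmt"

    // Illustrative stand-ins for the commit's emitters; each Start event
    // must be matched by a Done event so a viewer can compute durations.
    func traceGCStart() { fmt.Println("ev: GCStart") }
    func traceGCDone()  { fmt.Println("ev: GCDone") }

    func gcwork() {
    	traceGCStart()
    	defer traceGCDone() // the mirror-image event closes the interval
    	fmt.Println("... stop-the-world collection ...")
    }

    func main() { gcwork() }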
@@ -423,6 +429,11 @@ func gcwork(force int32) {
 		gccheckmark_m(startTime, eagersweep)
 	})
+	if trace.enabled {
+		traceGCDone()
+		traceGoStart()
+	}
+
 	// all done
 	mp.gcing = 0
@@ -71,6 +71,9 @@ func purgecachedstats(c *mcache) {
 	h := &mheap_
 	memstats.heap_alloc += uint64(c.local_cachealloc)
 	c.local_cachealloc = 0
+	if trace.enabled {
+		traceHeapAlloc()
+	}
 	memstats.tinyallocs += uint64(c.local_tinyallocs)
 	c.local_tinyallocs = 0
 	memstats.nlookup += uint64(c.local_nlookup)
@@ -127,7 +127,7 @@ func runfinq() {
 			fing = gp
 			fingwait = true
 			gp.issystem = true
-			goparkunlock(&finlock, "finalizer wait")
+			goparkunlock(&finlock, "finalizer wait", traceEvGoBlock)
 			gp.issystem = false
 			continue
 		}
@@ -1003,6 +1003,11 @@ func mSpan_Sweep(s *mspan, preserve bool) bool {
 		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
 		throw("MSpan_Sweep: bad span state")
 	}
+
+	if trace.enabled {
+		traceGCSweepStart()
+	}
+
 	cl := s.sizeclass
 	size := s.elemsize
 	res := false
@@ -1132,6 +1137,10 @@ func mSpan_Sweep(s *mspan, preserve bool) bool {
 		res = mCentral_FreeSpan(&mheap_.central[cl].mcentral, s, int32(nfree), head, end, preserve)
 		// MCentral_FreeSpan updates sweepgen
 	}
+	if trace.enabled {
+		traceGCSweepDone()
+		traceNextGC()
+	}
 	return res
 }
@@ -1212,12 +1221,20 @@ func gchelper() {
 	_g_.m.traceback = 2
 	gchelperstart()

+	if trace.enabled {
+		traceGCScanStart()
+	}
+
 	// parallel mark for over GC roots
 	parfordo(work.markfor)
 	if gcphase != _GCscan {
 		scanblock(0, 0, nil) // blocks in getfull
 	}

+	if trace.enabled {
+		traceGCScanDone()
+	}
+
 	nproc := work.nproc // work.nproc can change right after we increment work.ndone
 	if xadd(&work.ndone, +1) == nproc-1 {
 		notewakeup(&work.alldone)
@@ -1540,6 +1557,10 @@ func gc(start_time int64, eagersweep bool) {
 		gp.gcworkdone = false // set to true in gcphasework
 	}

+	if trace.enabled {
+		traceGCScanStart()
+	}
+
 	parforsetup(work.markfor, work.nproc, uint32(_RootCount+allglen), nil, false, markroot)
 	if work.nproc > 1 {
 		noteclear(&work.alldone)
@@ -1572,6 +1593,10 @@ func gc(start_time int64, eagersweep bool) {
 		notesleep(&work.alldone)
 	}

+	if trace.enabled {
+		traceGCScanDone()
+	}
+
 	shrinkfinish()
 	cachestats()
@@ -1581,6 +1606,9 @@ func gc(start_time int64, eagersweep bool) {
 	// conservatively set next_gc to high value assuming that everything is live
 	// concurrent/lazy sweep will reduce this number while discovering new garbage
 	memstats.next_gc = memstats.heap_alloc + memstats.heap_alloc*uint64(gcpercent)/100
+	if trace.enabled {
+		traceNextGC()
+	}

 	t4 := nanotime()
 	atomicstore64(&memstats.last_gc, uint64(unixnanotime())) // must be Unix time to make sense to user
@@ -67,7 +67,7 @@ func backgroundgc() {
 		gcwork(0)
 		lock(&bggc.lock)
 		bggc.working = 0
-		goparkunlock(&bggc.lock, "Concurrent GC wait")
+		goparkunlock(&bggc.lock, "Concurrent GC wait", traceEvGoBlock)
 	}
 }
@@ -88,6 +88,6 @@ func bgsweep() {
 			continue
 		}
 		sweep.parked = true
-		goparkunlock(&gclock, "GC sweep wait")
+		goparkunlock(&gclock, "GC sweep wait", traceEvGoBlock)
 	}
 }
@@ -217,6 +217,9 @@ func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan {
 			}
 		}
 	}
+	if trace.enabled {
+		traceHeapAlloc()
+	}
 	unlock(&h.lock)
 	return s
 }
@@ -440,6 +443,9 @@ func mHeap_Free(h *mheap, s *mspan, acct int32) {
 			memstats.heap_objects--
 		}
 		mHeap_FreeSpanLocked(h, s, true, true)
+		if trace.enabled {
+			traceHeapAlloc()
+		}
 		unlock(&h.lock)
 	})
@@ -71,7 +71,7 @@ type pollCache struct {
 var (
 	netpollInited uint32
-	pollcache     pollCache
+	pollcache     pollCache
 )

 //go:linkname net_runtime_pollServerInit net.runtime_pollServerInit
@@ -333,7 +333,7 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
 	// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
 	// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
 	if waitio || netpollcheckerr(pd, mode) == 0 {
-		gopark(netpollblockcommit, unsafe.Pointer(gpp), "IO wait")
+		gopark(netpollblockcommit, unsafe.Pointer(gpp), "IO wait", traceEvGoBlockNet)
 	}
 	// be careful to not lose concurrent READY notification
 	old := xchguintptr(gpp, 0)
@@ -93,7 +93,7 @@ func main() {
 	// let the other goroutine finish printing the panic trace.
 	// Once it does, it will exit. See issue 3934.
 	if panicking != 0 {
-		gopark(nil, nil, "panicwait")
+		gopark(nil, nil, "panicwait", traceEvGoStop)
 	}

 	exit(0)
@@ -117,7 +117,7 @@ func forcegchelper() {
 			throw("forcegc: phase error")
 		}
 		atomicstore(&forcegc.idle, 1)
-		goparkunlock(&forcegc.lock, "force gc (idle)")
+		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock)
 		// this goroutine is explicitly resumed by sysmon
 		if debug.gctrace > 0 {
 			println("GC forced")
@@ -136,7 +136,7 @@ func Gosched() {
 // Puts the current goroutine into a waiting state and calls unlockf.
 // If unlockf returns false, the goroutine is resumed.
-func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string) {
+func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte) {
 	mp := acquirem()
 	gp := mp.curg
 	status := readgstatus(gp)
@@ -146,6 +146,7 @@ func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string) {
 	mp.waitlock = lock
 	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
 	gp.waitreason = reason
+	mp.waittraceev = traceEv
 	releasem(mp)
 	// can't do anything that might move the G between Ms here.
 	mcall(park_m)
@@ -153,8 +154,8 @@ func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string) {
 // Puts the current goroutine into a waiting state and unlocks the lock.
 // The goroutine can be made runnable again by calling goready(gp).
-func goparkunlock(lock *mutex, reason string) {
-	gopark(parkunlock_c, unsafe.Pointer(lock), reason)
+func goparkunlock(lock *mutex, reason string, traceEv byte) {
+	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv)
 }

 func goready(gp *g) {
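
With the new signatures above, every park site supplies both the human-readable wait reason and a trace event byte classifying the block. A toy model of that calling convention (the park function and event values here are hypothetical; the commit's real traceEv* constants are defined in tracing code not shown in this excerpt):

    package main

    import "fmt"

    // Hypothetical event codes standing in for the commit's traceEv*
    // constants (traceEvGoBlock, traceEvGoBlockSend, ...).
    const (
    	evGoBlock     byte = 1
    	evGoBlockSend byte = 2
    )

    // park is a toy stand-in for runtime.gopark: the string names the
    // wait for humans, the byte classifies it for the trace.
    func park(reason string, traceEv byte) {
    	fmt.Printf("parked: %q (event %d)\n", reason, traceEv)
    }

    func main() {
    	park("chan send", evGoBlockSend)  // channel-specific block event
    	park("finalizer wait", evGoBlock) // generic block event
    }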
(A large diff for one file is collapsed in this view.)
@@ -277,6 +277,8 @@ type m struct {
 	traceback   uint8
 	waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
 	waitlock    unsafe.Pointer
+	waittraceev byte
+	syscalltick uint32
 	//#ifdef GOOS_windows
 	thread uintptr // thread handle
 	// these are here because they are too large to be on the stack
@@ -173,7 +173,7 @@ func selparkcommit(gp *g, sel unsafe.Pointer) bool {
 }

 func block() {
-	gopark(nil, nil, "select (no cases)") // forever
+	gopark(nil, nil, "select (no cases)", traceEvGoStop) // forever
 }

 // overwrites return pc on stack to signal which case of the select
@@ -363,7 +363,7 @@ loop:
 	// wait for someone to wake us up
 	gp.param = nil
-	gopark(selparkcommit, unsafe.Pointer(sel), "select")
+	gopark(selparkcommit, unsafe.Pointer(sel), "select", traceEvGoBlockSelect)

 	// someone woke us up
 	sellock(sel)
@@ -97,7 +97,7 @@ func semacquire(addr *uint32, profile bool) {
 		// Any semrelease after the cansemacquire knows we're waiting
 		// (we set nwait above), so go to sleep.
 		root.queue(addr, s)
-		goparkunlock(&root.lock, "semacquire")
+		goparkunlock(&root.lock, "semacquire", traceEvGoBlockSync)
 		if cansemacquire(addr) {
 			break
 		}
@@ -234,7 +234,7 @@ func syncsemacquire(s *syncSema) {
 			s.tail.next = w
 		}
 		s.tail = w
-		goparkunlock(&s.lock, "semacquire")
+		goparkunlock(&s.lock, "semacquire", traceEvGoBlockCond)
 		if t0 != 0 {
 			blockevent(int64(w.releasetime)-t0, 2)
 		}
@@ -273,7 +273,7 @@ func syncsemrelease(s *syncSema, n uint32) {
 			s.tail.next = w
 		}
 		s.tail = w
-		goparkunlock(&s.lock, "semarelease")
+		goparkunlock(&s.lock, "semarelease", traceEvGoBlockCond)
 		releaseSudog(w)
 	} else {
 		unlock(&s.lock)
@@ -721,7 +721,7 @@ func newstack() {
 		// Act like goroutine called runtime.Gosched.
 		casgstatus(gp, _Gwaiting, _Grunning)
-		gosched_m(gp) // never return
+		gopreempt_m(gp) // never return
 	}

 	// Allocate a bigger segment and move the stack.
@@ -56,7 +56,7 @@ func timeSleep(ns int64) {
 	t.arg = getg()
 	lock(&timers.lock)
 	addtimerLocked(t)
-	goparkunlock(&timers.lock, "sleep")
+	goparkunlock(&timers.lock, "sleep", traceEvGoSleep)
 }

 // startTimer adds t to the timer heap.
@@ -200,7 +200,7 @@ func timerproc() {
 		if delta < 0 || faketime > 0 {
 			// No timers left - put goroutine to sleep.
 			timers.rescheduling = true
-			goparkunlock(&timers.lock, "timer goroutine (idle)")
+			goparkunlock(&timers.lock, "timer goroutine (idle)", traceEvGoBlock)
 			continue
 		}
 		// At least one timer pending. Sleep until then.
@@ -292,7 +292,7 @@ func ReadTrace() []byte {
 	// Wait for new data.
 	if trace.fullHead == nil && !trace.shutdown {
 		trace.reader = getg()
-		goparkunlock(&trace.lock, "trace reader (blocked)" /*, traceEvGoBlock*/)
+		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock)
 		lock(&trace.lock)
 	}
 	// Write a buffer.