Commit daaf361f authored by Peter Weinberger

internal/traceparser: provide parser that uses less space and parses segments of runtime trace files

Traceparser generally takes 20-30% less space than internal/trace. The only
user of these packages is cmd/trace, and the new package lets it handle some
trace files that were too large. The new parser will also convert segments
of the raw trace file (e.g. the last 10 seconds) to Events. Trace files from
go 1.8 and before are not supported.

Change-Id: If83fa183246db8f75182ccd3ba8df07673c0ebd0
Reviewed-on: https://go-review.googlesource.com/c/137635
Run-TryBot: Peter Weinberger <pjw@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Hyang-Ah Hana Kim <hyangah@gmail.com>
parent 5ddb2091
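A minimal sketch of how a client such as cmd/trace might drive the new package, assuming only the API exercised by the tests later on this page (New, Parse, GoroutineStats, and the Events field); the file name, the time bounds, and the nil progress callback are illustrative, and internal packages are importable only from within the Go tree:

package main

import (
	"log"

	"internal/traceparser"
)

func main() {
	// New scans the whole file once, recording strings, stacks, and
	// the file offsets of the event batches.
	p, err := traceparser.New("trace.out")
	if err != nil {
		log.Fatal(err)
	}
	// Parse converts just the requested time interval to Events;
	// [0, 1<<62] nanoseconds effectively covers the whole trace.
	if err := p.Parse(0, 1<<62, nil); err != nil {
		log.Fatal(err)
	}
	stats := p.GoroutineStats()
	log.Printf("%d events, %d goroutines", len(p.Events), len(stats))
}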
@@ -26,7 +26,7 @@ scheme.
 # Download NaCl
 Download nacl_sdk.zip file from
-https://developers.google.com/native-client/dev/sdk/download
+https://developer.chrome.com/native-client/sdk/download
 and unpack it. I chose /opt/nacl_sdk.
 # Update
@@ -37,7 +37,7 @@ sdk. These are released every 6-8 weeks, in line with Chrome releases.
 % cd /opt/nacl_sdk
 % ./naclsdk update
-At this time pepper_40 is the stable version. The NaCl port needs at least pepper_39
+At this time pepper_49 is the stable version. The NaCl port needs at least pepper_39
 to work. If naclsdk downloads a later version, please adjust accordingly.
 The cmd/go helper scripts expect that the loaders sel_ldr_{x86_{32,64},arm} and
@@ -154,6 +154,9 @@ go src=..
 	trace
 		testdata
 			+
+	traceparser
+		testdata
+			+
 	io
 		+
 	mime
@@ -272,6 +272,8 @@ var pkgDeps = map[string][]string{
 	"internal/goroot":       {"L4", "OS"},
 	"internal/singleflight": {"sync"},
 	"internal/trace":        {"L4", "OS"},
+	"internal/traceparser":         {"L4", "internal/traceparser/filebuf"},
+	"internal/traceparser/filebuf": {"L4", "OS"},
 	"math/big": {"L4"},
 	"mime":     {"L4", "OS", "syscall", "internal/syscall/windows/registry"},
 	"mime/quotedprintable": {"L4"},
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package traceparser
// postProcess is a final check of consistency, and if all is well,
// adds links to Events
import (
"fmt"
)
type gStatus int
const (
gDead gStatus = iota
gRunnable
gRunning
gWaiting
)
// This code is copied from internal/trace/parser.go. With greater understanding it could be
// simplified. Sets ev.P for GCStart, and sets various Link fields.
func (p *Parsed) postProcess(events []*Event) error {
type gdesc struct {
state gStatus
ev *Event
evStart *Event
evCreate *Event
evMarkAssist *Event
}
type pdesc struct {
running bool
g uint64
evSTW *Event
evSweep *Event
}
gs := make(map[uint64]gdesc)
ps := make(map[int]pdesc)
tasks := make(map[uint64]*Event) // task id to task creation events
activeRegions := make(map[uint64][]*Event) // goroutine id to stack of spans
gs[0] = gdesc{state: gRunning}
var evGC, evSTW *Event
checkRunning := func(pd pdesc, g gdesc, ev *Event, allowG0 bool) error {
if g.state != gRunning {
return fmt.Errorf("saw %v, but g %d is not running", ev, ev.G)
}
if pd.g != ev.G {
return fmt.Errorf("saw %v, but it's P is running %d, not %d", ev, pd.g, ev.G)
}
if !allowG0 && ev.G == 0 {
return fmt.Errorf("saw %v with unexpected g==0", ev)
}
return nil
}
for i, ev := range events {
g := gs[ev.G]
px := ps[int(ev.P)]
switch ev.Type {
case EvProcStart:
if px.running {
return fmt.Errorf("%d: running before start %s", i, ev)
}
px.running = true
case EvProcStop:
if !px.running {
return fmt.Errorf("%d: p %d not running %s", i, ev.P, ev)
}
if px.g != 0 {
return fmt.Errorf("p %d is running a goroutine %s", ev.P, ev)
}
px.running = false
case EvGCStart:
if evGC != nil {
return fmt.Errorf("GC already running %s, was %s", ev, evGC)
}
evGC = ev
// Attribute this to the global GC state.
ev.P = GCP
case EvGCDone:
if evGC == nil {
return fmt.Errorf("%d:%s bogus GC end", i, ev)
}
evGC.Link = ev
evGC = nil
case EvGCSTWStart:
evp := &evSTW
if p.Version < 1010 {
// Before 1.10, EvGCSTWStart was per-P.
evp = &px.evSTW
}
if *evp != nil {
return fmt.Errorf("STW %s still running at %s", *evp, ev)
}
*evp = ev
case EvGCSTWDone:
evp := &evSTW
if p.Version < 1010 {
// Before 1.10, EvGCSTWDone was per-P.
evp = &px.evSTW
}
if *evp == nil {
return fmt.Errorf("%d: no STW running %s", i, ev)
}
(*evp).Link = ev
*evp = nil
case EvGCMarkAssistStart:
if g.evMarkAssist != nil {
return fmt.Errorf("%d: MarkAssist %s is still running at %s",
i, g.evMarkAssist, ev)
}
g.evMarkAssist = ev
case EvGCMarkAssistDone:
// Unlike most events, mark assists can be in progress when a
// goroutine starts tracing, so we can't report an error here.
if g.evMarkAssist != nil {
g.evMarkAssist.Link = ev
g.evMarkAssist = nil
}
case EvGCSweepStart:
if px.evSweep != nil {
return fmt.Errorf("sweep not done %d: %s", i, ev)
}
px.evSweep = ev
case EvGCSweepDone:
if px.evSweep == nil {
return fmt.Errorf("%d: no sweep happening %s", i, ev)
}
px.evSweep.Link = ev
px.evSweep = nil
case EvGoWaiting:
if g.state != gRunnable {
return fmt.Errorf("not runnable before %d:%s", i, ev)
}
g.state = gWaiting
g.ev = ev
case EvGoInSyscall:
if g.state != gRunnable {
return fmt.Errorf("not runnable before %d:%s", i, ev)
}
g.state = gWaiting
g.ev = ev
case EvGoCreate:
if err := checkRunning(px, g, ev, true); err != nil {
return err
}
if _, ok := gs[ev.Args[0]]; ok {
return fmt.Errorf("%d: already exists %s", i, ev)
}
gs[ev.Args[0]] = gdesc{state: gRunnable, ev: ev, evCreate: ev}
case EvGoStart, EvGoStartLabel:
if g.state != gRunnable {
return fmt.Errorf("not runnable before start %d:%s %+v", i, ev, g)
}
if px.g != 0 {
return fmt.Errorf("%d: %s has p running %d already %v", i, ev, px.g, px)
}
g.state = gRunning
g.evStart = ev
px.g = ev.G
if g.evCreate != nil {
if p.Version < 1007 {
// +1 because symbolizer expects return pc.
//PJW: aren't doing < 1007. ev.stk = []*Frame{{PC: g.evCreate.args[1] + 1}}
} else {
ev.StkID = uint32(g.evCreate.Args[1])
}
g.evCreate = nil
}
if g.ev != nil {
g.ev.Link = ev
g.ev = nil
}
case EvGoEnd, EvGoStop:
if err := checkRunning(px, g, ev, false); err != nil {
return fmt.Errorf("%d: %s", i, err)
}
g.evStart.Link = ev
g.evStart = nil
g.state = gDead
px.g = 0
if ev.Type == EvGoEnd { // flush all active Regions
spans := activeRegions[ev.G]
for _, s := range spans {
s.Link = ev
}
delete(activeRegions, ev.G)
}
case EvGoSched, EvGoPreempt:
if err := checkRunning(px, g, ev, false); err != nil {
return err
}
g.state = gRunnable
g.evStart.Link = ev
g.evStart = nil
px.g = 0
g.ev = ev
case EvGoUnblock:
if g.state != gRunning { // PJW, error message
return fmt.Errorf("Event %d (%s) is not running at unblock %s", i, ev, g.state)
}
if ev.P != TimerP && px.g != ev.G {
// PJW: do better here.
return fmt.Errorf("%d: %s p %d is not running g", i, ev, px.g)
}
g1 := gs[ev.Args[0]]
if g1.state != gWaiting {
return fmt.Errorf("g %v is not waiting before unpark i=%d g1=%v %s",
ev.Args[0], i, g1, ev)
}
if g1.ev != nil && g1.ev.Type == EvGoBlockNet && ev.P != TimerP {
ev.P = NetpollP
}
if g1.ev != nil {
g1.ev.Link = ev
}
g1.state = gRunnable
g1.ev = ev
gs[ev.Args[0]] = g1
case EvGoSysCall:
if err := checkRunning(px, g, ev, false); err != nil {
return err
}
g.ev = ev
case EvGoSysBlock:
if err := checkRunning(px, g, ev, false); err != nil {
return err
}
g.state = gWaiting
g.evStart.Link = ev
g.evStart = nil
px.g = 0
case EvGoSysExit:
if g.state != gWaiting {
return fmt.Errorf("not waiting when %s", ev)
}
if g.ev != nil && g.ev.Type == EvGoSysCall {
g.ev.Link = ev
}
g.state = gRunnable
g.ev = ev
case EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet, EvGoBlockGC:
if err := checkRunning(px, g, ev, false); err != nil {
return err
}
g.state = gWaiting
g.ev = ev
g.evStart.Link = ev
g.evStart = nil
px.g = 0
case EvUserTaskCreate:
taskid := ev.Args[0]
if prevEv, ok := tasks[taskid]; ok {
return fmt.Errorf("task id conflicts (id:%d), %q vs %q", taskid, ev, prevEv)
}
tasks[ev.Args[0]] = ev
case EvUserTaskEnd:
if prevEv, ok := tasks[ev.Args[0]]; ok {
prevEv.Link = ev
ev.Link = prevEv
}
case EvUserRegion:
mode := ev.Args[1]
spans := activeRegions[ev.G]
if mode == 0 { // span start
activeRegions[ev.G] = append(spans, ev) // push
} else if mode == 1 { // span end
n := len(spans)
if n > 0 { // matching span start event is in the trace.
s := spans[n-1]
if s.Args[0] != ev.Args[0] || s.SArgs[0] != ev.SArgs[0] { // task id, span name mismatch
return fmt.Errorf("misuse of span in goroutine %d: span end %q when the inner-most active span start event is %q",
ev.G, ev, s)
}
// Link span start event with span end event
s.Link = ev
ev.Link = s
if n > 1 {
activeRegions[ev.G] = spans[:n-1]
} else {
delete(activeRegions, ev.G)
}
}
} else {
return fmt.Errorf("invalid user region, mode: %q", ev)
}
}
gs[ev.G] = g
ps[int(ev.P)] = px
}
return nil
}
func (g gStatus) String() string {
switch g {
case gDead:
return "gDead"
case gRunnable:
return "gRunnable"
case gRunning:
return "gRunning"
case gWaiting:
return "gWaiting"
}
return fmt.Sprintf("gStatus?%d", g)
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package traceparser
import (
"fmt"
"sort"
)
// convert raw events into Events
func (p *Parsed) createEvents(f func(string)) error {
// multiple passes:
// create some Events
// sort them by time (and adjust their times to be nanoseconds)
// remove events not in the desired time interval
// make the events consistent (adding initializing events at the beginning)
// remove the futile events
// add links (and do final checking)
// shared by firstEvents
p.byproc = make(map[int][]*Event)
p.lastGs = make(map[int]uint64)
// p.batches are always sorted by time. otherwise a batch for one p that is totally
// later than another batch might be done first, confusing us about g's
for i, b := range p.batches {
if b.raws == nil {
continue
}
if err := p.firstEvents(b); err != nil {
return fmt.Errorf("%v", err) // PJW: this is not useful
}
// we're done with b.raws now
p.batches[i].raws = nil
}
f("firstEvents finished")
sorted := []*Event{}
for _, v := range p.byproc {
sorted = append(sorted, v...)
}
// PJW: are we done with p.byproc now? Yes. This shrinks a little.
p.byproc = nil
// Why wasn't this done earlier? Or, why do it at all?
for _, ev := range sorted {
switch ev.Type {
case EvGoStartLocal:
ev.Type = EvGoStart
case EvGoUnblockLocal:
ev.Type = EvGoUnblock
case EvGoSysExitLocal:
ev.Type = EvGoSysExit
}
}
// change to nanoseconds
freq := 1e9 / float64(p.TicksPerSec)
for i, ev := range sorted {
// Move timers and syscalls to separate fake Ps.
// This could be done in the loop at line 38
// or maybe after robust fixes things.
if p.timerGoids[ev.G] && ev.Type == EvGoUnblock {
ev.Args[2] = uint64(ev.P) // save for robust() to use
ev.P = TimerP
}
// sometimes the ts is not what it should be
if ev.Type == EvGoSysExit {
ev.P = SyscallP
if ev.Args[2] != 0 {
// PJW: test for this being safe. There might be no preceding
// EvSysBlock, EvGoInSyscall, or its time might be later than this
ev.Ts = int64(ev.Args[2])
}
}
if ev.Type == EvGCStart {
ev.P = GCP
}
t := ev.Ts - p.minticks
if t < 0 {
return fmt.Errorf("event %d %s would be %d mints=%x", i, ev, t, p.minticks)
}
ev.Ts = int64(float64(ev.Ts-p.minticks) * freq)
}
// Stable for the case of equal Ts's.
sort.SliceStable(sorted, func(i, j int) bool { return sorted[i].Ts < sorted[j].Ts })
f("sorted")
// and ignore the ones with times out of bounds
firstwant, lastwant := 0, len(sorted)
for i, ev := range sorted {
if ev.Ts < p.MinWant {
firstwant = i + 1
} else if ev.Ts > p.MaxWant { // closed interval [minwant, maxwant]
lastwant = i
break // sorted by Ts
}
}
f("nanoseconds")
var err error
sorted, _, err = p.robust(sorted[firstwant:lastwant]) // PJW: copy info from aux
f("consistent")
if err != nil {
return err
}
events, cnt := p.removeFutile(sorted) // err is always nil here.
f(fmt.Sprintf("removed %d futiles", cnt))
// and finally, do some checks and put in links
err = p.postProcess(events)
f("post processed")
if err != nil {
return err // PJW: is this enough? NO
}
p.Events = events
return nil
}
// Special P identifiers.
const (
FakeP = 1000000 + iota
TimerP // depicts timer unblocks
NetpollP // depicts network unblocks
SyscallP // depicts returns from syscalls
GCP // depicts GC state
)
// convert the raw events for a batch into Events, and keep track of
// which G is running on the P that is common to the batch.
func (p *Parsed) firstEvents(b batch) error {
for _, raw := range b.raws {
desc := EventDescriptions[raw.typ]
narg := p.rawArgNum(&raw)
if p.Err != nil {
return p.Err
}
if raw.typ == EvBatch {
// first event, record information about P, G, and Ts
p.lastGs[p.lastP] = p.lastG // 0 the first time through
p.lastP = int(raw.Arg(0))
p.lastG = p.lastGs[p.lastP]
p.lastTs = int64(raw.Arg(1))
continue
}
e := &Event{Type: raw.typ, P: int32(p.lastP), G: p.lastG}
var argoffset int
if p.Version < 1007 { // can't happen.
e.Ts = p.lastTs + int64(raw.Arg(1))
argoffset = 2
} else {
e.Ts = p.lastTs + int64(raw.Arg(0))
argoffset = 1
}
p.lastTs = e.Ts
// collect the args for the raw event e
for i := argoffset; i < narg; i++ {
// evade one byte of corruption (from fuzzing typically)
if raw.args == nil {
return fmt.Errorf("raw.args is nil %s", evname(raw.typ))
}
if i > 0 && i-1 >= len(*raw.args) {
return fmt.Errorf("%s wants arg %d of %d", evname(raw.typ), i, len(*raw.args))
}
if i == narg-1 && desc.Stack {
e.StkID = uint32(raw.Arg(i))
} else {
e.Args[i-argoffset] = raw.Arg(i)
}
}
switch raw.typ {
case EvGoSysCall, EvGCSweepDone, EvGCSweepStart:
if e.G == 0 {
// missing some earlier G's from this P
continue // so we don't know which G is doing it
}
case EvGoStart, EvGoStartLocal, EvGoStartLabel:
p.lastG = e.Args[0]
e.G = p.lastG
if raw.typ == EvGoStartLabel {
e.SArgs = []string{p.Strings[e.Args[2]]}
}
case EvGCSTWStart:
e.G = 0
switch e.Args[0] {
case 0:
e.SArgs = []string{"mark termination"}
case 1:
e.SArgs = []string{"sweep termination"}
default:
return fmt.Errorf("unknown STW kind %d!=0,1 %s", e.Args[0], e)
}
case EvGCStart, EvGCDone, EvGCSTWDone:
e.G = 0
case EvGoEnd, EvGoStop, EvGoSched, EvGoPreempt,
EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv,
EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet,
EvGoSysBlock, EvGoBlockGC:
p.lastG = 0
if e.G == 0 {
// missing some earlier G's from this P
continue // so we don't know which G is doing it
}
case EvGoSysExit, EvGoWaiting, EvGoInSyscall:
e.G = e.Args[0]
case EvUserTaskCreate:
// e.Args 0: taskID, 1:parentID, 2:nameID
e.SArgs = []string{p.Strings[e.Args[2]]}
case EvUserRegion:
if e.G == 0 {
continue // don't know its G
}
// e.Args 0: taskID, 1: mode, 2:nameID
e.SArgs = []string{p.Strings[e.Args[2]]}
case EvUserLog:
// e.Args 0: taskID, 1:keyID, 2: stackID
e.SArgs = []string{p.Strings[e.Args[1]], raw.sarg}
}
p.byproc[p.lastP] = append(p.byproc[p.lastP], e)
}
return nil
}
func (p *Parsed) removeFutile(events []*Event) ([]*Event, int) {
// Phase 1: determine futile wakeup sequences.
type G struct {
futile bool
wakeup []*Event // wakeup sequence (subject for removal)
}
gs := make(map[uint64]G)
futile := make(map[*Event]bool)
cnt := 0
for _, ev := range events {
switch ev.Type {
case EvGoUnblock:
g := gs[ev.Args[0]]
g.wakeup = []*Event{ev}
gs[ev.Args[0]] = g
case EvGoStart, EvGoPreempt, EvFutileWakeup:
g := gs[ev.G]
g.wakeup = append(g.wakeup, ev)
if ev.Type == EvFutileWakeup {
g.futile = true
}
gs[ev.G] = g
case EvGoBlock, EvGoBlockSend, EvGoBlockRecv, EvGoBlockSelect,
EvGoBlockSync, EvGoBlockCond:
g := gs[ev.G]
if g.futile {
futile[ev] = true
for _, ev1 := range g.wakeup {
futile[ev1] = true
}
}
delete(gs, ev.G)
cnt++
}
}
// Phase 2: remove futile wakeup sequences.
newEvents := events[:0] // overwrite the original slice
for _, ev := range events {
if !futile[ev] {
newEvents = append(newEvents, ev)
}
}
return newEvents, cnt // PJW: cnt doesn't count the futile[]s
}
// Arg gets the n-th arg from a raw event
func (r *rawEvent) Arg(n int) uint64 {
if n == 0 {
return r.arg0
}
return (*r.args)[n-1]
}
// report the number of arguments. (historical differences)
func (p *Parsed) rawArgNum(ev *rawEvent) int {
desc := EventDescriptions[ev.typ]
switch ev.typ {
case EvStack, EvFrequency, EvTimerGoroutine:
p.Err = fmt.Errorf("%s unexpected in rawArgNum", evname(ev.typ))
return 0
}
narg := len(desc.Args)
if desc.Stack {
narg++
}
if ev.typ == EvBatch {
if p.Version < 1007 {
narg++ // used to be an extra unused arg
}
return narg
}
narg++ // timestamp
if p.Version < 1007 {
narg++ // sequence
}
// various special historical cases
switch ev.typ {
case EvGCSweepDone:
if p.Version < 1009 {
narg -= 2 // 1.9 added 2 args
}
case EvGCStart, EvGoStart, EvGoUnblock:
if p.Version < 1007 {
narg-- // one more since 1.7
}
case EvGCSTWStart:
if p.Version < 1010 {
narg-- // 1.10 added an argument
}
}
return narg
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package traceparser
import (
"bytes"
"fmt"
"io"
)
// scan the trace file finding the header, starts of batches, and the trailer.
// the trailer contains strings, stacks, and the clock frequency
// There are two ways of thinking about the raw trace file. It starts with a 16
// byte header "go 1.11 trace\0\0\0"
// From the point of
// view of the runtime, there is a collection of initializations for each goroutine.
// These consist of an EvGoCreate, possibly followed by one of EvGoWaiting or
// EvGoInSyscall if the goroutine is waiting or in a syscall.
// Then there is an EvProcStart, so there's a running P, and then an EvGoStart for
// the first running goroutine. Then as the program runs, the
// runtime emits trace events. Finally when the tracing stops, the runtime emits a footer
// consisting of an EvFrequency (to convert ticks to nanoseconds) and some EvTimerGoroutines,
// followed by EvStacks for all the stack frames.
//
// In the file, the header and footer are as described, but all the events in between come
// in batches headed by EvBatch with the same P, and have to be rearranged into timestamp order.
// New() scans once through the file to find the beginnings of all the batches (EvBatch) and
// processes the footer extracting the strings and stacks.
// Parse() finds the batches that overlap the desired time interval, and processes them into
// events, dropping those outside the desired time interval. But it has to derive the missing
// initializations from the events it sees, as it has no other access to the state of the runtime.
// This is done in robust.go.
// In more detail, scanFile() is called by commonInit() which is called by either New() or ParseBuffer().
// It extracts the strings, the stacks, and remembers the locations of the Batches (all saved in *Parsed).
// Parse first computes the rawEvents for the batches that overlap the requested interval.
// It then calls createEvents() (events.go) which produces Events.
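// For concreteness, the three header strings the Fuzz function later on this
// page feeds through, with the Version values parseHeader derives from them
// (an illustrative summary, not new behavior):
//
//	"go 1.9 trace\x00\x00\x00\x00"  -> Version 1009
//	"go 1.10 trace\x00\x00\x00"     -> Version 1010
//	"go 1.11 trace\x00\x00\x00"     -> Version 1011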
func (p *Parsed) parseHeader() error {
p.r.Seek(0, 0)
var buf [16]byte
n, err := p.r.Read(buf[:])
if n != 16 || err != nil {
return fmt.Errorf("failed to red header: read %d bytes, not 16 %v", n, err)
}
// by hand. there are only 6 or so legitimate values; we could search for a match
if buf[0] != 'g' || buf[1] != 'o' || buf[2] != ' ' ||
buf[3] < '1' || buf[3] > '9' ||
buf[4] != '.' ||
buf[5] < '1' || buf[5] > '9' {
return fmt.Errorf("not a trace file")
}
ver := int(buf[5] - '0')
i := 0
for ; buf[6+i] >= '0' && buf[6+i] <= '9' && i < 2; i++ {
ver = ver*10 + int(buf[6+i]-'0')
}
ver += int(buf[3]-'0') * 1000
if !bytes.Equal(buf[6+i:], []byte(" trace\x00\x00\x00\x00")[:10-i]) {
return fmt.Errorf("not a trace file")
}
p.Version = ver
// PJW: reject 1005 and 1007? They need symbolization, which we don't do.
// Further, doing these would require 1.7 or earlier binaries.
switch ver {
case 1005, 1007:
break // no longer supported
case 1008, 1009:
return nil
case 1010, 1011:
return nil
}
return fmt.Errorf("%d unsupported version", ver)
}
func (p *Parsed) scanFile() error {
r := p.r
// fill in the following values for sure
strings := make(map[uint64]string)
p.Strings = strings // ok to save maps immediately
timerGoIDs := make(map[uint64]bool)
p.timerGoids = timerGoIDs
stacks := make(map[uint32][]*Frame)
framer := make(map[Frame]*Frame) // uniqify *Frame
p.Stacks = stacks
footerLoc := 0
var buf [1]byte
off := 16 // skip the header
n, err := r.Seek(int64(off), 0)
if err != nil || n != int64(off) {
return fmt.Errorf("Seek to %d got %d, err=%v", off, n, err)
}
var batchts int64 // from preceding batch
var lastEv byte
for {
off0 := off
n, err := r.Read(buf[:1])
if err == io.EOF {
break
} else if err != nil || n != 1 {
return fmt.Errorf("read failed at 0x%x, n=%d, %v",
off, n, err)
}
off += n
typ := buf[0] << 2 >> 2
if typ == EvNone || typ >= EvCount ||
EventDescriptions[typ].MinVersion > p.Version {
err = fmt.Errorf("unknown event type %v at offset 0x%x, pass 1", typ, off0)
return err
}
// extract and save the strings
if typ == EvString {
// String dictionary entry [ID, length, string].
var id uint64
id, off, err = readVal(r, off)
if err != nil {
return err
}
if id == 0 {
err = fmt.Errorf("string at offset %d has invalid id 0", off)
return err
}
if strings[id] != "" {
err = fmt.Errorf("string at offset %d has duplicate id %v", off, id)
return err
}
var ln uint64
ln, off, err = readVal(r, off)
if err != nil {
return err
}
if ln == 0 {
err = fmt.Errorf("string at offset %d has invalid length 0", off)
return err
}
if ln > 1e6 {
err = fmt.Errorf("string at offset %d has too large length %v", off, ln)
return err
}
buf := make([]byte, ln)
var n int
n, err = io.ReadFull(r, buf)
if err != nil {
err = fmt.Errorf("failed to read trace at offset %d: read %v, want %v, error %v", off, n, ln, err)
return err
}
off += n
strings[id] = string(buf)
lastEv = EvString
continue
}
p.Count++
if typ == EvFrequency {
// found footer, remember location, save value
footerLoc = off0
}
var args []uint64
off, args, err = p.argsAt(off0, typ)
if err != nil {
err = fmt.Errorf("argsAt error %v; off=%d off0=%d %s",
err, off, off0, evname(typ))
return err
}
r.Seek(int64(off), 0)
if typ == EvUserLog {
_, off, err = readStr(r, off)
if err != nil {
return err
}
}
if len(args) == 0 { // can't happen in well-formed trace file
return fmt.Errorf("len(args)==0 off=0x%x typ=%s", off, evname(typ))
}
switch typ {
case EvBatch:
if footerLoc == 0 {
// EvBatch in footer is just to have a header for stacks
locp := int64(args[0])
p.batches = append(p.batches,
batch{Off: off0, P: locp, Cycles: int64(args[1])})
// at this point we know when the previous batch ended!!
batchts = int64(args[1])
if batchts > p.maxticks {
p.maxticks = batchts
}
}
case EvFrequency:
p.TicksPerSec = int64(args[0])
case EvTimerGoroutine:
timerGoIDs[args[0]] = true
case EvStack:
if len(args) < 2 {
return fmt.Errorf("EvStack has too few args %d at 0x%x",
len(args), off0)
}
size := args[1]
if size > 1000 {
return fmt.Errorf("EvStack has %d frames at 0x%x",
size, off0)
}
want := 2 + 4*size
if uint64(len(args)) != want {
return fmt.Errorf("EvStack wants %d args, got %d, at 0x%x",
len(args), want, off0)
}
id := args[0]
if id != 0 && size > 0 {
stk := make([]*Frame, size)
for i := 0; i < int(size); i++ {
pc := args[2+i*4+0]
fn := args[2+i*4+1]
file := args[2+i*4+2]
line := args[2+i*4+3]
stk[i] = &Frame{PC: pc, Fn: strings[fn], File: strings[file], Line: int(line)}
if _, ok := framer[*stk[i]]; !ok {
framer[*stk[i]] = stk[i]
}
stk[i] = framer[*stk[i]]
}
stacks[uint32(id)] = stk
}
default:
if lastEv == EvBatch {
// p.minticks is set by the first real event, not the first EvBatch
x := batchts + int64(args[0])
if x < p.minticks {
p.minticks = x
}
}
batchts += int64(args[0])
if batchts > p.maxticks {
p.maxticks = batchts
}
}
lastEv = typ
}
if footerLoc <= 0 {
return fmt.Errorf("malformed trace file, no EvFrequency")
}
return nil
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package filebuf implements io.ReadSeeker for os files.
// This is useful only for very large files with lots of
// seeking. (otherwise use ioutil.ReadFile or bufio)
package filebuf
import (
"fmt"
"io"
"os"
)
// Buf is the implemented interface
type Buf interface {
io.ReadCloser
io.Seeker
Size() int64
Stats() Stat
}
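A short sketch of typical use, assuming the semantics demonstrated by the tests later on this page; the file name is hypothetical:

package main

import (
	"io"
	"log"

	"internal/traceparser/filebuf"
)

func main() {
	f, err := filebuf.New("trace.out") // hypothetical large input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	// Seek near the end and read a little. Read may return io.EOF
	// together with a positive byte count at the end of the file.
	if _, err := f.Seek(f.Size()-16, io.SeekStart); err != nil {
		log.Fatal(err)
	}
	var tail [16]byte
	n, err := f.Read(tail[:])
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	st := f.Stats()
	log.Printf("read %d bytes; %d seeks, %d reads, %d bytes from the OS",
		n, st.Seeks, st.Reads, st.Bytes)
}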
// Buflen is the size of the internal buffer.
// The code is designed never to reread data unnecessarily.
const Buflen = 1 << 20
// fbuf is a buffered file with seeking.
// fixed is an internal buffer. buf is the slice fixed[:fixedlen]. bufloc is the file
// location of the beginning of fixed (and buf). The seek pointer is at bufloc+bufpos,
// so the file's contents there start with buf[bufpos:]
type fbuf struct {
Name string
fd *os.File
size int64 // file size
bufloc int64 // file loc of beginning of fixed
bufpos int32 // seekptr is at bufloc+bufpos. bufpos <= Buflen, fixedLen
fixed [Buflen]byte // backing store for buf
fixedlen int // how much of fixed is valid file contents
buf []byte // buf is fixed[0:fixedlen]
// statistics
seeks int // number of calls to fd.Seek
reads int // number of calls to fd.Read
bytes int64 // number of bytes read by fd.Read
}
// Stat returns the number of underlying seeks and reads, and bytes read
type Stat struct {
Seeks int
Reads int
Bytes int64
}
// Stats returns the stats so far
func (fb *fbuf) Stats() Stat {
return Stat{fb.seeks, fb.reads, fb.bytes}
}
// Size returns the file size
func (fb *fbuf) Size() int64 {
return fb.size
}
// New returns an initialized *fbuf or an error
func New(fname string) (Buf, error) {
fd, err := os.Open(fname)
if err != nil {
return nil, err
}
fi, err := fd.Stat()
if err != nil || fi.Mode().IsDir() {
return nil, fmt.Errorf("not readable: %s", fname)
}
return &fbuf{Name: fname, fd: fd, size: fi.Size()}, nil
}
// Read implements io.Reader. It may return a positive
// number of bytes read with io.EOF
func (fb *fbuf) Read(p []byte) (int, error) {
// If there are enough valid bytes remaining in buf, just use them
if len(fb.buf[fb.bufpos:]) >= len(p) {
copy(p, fb.buf[fb.bufpos:])
fb.bufpos += int32(len(p))
return len(p), nil
}
done := 0 // done counts how many bytes have been transferred
// If there are any valid bytes left in buf, use them first
if len(fb.buf[fb.bufpos:]) > 0 {
m := copy(p, fb.buf[fb.bufpos:])
done = m
fb.bufpos += int32(done) // at end of the valid bytes in buf
}
// used up buffered data. logical seek pointer is at bufloc+bufpos.
// loop until p has been filled up or EOF
for done < len(p) {
loc, err := fb.fd.Seek(0, io.SeekCurrent) // make sure of the os's file location
if loc != fb.bufloc+int64(fb.bufpos) {
panic(fmt.Sprintf("%v loc=%d bufloc=%d bufpos=%d", err, loc,
fb.bufloc, fb.bufpos))
}
fb.seeks++ // did a file system seek
if loc >= fb.size {
// at EOF
fb.bufpos = int32(len(fb.buf))
fb.bufloc = loc - int64(fb.fixedlen)
return done, io.EOF
}
n, err := fb.fd.Read(fb.fixed[:])
if n != 0 {
fb.fixedlen = n
}
fb.reads++ // did a file system read
m := copy(p[done:], fb.fixed[:n])
done += m
if err != nil {
if err == io.EOF {
fb.bufpos = int32(len(fb.buf))
fb.bufloc = loc - int64(fb.fixedlen)
return done, io.EOF
}
return 0, err
}
fb.bytes += int64(n)
fb.bufpos = int32(m) // used m bytes of the buffer
fb.bufloc = loc
fb.buf = fb.fixed[:n]
}
return len(p), nil
}
// Seek implements io.Seeker. (<unchanged>, io.EOF) is returned for seeks off the end.
func (fb *fbuf) Seek(offset int64, whence int) (int64, error) {
seekpos := offset
switch whence {
case io.SeekCurrent:
seekpos += fb.bufloc + int64(fb.bufpos)
case io.SeekEnd:
seekpos += fb.size
}
if seekpos < 0 || seekpos > fb.size {
return fb.bufloc + int64(fb.bufpos), io.EOF
}
// if seekpos is inside fixed, just adjust buf and bufpos
if seekpos >= fb.bufloc && seekpos <= int64(fb.fixedlen)+fb.bufloc {
fb.bufpos = int32(seekpos - fb.bufloc)
return seekpos, nil
}
// need to refresh the internal buffer. Seek does no reading, mark buf
// as empty, set bufpos and bufloc.
fb.buf, fb.bufpos, fb.bufloc = nil, 0, seekpos
n, err := fb.fd.Seek(seekpos, io.SeekStart)
fb.seeks++
if n != seekpos || err != nil {
return -1, fmt.Errorf("seek failed (%d!= %d) %v", n, seekpos, err)
}
return seekpos, nil
}
// Close closes the underlying file
func (fb *fbuf) Close() error {
if fb.fd != nil {
return fb.fd.Close()
}
return nil
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filebuf
import (
"bytes"
"io"
"io/ioutil"
"log"
"os"
"testing"
)
var (
inited bool
small, large string // files
dir string // in this dir
contents []byte // contents of the large file
)
func TestMain(m *testing.M) {
create()
n := m.Run()
os.RemoveAll(dir)
os.Exit(n)
}
func create() {
if inited {
return
}
log.SetFlags(log.Lshortfile)
d, erra := ioutil.TempDir("", "filebuf")
s, errb := ioutil.TempFile(d, "small")
l, errc := ioutil.TempFile(d, "large")
if erra != nil || errb != nil || errc != nil {
log.Fatal(erra, errb, errc)
}
dir, small, large = d, s.Name(), l.Name()
buf := make([]byte, 2*Buflen+3)
for i := 0; i < len(buf); i++ {
buf[i] = byte(i)
}
err := ioutil.WriteFile(small, buf[:7], 0666)
if err != nil {
log.Fatal(err)
}
err = ioutil.WriteFile(large, buf, 0666)
if err != nil {
log.Fatal(err)
}
contents = buf
inited = true
}
func get(n int) io.Reader {
if n <= len(contents) {
return bytes.NewReader(contents[:n])
}
return bytes.NewReader(contents)
}
func TestSmall(t *testing.T) {
var f Buf
var err error
f, err = New(small)
small := func(t *testing.T) {
if err != nil {
t.Fatal(err)
}
buf := make([]byte, 23)
n, err := f.Read(buf)
if n != 7 || err != io.EOF {
t.Errorf("got %d, expected 7, %v", n, err)
}
m, err := f.Seek(0, io.SeekCurrent)
if m != 7 || err != nil {
t.Errorf("got %d, expected 7, %v", m, err)
}
m, err = f.Seek(1, io.SeekStart)
if m != 1 || err != nil {
t.Errorf("got %d expected 1, %v", m, err)
}
n, err = f.Read(buf)
if n != 6 || err != io.EOF {
t.Errorf("got %d, expected 6, %v", n, err)
}
for i := 0; i < 6; i++ {
if buf[i] != byte(i+1) {
t.Fatalf("byte %d is %d, not %d, %v", i, buf[i], i+1, buf)
}
}
}
t.Run("New", small)
f, err = FromReader(get(7))
t.Run("Rdr", small)
}
func TestLarge(t *testing.T) {
var f Buf
var err error
big := func(t *testing.T) {
if err != nil {
t.Fatal(err)
}
x := Buflen - 7
n, err := f.Seek(int64(x), io.SeekStart)
if n != Buflen-7 || err != nil {
t.Fatalf("expected %d, got %d, %v", x, n, err)
}
buf := make([]byte, 23)
m, err := f.Read(buf)
if m != len(buf) || err != nil {
t.Fatalf("expected %d, got %d, %v", len(buf), m, err)
}
for i := 0; i < 23; i++ {
if buf[i] != byte(x+i) {
t.Fatalf("byte %d, got %d, wanted %d", i, buf[i],
byte(x+i))
}
}
m, err = f.Read(buf)
if m != len(buf) || err != nil {
t.Fatalf("got %d, expected %d, %v", m, len(buf), err)
}
x += len(buf)
for i := 0; i < 23; i++ {
if buf[i] != byte(x+i) {
t.Fatalf("byte %d, got %d, wanted %d", i, buf[i],
byte(x+i))
}
}
}
f, err = New(large)
t.Run("New", big)
f, err = FromReader(get(1 << 30))
t.Run("Rdr", big)
}
func TestMore(t *testing.T) {
f, err := New(large)
if err != nil {
t.Fatal(err)
}
var a, b [4]byte
f.Seek(16, 0)
f.Read(a[:])
f.Seek(16, 0)
f.Read(b[:])
if a != b {
t.Errorf("oops %v %v", a, b)
}
}
func TestSeek(t *testing.T) {
f, err := New(small)
if err != nil {
log.Fatal(err)
}
n, err := f.Seek(f.Size(), 0)
if n != f.Size() || err != nil {
t.Errorf("seek got %d, expected %d, %v", n, f.Size(), err)
}
n, err = f.Seek(1, io.SeekCurrent)
if n != f.Size() || err != io.EOF {
t.Errorf("n=%d, expected 0. %v", n, err)
}
n, err = f.Seek(f.Size(), 0)
if n != f.Size() || err != nil {
t.Errorf("seek got %d, expected %d, %v", n, f.Size(), err)
}
}
func TestReread(t *testing.T) {
f, err := New(small)
if err != nil {
t.Fatal(err)
}
var buf [1]byte
f.Seek(0, 0)
for i := 0; i < int(f.Size()); i++ {
n, err := f.Read(buf[:])
if n != 1 || err != nil {
t.Fatalf("n=%d, err=%v", n, err)
}
}
stats := f.Stats()
if stats.Bytes != f.Size() || stats.Reads != 1 || stats.Seeks != 1 {
t.Errorf("%v %d %d", stats, f.(*fbuf).bufloc, f.(*fbuf).bufpos)
}
n, err := f.Read(buf[:])
if n != 0 || err != io.EOF {
t.Fatalf("expected 0 and io.EOF, got %d %v", n, err)
}
f.Seek(0, 0)
xstats := f.Stats()
if xstats.Bytes != f.Size() || xstats.Reads != 1 || xstats.Seeks != 2 {
t.Errorf("%v %v %d %d", stats, xstats, f.(*fbuf).bufloc, f.(*fbuf).bufpos)
}
f.Close()
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filebuf
import (
"bytes"
"io"
)
// implement a Buf version from an io.Reader
type rbuf struct {
buf []byte // contents
pos int64
seeks, reads int // number of calls. 0 seems right.
}
func (r *rbuf) Stats() Stat {
return Stat{r.seeks, r.reads, int64(len(r.buf))}
}
func (r *rbuf) Size() int64 {
return int64(len(r.buf))
}
// FromReader creates a Buf by copying the contents of an io.Reader
func FromReader(rd io.Reader) (Buf, error) {
r := &rbuf{}
x := bytes.NewBuffer(r.buf)
_, err := io.Copy(x, rd)
r.buf = x.Bytes()
if err != nil {
return nil, err
}
return r, nil
}
func (r *rbuf) Close() error {
return nil
}
func (r *rbuf) Read(p []byte) (int, error) {
n := copy(p, r.buf[r.pos:])
r.pos += int64(n)
if n == 0 || n < len(p) {
return n, io.EOF
}
return n, nil
}
func (r *rbuf) Seek(offset int64, whence int) (int64, error) {
seekpos := offset
switch whence {
case io.SeekCurrent:
seekpos += int64(r.pos)
case io.SeekEnd:
seekpos += int64(len(r.buf))
}
if seekpos < 0 || seekpos > int64(len(r.buf)) {
if seekpos < 0 {
r.pos = 0
return 0, nil
}
r.pos = int64(len(r.buf))
return r.pos, nil
}
r.pos = seekpos
return seekpos, nil
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gofuzz
package traceparser
import (
"bytes"
"fmt"
"log"
)
// At first we ran the old parser and returned 0 if it failed, on the theory that we don't
// have to do better. But that leads to very few crashes to look at.
// Maybe better just to make it so that the new parser doesn't misbehave, and if it doesn't get
// an error, that the old parser gets the same results. (up to whatever)
// Perhaps even better would be to seed the corpus with examples from which the 16-byte header
// has been stripped, and add it back in Fuzz, so the fuzzer doesn't spend a lot of time making
// changes we reject in the header. (this may not be necessary)
func Fuzz(data []byte) int {
if len(data) < 16 {
return 0
}
switch x := string(data[:16]); x {
default:
return 0
case "go 1.9 trace\000\000\000\000":
break
case "go 1.10 trace\000\000\000":
break
case "go 1.11 trace\000\000\000":
break
}
p, errp := ParseBuffer(bytes.NewBuffer(data))
if errp != nil {
if p != nil {
panic(fmt.Sprintf("p not nil on error %s", errp))
}
}
// TODO(pjw): if no errors, compare parses?
return 1
}
func init() {
log.SetFlags(log.Lshortfile)
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package traceparser
import "sort"
// GDesc contains statistics and execution details of a single goroutine.
type GDesc struct {
ID uint64
Name string
PC uint64
CreationTime int64
StartTime int64
EndTime int64
// List of regions in the goroutine, sorted based on the start time.
Regions []*UserRegionDesc
// Statistics of execution time during the goroutine execution.
GExecutionStat
*gDesc // private part.
}
// UserRegionDesc represents a region and goroutine execution stats
// while the region was active.
type UserRegionDesc struct {
TaskID uint64
Name string
// Region start event. Normally EvUserRegion start event or nil,
// but can be EvGoCreate event if the region is a synthetic
// region representing task inheritance from the parent goroutine.
Start *Event
// Region end event. Normally EvUserRegion end event or nil,
// but can be EvGoStop or EvGoEnd event if the goroutine
// terminated without explicitly ending the region.
End *Event
GExecutionStat
}
// GExecutionStat contains statistics about a goroutine's execution
// during a period of time.
type GExecutionStat struct {
ExecTime int64
SchedWaitTime int64
IOTime int64
BlockTime int64
SyscallTime int64
GCTime int64
SweepTime int64
TotalTime int64
}
// sub returns the stats v-s.
func (s GExecutionStat) sub(v GExecutionStat) (r GExecutionStat) {
r = s
r.ExecTime -= v.ExecTime
r.SchedWaitTime -= v.SchedWaitTime
r.IOTime -= v.IOTime
r.BlockTime -= v.BlockTime
r.SyscallTime -= v.SyscallTime
r.GCTime -= v.GCTime
r.SweepTime -= v.SweepTime
r.TotalTime -= v.TotalTime
return r
}
// snapshotStat returns the snapshot of the goroutine execution statistics.
// This is called as we process the ordered trace event stream. lastTs and
// activeGCStartTime are used to process pending statistics if this is called
// before any goroutine end event.
func (g *GDesc) snapshotStat(lastTs, activeGCStartTime int64) (ret GExecutionStat) {
ret = g.GExecutionStat
if g.gDesc == nil {
return ret // finalized GDesc. No pending state.
}
if activeGCStartTime != 0 { // terminating while GC is active
if g.CreationTime < activeGCStartTime {
ret.GCTime += lastTs - activeGCStartTime
} else {
// The goroutine's lifetime completely overlaps
// with a GC.
ret.GCTime += lastTs - g.CreationTime
}
}
if g.TotalTime == 0 {
ret.TotalTime = lastTs - g.CreationTime
}
if g.lastStartTime != 0 {
ret.ExecTime += lastTs - g.lastStartTime
}
if g.blockNetTime != 0 {
ret.IOTime += lastTs - g.blockNetTime
}
if g.blockSyncTime != 0 {
ret.BlockTime += lastTs - g.blockSyncTime
}
if g.blockSyscallTime != 0 {
ret.SyscallTime += lastTs - g.blockSyscallTime
}
if g.blockSchedTime != 0 {
ret.SchedWaitTime += lastTs - g.blockSchedTime
}
if g.blockSweepTime != 0 {
ret.SweepTime += lastTs - g.blockSweepTime
}
return ret
}
// finalize is called when processing a goroutine end event, or at the
// end of trace processing (in which case trigger is nil). It finalizes
// the execution stat and any active regions in the goroutine.
func (g *GDesc) finalize(lastTs, activeGCStartTime int64, trigger *Event) {
if trigger != nil {
g.EndTime = trigger.Ts
}
finalStat := g.snapshotStat(lastTs, activeGCStartTime)
g.GExecutionStat = finalStat
for _, s := range g.activeRegions {
s.End = trigger
s.GExecutionStat = finalStat.sub(s.GExecutionStat)
g.Regions = append(g.Regions, s)
}
*(g.gDesc) = gDesc{}
}
// gDesc is a private part of GDesc that is required only during analysis.
type gDesc struct {
lastStartTime int64
blockNetTime int64
blockSyncTime int64
blockSyscallTime int64
blockSweepTime int64
blockGCTime int64
blockSchedTime int64
activeRegions []*UserRegionDesc // stack of active regions
}
// GoroutineStats generates statistics for all goroutines in the trace segment.
func (p *Parsed) GoroutineStats() map[uint64]*GDesc {
events := p.Events
gs := make(map[uint64]*GDesc)
var lastTs int64
var gcStartTime int64 // gcStartTime == 0 indicates gc is inactive.
for _, ev := range events {
lastTs = ev.Ts
switch ev.Type {
case EvGoCreate:
g := &GDesc{ID: ev.Args[0], CreationTime: ev.Ts, gDesc: new(gDesc)}
g.blockSchedTime = ev.Ts
// When a goroutine is newly created, inherit the
// task of the active region. For ease of handling
// this case, we create a fake region description with
// the task id.
if creatorG := gs[ev.G]; creatorG != nil && len(creatorG.gDesc.activeRegions) > 0 {
regions := creatorG.gDesc.activeRegions
s := regions[len(regions)-1]
if s.TaskID != 0 {
g.gDesc.activeRegions = []*UserRegionDesc{
{TaskID: s.TaskID, Start: ev},
}
}
}
gs[g.ID] = g
case EvGoStart, EvGoStartLabel:
g := gs[ev.G]
if g.PC == 0 {
stk := p.Stacks[ev.StkID]
g.PC = stk[0].PC
g.Name = stk[0].Fn
}
g.lastStartTime = ev.Ts
if g.StartTime == 0 {
g.StartTime = ev.Ts
}
if g.blockSchedTime != 0 {
g.SchedWaitTime += ev.Ts - g.blockSchedTime
g.blockSchedTime = 0
}
case EvGoEnd, EvGoStop:
g := gs[ev.G]
g.finalize(ev.Ts, gcStartTime, ev)
case EvGoBlockSend, EvGoBlockRecv, EvGoBlockSelect,
EvGoBlockSync, EvGoBlockCond:
g := gs[ev.G]
g.ExecTime += ev.Ts - g.lastStartTime
g.lastStartTime = 0
g.blockSyncTime = ev.Ts
case EvGoSched, EvGoPreempt:
g := gs[ev.G]
g.ExecTime += ev.Ts - g.lastStartTime
g.lastStartTime = 0
g.blockSchedTime = ev.Ts
case EvGoSleep, EvGoBlock:
g := gs[ev.G]
g.ExecTime += ev.Ts - g.lastStartTime
g.lastStartTime = 0
case EvGoBlockNet:
g := gs[ev.G]
g.ExecTime += ev.Ts - g.lastStartTime
g.lastStartTime = 0
g.blockNetTime = ev.Ts
case EvGoBlockGC:
g := gs[ev.G]
g.ExecTime += ev.Ts - g.lastStartTime
g.lastStartTime = 0
g.blockGCTime = ev.Ts
case EvGoUnblock:
g := gs[ev.Args[0]]
if g.blockNetTime != 0 {
g.IOTime += ev.Ts - g.blockNetTime
g.blockNetTime = 0
}
if g.blockSyncTime != 0 {
g.BlockTime += ev.Ts - g.blockSyncTime
g.blockSyncTime = 0
}
g.blockSchedTime = ev.Ts
case EvGoSysBlock:
g := gs[ev.G]
g.ExecTime += ev.Ts - g.lastStartTime
g.lastStartTime = 0
g.blockSyscallTime = ev.Ts
case EvGoSysExit:
g := gs[ev.G]
if g.blockSyscallTime != 0 {
g.SyscallTime += ev.Ts - g.blockSyscallTime
g.blockSyscallTime = 0
}
g.blockSchedTime = ev.Ts
case EvGCSweepStart:
g := gs[ev.G]
if g != nil {
// Sweep can happen during GC on system goroutine.
g.blockSweepTime = ev.Ts
}
case EvGCSweepDone:
g := gs[ev.G]
if g != nil && g.blockSweepTime != 0 {
g.SweepTime += ev.Ts - g.blockSweepTime
g.blockSweepTime = 0
}
case EvGCStart:
gcStartTime = ev.Ts
case EvGCDone:
for _, g := range gs {
if g.EndTime != 0 {
continue
}
if gcStartTime < g.CreationTime {
g.GCTime += ev.Ts - g.CreationTime
} else {
g.GCTime += ev.Ts - gcStartTime
}
}
gcStartTime = 0 // indicates gc is inactive.
case EvUserRegion:
g := gs[ev.G]
switch mode := ev.Args[1]; mode {
case 0: // region start
g.activeRegions = append(g.activeRegions, &UserRegionDesc{
Name: ev.SArgs[0],
TaskID: ev.Args[0],
Start: ev,
GExecutionStat: g.snapshotStat(lastTs, gcStartTime),
})
case 1: // region end
var sd *UserRegionDesc
if regionStk := g.activeRegions; len(regionStk) > 0 {
n := len(regionStk)
sd = regionStk[n-1]
regionStk = regionStk[:n-1] // pop
g.activeRegions = regionStk
} else {
sd = &UserRegionDesc{
Name: ev.SArgs[0],
TaskID: ev.Args[0],
}
}
sd.GExecutionStat = g.snapshotStat(lastTs, gcStartTime).sub(sd.GExecutionStat)
sd.End = ev
g.Regions = append(g.Regions, sd)
}
}
}
for _, g := range gs {
g.finalize(lastTs, gcStartTime, nil)
// sort based on region start time
sort.Slice(g.Regions, func(i, j int) bool {
x := g.Regions[i].Start
y := g.Regions[j].Start
if x == nil {
return true
}
if y == nil {
return false
}
return x.Ts < y.Ts
})
g.gDesc = nil
}
return gs
}
// RelatedGoroutines finds a set of goroutines related to goroutine goid.
func (p *Parsed) RelatedGoroutines(goid uint64) map[uint64]bool {
events := p.Events
// BFS of depth 2 over "unblock" edges
// (what goroutines unblock goroutine goid?).
gmap := make(map[uint64]bool)
gmap[goid] = true
for i := 0; i < 2; i++ {
gmap1 := make(map[uint64]bool)
for g := range gmap {
gmap1[g] = true
}
for _, ev := range events {
if ev.Type == EvGoUnblock && gmap[ev.Args[0]] {
gmap1[ev.G] = true
}
}
gmap = gmap1
}
gmap[0] = true // for GC events
return gmap
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package traceparser
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
)
var (
// test files from the old trace parser
otherDir = "../trace/testdata/"
want = map[string]bool{"http_1_9_good": true, "http_1_10_good": true, "http_1_11_good": true,
"stress_1_9_good": true, "stress_1_10_good": true, "stress_1_11_good": true,
"stress_start_stop_1_9_good": true, "stress_start_stop_1_10_good": true,
"stress_start_stop_1_11_good": true, "user_task_span_1_11_good": true,
"http_1_5_good": false, "http_1_7_good": false,
"stress_1_5_good": false, "stress_1_5_unordered": false, "stress_1_7_good": false,
"stress_start_stop_1_5_good": false, "stress_start_stop_1_7_good": false,
}
)
func TestRemoteFiles(t *testing.T) {
files, err := ioutil.ReadDir(otherDir)
if err != nil {
t.Fatal(err)
}
for _, f := range files {
fname := filepath.Join(otherDir, f.Name())
p, err := New(fname)
if err == nil {
err = p.Parse(0, 1<<62, nil)
}
if err == nil != want[f.Name()] {
t.Errorf("%s: got %v expected %v, err=%v",
f.Name(), err == nil, want[f.Name()], err)
}
}
}
func TestLocalFiles(t *testing.T) {
files, err := ioutil.ReadDir("./testdata")
if err != nil {
t.Fatalf("failed to read ./testdata: %v", err)
}
for _, f := range files {
fname := filepath.Join("./testdata", f.Name())
p, err := New(fname)
if err == nil {
err = p.Parse(0, 1<<62, nil)
}
switch {
case strings.Contains(f.Name(), "good"),
strings.Contains(f.Name(), "weird"):
if err != nil {
t.Errorf("unexpected failure %v %s", err, f.Name())
}
case strings.Contains(f.Name(), "bad"):
if err == nil {
t.Errorf("bad file did not fail %s", f.Name())
}
default:
t.Errorf("untyped file %v %s", err, f.Name())
}
}
}
func TestStats(t *testing.T) {
// Need just one good file to see that OSStats works properly.
files, err := ioutil.ReadDir("./testdata")
if err != nil {
t.Fatal(err)
}
for _, f := range files {
if !strings.HasPrefix(f.Name(), "good") {
continue
}
fname := filepath.Join("./testdata", f.Name())
p, err := New(fname)
if err != nil {
t.Fatal(err)
}
stat := p.OSStats()
if stat.Bytes == 0 || stat.Seeks == 0 || stat.Reads == 0 {
t.Errorf("OSStats impossible %v", stat)
}
fd, err := os.Open(fname)
if err != nil {
t.Fatal(err)
}
pb, err := ParseBuffer(fd)
if err != nil {
t.Fatal(err)
}
stat = pb.OSStats()
if stat.Seeks != 0 || stat.Reads != 0 {
t.Errorf("unexpected positive results %v", stat)
}
}
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package traceparser
import (
"encoding/binary"
"fmt"
"hash/fnv"
"io"
"log"
)
// convert batches into their raw events. For small intervals (1 or 10 seconds)
// this takes about 40% of the total Parse time.
func (p *Parsed) batchify(b *batch) error {
evs := make([]rawEvent, 0)
p.seenArgs = make(map[uint64]*[]uint64)
hasher := fnv.New64()
r := p.r
r.Seek(int64(b.Off), 0)
var buf [1]byte
seenBatch := false // to terminate the loop on the second EvBatch
for off := b.Off; ; {
off0 := off // remember the beginning of the event
n, err := r.Read(buf[:])
if err != nil {
return err
}
off += n
typ := buf[0] << 2 >> 2 // event type is bottom 6 bits
if typ == EvFrequency || (typ == EvBatch && seenBatch) {
break // found trailer, or next batch
}
if typ == EvBatch {
seenBatch = true
}
if typ == EvString {
// skip over it. error checking was done in file.go
_, off, _ = readVal(r, off)
var ln uint64
ln, off, _ = readVal(r, off)
// PJW: why not just seek ahead ln bytes?
if false {
buf := make([]byte, ln)
var n int
n, _ = io.ReadFull(r, buf)
off += n
} else {
n, _ := r.Seek(int64(ln), 1)
off = int(n)
}
continue
}
// build the raw event and collect its arguments
ev := rawEvent{typ: typ, off: uint32(off0 - b.Off)}
var args []uint64
off, args, err = p.argsAt(off0, typ)
if err != nil {
// PJW: make sure this is useful
return fmt.Errorf("parsing %s failed at P=%d off=%d %v", evname(typ),
b.P, off0, err)
}
// have we seen the args before?
if len(args) > 0 {
ev.arg0 = args[0]
if len(args) > 1 {
hasher.Reset()
for i := 1; i < len(args); i++ {
var x [8]byte
binary.LittleEndian.PutUint64(x[:], args[i])
_, err := hasher.Write(x[:])
if err != nil {
log.Fatal(err)
}
}
hc := hasher.Sum64()
old, ok := p.seenArgs[hc]
if !ok {
final := make([]uint64, len(args)-1)
copy(final, args[1:])
p.seenArgs[hc] = &final
} else {
// is this a collision? PJW: make this precisely right
if len(*old) != len(args[1:]) {
log.Fatalf("COLLISION old:%v this:%v", *old, args[1:])
}
}
ev.args = p.seenArgs[hc]
}
}
if typ == EvUserLog {
// argsAt didn't read the string argument
var s string
s, off, err = readStr(r, off)
ev.sarg = s
}
evs = append(evs, ev)
}
b.raws = evs
return nil
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package traceparser
// test routines used by tests in the trace commands
import "bytes"
// Writer is a test trace writer.
type Writer struct {
bytes.Buffer
}
// NewWriter provides the test Writer
func NewWriter() *Writer {
w := new(Writer)
w.Write([]byte("go 1.9 trace\x00\x00\x00\x00"))
return w
}
// Emit writes an event record to the trace.
// See Event types for valid types and required arguments.
func (w *Writer) Emit(typ byte, args ...uint64) {
nargs := byte(len(args)) - 1
if nargs > 3 {
nargs = 3
}
buf := []byte{typ | nargs<<6}
if nargs == 3 {
buf = append(buf, 0)
}
for _, a := range args {
buf = appendVarint(buf, a)
}
if nargs == 3 {
buf[1] = byte(len(buf) - 2)
}
n, err := w.Write(buf)
if n != len(buf) || err != nil {
panic("failed to write")
}
}
func appendVarint(buf []byte, v uint64) []byte {
for ; v >= 0x80; v >>= 7 {
buf = append(buf, 0x80|byte(v))
}
buf = append(buf, byte(v))
return buf
}
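A hedged sketch of the Writer feeding the parser: NewWriter writes the 16-byte go 1.9 header, and Emit encodes each event as a type byte (with the argument count in its top two bits) followed by base-128 varint arguments, the encoding that readVal reverses during parsing. The event arguments mirror common test patterns (a batch header plus the EvFrequency footer); whether such a minimal trace is accepted end to end is an assumption, and the error handling is illustrative:

package traceparser

import "log"

func exampleTinyTrace() {
	w := NewWriter()
	w.Emit(EvBatch, 0, 0)    // batch header: [P id, start timestamp]
	w.Emit(EvFrequency, 1e9) // footer: ticks per second
	// Writer embeds bytes.Buffer, so it can be handed to ParseBuffer directly.
	p, err := ParseBuffer(w)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("scanned %d events", p.Count)
}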