Commit 2b58ca6e authored by David Chase

cmd/compile: begin OpArg and OpPhi location lists at block start

For the entry block, make the "first instruction" truly be the
first instruction, i.e., PC 0, ahead of the assembler-generated
prologue. This allows incoming parameters to be printed with Delve.

Also ensure that Phis are marked as being at the start of their
block. This is observed to move location-list pointers, and where
they move, they become correct.

Leading zero-width instructions include LoweredGetClosurePtr.
Because this instruction is architecture-specific and is now tested
for in three different places, Op.isLoweredGetClosurePtr() was also
added to reduce future surprises.
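
As a hedged illustration (the function and Delve session below are
hypothetical, not part of this CL): with argument location lists
beginning at PC 0, stopping at a function's entry in Delve should
show the incoming parameters.

    func add(a, b int) int { // hypothetical example function
        return a + b
    }

    (dlv) break main.add
    (dlv) continue
    > main.add() ./main.go:3
    (dlv) args
    a = 1
    b = 2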

Change-Id: Ic043b7265835cf1790382a74334b5714ae4060af
Reviewed-on: https://go-review.googlesource.com/c/145179
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Heschi Kreinick <heschi@google.com>
parent 311d87db
src/cmd/compile/internal/gc/ssa.go
@@ -5190,6 +5190,10 @@ func genssa(f *ssa.Func, pp *Progs) {
e.curfn.Func.DebugInfo.GetPC = func(b, v ssa.ID) int64 {
switch v {
case ssa.BlockStart.ID:
if b == f.Entry.ID {
return 0 // Start at the very beginning, at the assembler-generated prologue.
// this should only happen for function args (ssa.OpArg)
}
return bstart[b].Pc
case ssa.BlockEnd.ID:
return e.curfn.Func.lsym.Size
@@ -5199,7 +5203,7 @@ func genssa(f *ssa.Func, pp *Progs) {
}
}
// Resolove branchers, and relax DefaultStmt into NotStmt
// Resolve branches, and relax DefaultStmt into NotStmt
for _, br := range s.Branches {
br.P.To.Val = s.bstart[br.B.ID]
if br.P.Pos.IsStmt() != src.PosIsStmt {
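In outline, the GetPC hook above resolves the two sentinel value IDs
to program counters, and the new special case makes ranges anchored at
the entry block's start begin at PC 0, ahead of the assembler-generated
prologue. A self-contained sketch of the same shape, with stand-in
names and arbitrary sentinel values (not the compiler's own):

    // Stand-ins for ssa.BlockStart.ID and ssa.BlockEnd.ID; the concrete
    // numbers here are arbitrary.
    const (
        blockStartID int64 = -1
        blockEndID   int64 = -2
    )

    // pcFor resolves a (block, value) ID pair to a PC. blockStartPC maps
    // each block to the PC of its first instruction; fnSize is the size
    // of the function's text in bytes.
    func pcFor(entryID, b, v int64, blockStartPC map[int64]int64, fnSize int64) int64 {
        switch v {
        case blockStartID:
            if b == entryID {
                return 0 // entry block: the range covers the prologue too
            }
            return blockStartPC[b]
        case blockEndID:
            return fnSize
        }
        return -1 // ordinary value IDs are resolved elsewhere
    }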
src/cmd/compile/internal/ssa/debug.go
@@ -826,6 +826,7 @@ func firstReg(set RegisterSet) uint8 {
func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
// Run through the function in program text order, building up location
// lists as we go. The heavy lifting has mostly already been done.
for _, b := range state.f.Blocks {
if !blockLocs[b.ID].relevant {
continue
@@ -834,13 +835,24 @@ func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
state.mergePredecessors(b, blockLocs)
zeroWidthPending := false
apcChangedSize := 0 // size of changedVars for leading Args, Phi, ClosurePtr
// expect to see values in pattern (apc)* (zerowidth|real)*
for _, v := range b.Values {
slots := state.valueNames[v.ID]
reg, _ := state.f.getHome(v.ID).(*Register)
changed := state.processValue(v, slots, reg)
changed := state.processValue(v, slots, reg) // changed == added to state.changedVars
if opcodeTable[v.Op].zeroWidth {
if changed {
if v.Op == OpArg || v.Op == OpPhi || v.Op.isLoweredGetClosurePtr() {
// These ranges begin at true beginning of block, not after first instruction
if zeroWidthPending {
b.Func.Fatalf("Unexpected op mixed with OpArg/OpPhi/OpLoweredGetClosurePtr at beginning of block %s in %s\n%s", b, b.Func.Name, b.Func)
}
apcChangedSize = len(state.changedVars.contents())
continue
}
// Other zero-width ops must wait on a "real" op.
zeroWidthPending = true
}
continue
@@ -849,12 +861,25 @@ func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
if !changed && !zeroWidthPending {
continue
}
// Not zero-width; i.e., a "real" instruction.
zeroWidthPending = false
for _, varID := range state.changedVars.contents() {
state.updateVar(VarID(varID), v, state.currentState.slots)
for i, varID := range state.changedVars.contents() {
if i < apcChangedSize { // buffered true start-of-block changes
state.updateVar(VarID(varID), v.Block, BlockStart)
} else {
state.updateVar(VarID(varID), v.Block, v)
}
}
state.changedVars.clear()
apcChangedSize = 0
}
for i, varID := range state.changedVars.contents() {
if i < apcChangedSize { // buffered true start-of-block changes
state.updateVar(VarID(varID), b, BlockStart)
} else {
state.updateVar(VarID(varID), b, BlockEnd)
}
}
}
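The loop above relies on the invariant in its comment: a block's values
arrive as (arg|phi|closureptr)* (zerowidth|real)*. Changes produced by
the leading group are counted in apcChangedSize, and when a "real"
instruction flushes changedVars, the first apcChangedSize entries are
anchored at BlockStart rather than at that instruction. A minimal
sketch of the flush rule, with illustrative types:

    // change records where a variable's new location-list entry begins.
    type change struct {
        varID        int
        atBlockStart bool
    }

    // flushChanges applies the attribution rule: the first apcChangedSize
    // entries of changed came from leading Args/Phis/GetClosurePtr and
    // start at the true beginning of the block; the rest start at the
    // instruction that triggered the flush.
    func flushChanges(changed []int, apcChangedSize int) []change {
        out := make([]change, 0, len(changed))
        for i, id := range changed {
            out = append(out, change{varID: id, atBlockStart: i < apcChangedSize})
        }
        return out
    }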
@@ -877,8 +902,10 @@ func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
}
// updateVar updates the pending location list entry for varID to
// reflect the new locations in curLoc, caused by v.
func (state *debugState) updateVar(varID VarID, v *Value, curLoc []VarLoc) {
// reflect the new locations in curLoc, beginning at v in block b.
// v may be one of the special values indicating block start or end.
func (state *debugState) updateVar(varID VarID, b *Block, v *Value) {
curLoc := state.currentState.slots
// Assemble the location list entry with whatever's live.
empty := true
for _, slotID := range state.varSlots[varID] {
@@ -889,7 +916,7 @@ func (state *debugState) updateVar(varID VarID, v *Value, curLoc []VarLoc) {
}
pending := &state.pendingEntries[varID]
if empty {
state.writePendingEntry(varID, v.Block.ID, v.ID)
state.writePendingEntry(varID, b.ID, v.ID)
pending.clear()
return
}
@@ -908,9 +935,9 @@ func (state *debugState) updateVar(varID VarID, v *Value, curLoc []VarLoc) {
}
}
state.writePendingEntry(varID, v.Block.ID, v.ID)
state.writePendingEntry(varID, b.ID, v.ID)
pending.present = true
pending.startBlock = v.Block.ID
pending.startBlock = b.ID
pending.startValue = v.ID
for i, slot := range state.varSlots[varID] {
pending.pieces[i] = curLoc[slot]
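The signature change above passes the block explicitly because v may
now be one of the BlockStart/BlockEnd sentinels, which carry no block
of their own. A rough sketch of updateVar's shape under that change,
with illustrative types (the real code also skips the write when the
variable's pieces are unchanged):

    type pendingEntry struct {
        present    bool
        startBlock int64
        startValue int64
    }

    // update closes the pending location-list entry at (bID, vID) and,
    // if any of the variable's slots are still live, opens a new entry
    // starting there.
    func update(p *pendingEntry, bID, vID int64, anyLive bool, write func(bID, vID int64)) {
        write(bID, vID)
        if !anyLive {
            *p = pendingEntry{} // variable is wholly dead: no new entry
            return
        }
        *p = pendingEntry{present: true, startBlock: bID, startValue: vID}
    }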
src/cmd/compile/internal/ssa/schedule.go
@@ -62,6 +62,16 @@ func (h ValHeap) Less(i, j int) bool {
return x.ID > y.ID
}
func (op Op) isLoweredGetClosurePtr() bool {
switch op {
case OpAMD64LoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr,
Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr,
OpWasmLoweredGetClosurePtr:
return true
}
return false
}
// Schedule the Values in each Block. After this phase returns, the
// order of b.Values matters and is the order in which those values
// will appear in the assembly output. For now it generates a
@@ -92,11 +102,7 @@ func schedule(f *Func) {
// Compute score. Larger numbers are scheduled closer to the end of the block.
for _, v := range b.Values {
switch {
case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr ||
v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr ||
v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr ||
v.Op == OpS390XLoweredGetClosurePtr || v.Op == OpMIPSLoweredGetClosurePtr ||
v.Op == OpWasmLoweredGetClosurePtr:
case v.Op.isLoweredGetClosurePtr():
// We also score GetLoweredClosurePtr as early as possible to ensure that the
// context register is not stomped. GetLoweredClosurePtr should only appear
// in the entry block where there are no phi functions, so there is no
@@ -189,9 +195,11 @@
}
}
if b.Control != nil && b.Control.Op != OpPhi {
if b.Control != nil && b.Control.Op != OpPhi && b.Control.Op != OpArg {
// Force the control value to be scheduled at the end,
// unless it is a phi value (which must be first).
// OpArg also goes first -- if it is stack it register allocates
// to a LoadReg, if it is register it is from the beginning anyway.
score[b.Control.ID] = ScoreControl
// Schedule values dependent on the control value at the end.
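For context, block-local scheduling assigns each value a score class
and emits smaller classes first; the change routes all nine per-arch
GetClosurePtr checks through the new helper and stops forcing an OpArg
control value to the end. A toy rendering of the scoring idea (the
names are illustrative, not the compiler's):

    import "sort"

    type val struct {
        id    int
        score int
    }

    const (
        scoreClosurePtr = iota // LoweredGetClosurePtr first, protecting the context register
        scorePhi               // phis must lead their block
        scoreDefault
        scoreControl // the control value is normally forced last
    )

    // order schedules a block's values so that smaller score classes
    // come first; the stable sort preserves program order within a class.
    func order(vals []val) {
        sort.SliceStable(vals, func(i, j int) bool { return vals[i].score < vals[j].score })
    }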
src/cmd/compile/internal/ssa/tighten.go
@@ -13,15 +13,14 @@ func tighten(f *Func) {
canMove := make([]bool, f.NumValues())
for _, b := range f.Blocks {
for _, v := range b.Values {
if v.Op.isLoweredGetClosurePtr() {
// Must stay in the entry block.
continue
}
switch v.Op {
case OpPhi, OpArg, OpSelect0, OpSelect1,
OpAMD64LoweredGetClosurePtr, Op386LoweredGetClosurePtr,
OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr,
OpMIPSLoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr,
OpS390XLoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr,
OpWasmLoweredGetClosurePtr:
case OpPhi, OpArg, OpSelect0, OpSelect1:
// Phis need to stay in their block.
// GetClosurePtr & Arg must stay in the entry block.
// Arg must stay in the entry block.
// Tuple selectors must stay with the tuple generator.
continue
}
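The tighten hunk reduces to a single movability predicate. In
miniature (illustrative, not the pass itself), using the Op
identifiers from the diff above:

    // movable reports whether tighten may move a value closer to its
    // uses; pinned ops must keep their position.
    func movable(op Op) bool {
        if op.isLoweredGetClosurePtr() {
            return false // must stay in the entry block
        }
        switch op {
        case OpPhi, OpArg, OpSelect0, OpSelect1:
            // Phis stay in their block, Arg stays in the entry block,
            // and tuple selectors stay with their tuple generator.
            return false
        }
        return true
    }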