Commit c18ff184 authored by David Chase

cmd/compile: decouple emitted block order from regalloc block order

While tinkering with different block orders for the preemptible
loop experiment, I crashed the register allocator with a "bad"
one (these exist). I realized that one knob was controlling
two things (register allocation order and branch patterns) and
decided that life would be simpler if the two orders were
independent.

I ran some experiments and determined that we have probably,
for the most part, been optimizing block order for register
allocation effects, not branch effects. Bad block orders for
register allocation are somewhat costly.

This will also allow separate experimentation with perhaps-
better block orders for register allocation.

Change-Id: I6ecf2f24cca178b6f8acc0d3c4caaef043c11ed9
Reviewed-on: https://go-review.googlesource.com/47314
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
parent a66af728
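
The shape of the change, reduced to a sketch: layout still decides the emitted order by writing into f.Blocks, while regalloc now asks a separate function for its own visit order, so the two can be tuned independently. The types below are toy stand-ins for the real ones in cmd/compile/internal/ssa, not the actual implementation:

package main

import "fmt"

// Toy stand-ins for ssa.Func and ssa.Block, just to show the
// decoupling; the real types carry much more state.
type Block struct{ ID int }
type Func struct{ Blocks []*Block }

// layoutOrder computes the branch-friendly emission order.
func layoutOrder(f *Func) []*Block {
	return f.Blocks // placeholder: the real pass reorders blocks
}

// layoutRegallocOrder returns the order the register allocator
// visits blocks in; it may now differ from the emitted order.
func layoutRegallocOrder(f *Func) []*Block {
	return layoutOrder(f) // default: agree with the emitted order
}

func main() {
	f := &Func{Blocks: []*Block{{ID: 1}, {ID: 2}, {ID: 3}}}
	f.Blocks = layoutOrder(f)            // order consumed by codegen
	visitOrder := layoutRegallocOrder(f) // order consumed by regalloc
	fmt.Println(len(f.Blocks) == len(visitOrder)) // same blocks, possibly different order
}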
src/cmd/compile/internal/ssa/layout.go

@@ -8,6 +8,33 @@ package ssa
 // After this phase returns, the order of f.Blocks matters and is the order
 // in which those blocks will appear in the assembly output.
 func layout(f *Func) {
+	f.Blocks = layoutOrder(f)
+}
+
+// Register allocation may use a different order which has constraints
+// imposed by the linear-scan algorithm. Note that f.pass here is
+// regalloc, so the switch is conditional on -d=ssa/regalloc/test=N
+func layoutRegallocOrder(f *Func) []*Block {
+	switch f.pass.test {
+	case 0: // layout order
+		return layoutOrder(f)
+	case 1: // existing block order
+		return f.Blocks
+	case 2: // reverse of postorder; legal, but usually not good.
+		po := f.postorder()
+		visitOrder := make([]*Block, len(po))
+		for i, b := range po {
+			j := len(po) - i - 1
+			visitOrder[j] = b
+		}
+		return visitOrder
+	}
+	return nil
+}
+
+func layoutOrder(f *Func) []*Block {
 	order := make([]*Block, 0, f.NumBlocks())
 	scheduled := make([]bool, f.NumBlocks())
 	idToBlock := make([]*Block, f.NumBlocks())
@@ -116,5 +143,5 @@ blockloop:
 		}
 	}
-	f.Blocks = order
+	return order
 }
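
For intuition on case 2 above: f.postorder() lists a block only after its successors have been visited, so reversing it yields a reverse postorder, in which most predecessors precede their successors; that is legal input for the linear-scan allocator but usually not a good one. A self-contained sketch of the same reversal loop on plain strings (toy data, not the ssa package):

package main

import "fmt"

func main() {
	// Pretend this is f.postorder() for a straight-line CFG
	// b1 -> b2 -> b3: leaves come first in a postorder.
	po := []string{"b3", "b2", "b1"}

	// The same index arithmetic as in layoutRegallocOrder.
	visitOrder := make([]string, len(po))
	for i, b := range po {
		j := len(po) - i - 1
		visitOrder[j] = b
	}
	fmt.Println(visitOrder) // [b1 b2 b3], i.e. reverse postorder
}

If I read the ssa phase-flag plumbing correctly, f.pass.test is set through the compiler's -d debug flags, so an invocation like go build -gcflags='-d=ssa/regalloc/test=2' should select the reverse-postorder variant; treat that exact command line as an assumption rather than documented UI.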
src/cmd/compile/internal/ssa/regalloc.go

@@ -283,6 +283,9 @@ type regAllocState struct {
 	copies map[*Value]bool
 
 	loopnest *loopnest
+
+	// choose a good order in which to visit blocks for allocation purposes.
+	visitOrder []*Block
 }
 
 type endReg struct {
@@ -589,11 +592,23 @@ func (s *regAllocState) init(f *Func) {
 		s.allocatable &^= 1 << 15 // X7 disallowed (one 387 register is used as scratch space during SSE->387 generation in ../x86/387.go)
 	}
 
+	// Linear scan register allocation can be influenced by the order in which blocks appear.
+	// Decouple the register allocation order from the generated block order.
+	// This also creates an opportunity for experiments to find a better order.
+	s.visitOrder = layoutRegallocOrder(f)
+
+	// Compute block order. This array allows us to distinguish forward edges
+	// from backward edges and compute how far they go.
+	blockOrder := make([]int32, f.NumBlocks())
+	for i, b := range s.visitOrder {
+		blockOrder[b.ID] = int32(i)
+	}
+
 	s.regs = make([]regState, s.numRegs)
 	s.values = make([]valState, f.NumValues())
 	s.orig = make([]*Value, f.NumValues())
 	s.copies = make(map[*Value]bool)
-	for _, b := range f.Blocks {
+	for _, b := range s.visitOrder {
 		for _, v := range b.Values {
 			if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() {
 				s.values[v.ID].needReg = true
@@ -606,16 +621,9 @@ func (s *regAllocState) init(f *Func) {
 	}
 	s.computeLive()
 
-	// Compute block order. This array allows us to distinguish forward edges
-	// from backward edges and compute how far they go.
-	blockOrder := make([]int32, f.NumBlocks())
-	for i, b := range f.Blocks {
-		blockOrder[b.ID] = int32(i)
-	}
-
 	// Compute primary predecessors.
 	s.primary = make([]int32, f.NumBlocks())
-	for _, b := range f.Blocks {
+	for _, b := range s.visitOrder {
 		best := -1
 		for i, e := range b.Preds {
 			p := e.b
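
The blockOrder array built in init maps a block ID to its position in the visit order; code later in the pass treats an edge whose source position is >= its destination's as a backward edge (for example when choosing primary predecessors). A minimal sketch of that classification, with hand-made positions standing in for the real loop over s.visitOrder:

package main

import "fmt"

func main() {
	// blockOrder[id] = position of block id in the visit order;
	// index 0 is unused, mirroring 1-based block IDs.
	blockOrder := []int32{0, 0, 1, 2}

	// b1 -> b2 is a forward edge; the loop edge b3 -> b2 runs
	// from a later position to an earlier one, so it is backward.
	edges := [][2]int{{1, 2}, {3, 2}}
	for _, e := range edges {
		src, dst := e[0], e[1]
		backward := blockOrder[src] >= blockOrder[dst]
		dist := blockOrder[dst] - blockOrder[src] // how far the edge goes
		fmt.Printf("b%d -> b%d: backward=%v distance=%d\n", src, dst, backward, dist)
	}
}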
@@ -728,7 +736,7 @@ func (s *regAllocState) regalloc(f *Func) {
 		f.Fatalf("entry block must be first")
 	}
 
-	for _, b := range f.Blocks {
+	for _, b := range s.visitOrder {
 		if s.f.pass.debug > regDebug {
 			fmt.Printf("Begin processing block %v\n", b)
 		}
@@ -1544,7 +1552,7 @@ func (s *regAllocState) regalloc(f *Func) {
 		}
 	}
 
-	for _, b := range f.Blocks {
+	for _, b := range s.visitOrder {
 		i := 0
 		for _, v := range b.Values {
 			if v.Op == OpInvalid {
@@ -1562,7 +1570,7 @@ func (s *regAllocState) placeSpills() {
 
 	// Precompute some useful info.
 	phiRegs := make([]regMask, f.NumBlocks())
-	for _, b := range f.Blocks {
+	for _, b := range s.visitOrder {
 		var m regMask
 		for _, v := range b.Values {
 			if v.Op != OpPhi {
@@ -1672,7 +1680,7 @@ func (s *regAllocState) placeSpills() {
 
 	// Insert spill instructions into the block schedules.
 	var oldSched []*Value
-	for _, b := range f.Blocks {
+	for _, b := range s.visitOrder {
 		nphi := 0
 		for _, v := range b.Values {
 			if v.Op != OpPhi {
@@ -1701,7 +1709,7 @@ func (s *regAllocState) shuffle(stacklive [][]ID) {
 		fmt.Println(s.f.String())
 	}
 
-	for _, b := range s.f.Blocks {
+	for _, b := range s.visitOrder {
 		if len(b.Preds) <= 1 {
 			continue
 		}