Commit fd38dbc8 authored by Russ Cox

cmd/internal/gc: cache ProgInfo in Prog

The ProgInfo is loaded many times during each analysis pass.
Load it once at the beginning (in Flowstart if using that, or explicitly,
as in plive.go) and then refer to the cached copy.

Removes many calls to proginfo.

Makes Prog a little bigger, but the previous CL more than compensates.

Change-Id: If90a12fc6729878fdae10444f9c3bedc8d85026e
Reviewed-on: https://go-review.googlesource.com/7745
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
parent 532ccae1
......@@ -257,7 +257,6 @@ func subprop(r0 *gc.Flow) bool {
if !regtyp(v2) {
return false
}
var info gc.ProgInfo
for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
break
......@@ -266,14 +265,16 @@ func subprop(r0 *gc.Flow) bool {
if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
continue
}
info = proginfo(p)
if info.Flags&gc.Call != 0 {
if p.Info.Flags&gc.Call != 0 {
return false
}
if (info.Flags&gc.CanRegRead != 0) && p.To.Type == obj.TYPE_REG {
info.Flags |= gc.RegRead
info.Flags &^= (gc.CanRegRead | gc.RightRead)
// TODO(rsc): Whatever invalidated the info should have done this call.
proginfo(p)
if (p.Info.Flags&gc.CanRegRead != 0) && p.To.Type == obj.TYPE_REG {
p.Info.Flags |= gc.RegRead
p.Info.Flags &^= (gc.CanRegRead | gc.RightRead)
p.Reg = p.To.Reg
}
......@@ -284,7 +285,7 @@ func subprop(r0 *gc.Flow) bool {
return false
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
if p.To.Type == v1.Type {
if p.To.Reg == v1.Reg {
if p.Scond == arm.C_SCOND_NONE {
......
This diff is collapsed.
......@@ -557,7 +557,6 @@ func subprop(r0 *gc.Flow) bool {
return false
}
var info gc.ProgInfo
for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\t? %v\n", r.Prog)
......@@ -573,22 +572,21 @@ func subprop(r0 *gc.Flow) bool {
if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
continue
}
info = proginfo(p)
if info.Flags&gc.Call != 0 {
if p.Info.Flags&gc.Call != 0 {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tfound %v; return 0\n", p)
}
return false
}
if info.Reguse|info.Regset != 0 {
if p.Info.Reguse|p.Info.Regset != 0 {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tfound %v; return 0\n", p)
}
return false
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
if (p.Info.Flags&gc.Move != 0) && (p.Info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
......@@ -820,25 +818,24 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
return 0
}
info := proginfo(p)
if (info.Reguse|info.Regset)&RtoB(int(v.Reg)) != 0 {
if (p.Info.Reguse|p.Info.Regset)&RtoB(int(v.Reg)) != 0 {
return 2
}
if info.Flags&gc.LeftAddr != 0 {
if p.Info.Flags&gc.LeftAddr != 0 {
if copyas(&p.From, v) {
return 2
}
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
if copyas(&p.To, v) {
return 2
}
}
if info.Flags&gc.RightWrite != 0 {
if p.Info.Flags&gc.RightWrite != 0 {
if copyas(&p.To, v) {
if s != nil {
return copysub(&p.From, v, s, 1)
......@@ -850,7 +847,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
}
}
if info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
if p.Info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
if s != nil {
if copysub(&p.From, v, s, 1) != 0 {
return 1
......
This diff is collapsed.
This diff is collapsed.
......@@ -46,14 +46,11 @@ var gactive uint32
// do we need the carry bit
func needc(p *obj.Prog) bool {
var info gc.ProgInfo
for p != nil {
info = proginfo(p)
if info.Flags&gc.UseCarry != 0 {
if p.Info.Flags&gc.UseCarry != 0 {
return true
}
if info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
if p.Info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
return false
}
p = p.Link
......@@ -370,7 +367,6 @@ func subprop(r0 *gc.Flow) bool {
if !regtyp(v2) {
return false
}
var info gc.ProgInfo
for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\t? %v\n", r.Prog)
......@@ -382,16 +378,15 @@ func subprop(r0 *gc.Flow) bool {
if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
continue
}
info = proginfo(p)
if info.Flags&gc.Call != 0 {
if p.Info.Flags&gc.Call != 0 {
return false
}
if info.Reguse|info.Regset != 0 {
if p.Info.Reguse|p.Info.Regset != 0 {
return false
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
if (p.Info.Flags&gc.Move != 0) && (p.Info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
......@@ -610,26 +605,24 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
return 0
}
var info gc.ProgInfo
info = proginfo(p)
if (info.Reguse|info.Regset)&RtoB(int(v.Reg)) != 0 {
if (p.Info.Reguse|p.Info.Regset)&RtoB(int(v.Reg)) != 0 {
return 2
}
if info.Flags&gc.LeftAddr != 0 {
if p.Info.Flags&gc.LeftAddr != 0 {
if copyas(&p.From, v) {
return 2
}
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
if copyas(&p.To, v) {
return 2
}
}
if info.Flags&gc.RightWrite != 0 {
if p.Info.Flags&gc.RightWrite != 0 {
if copyas(&p.To, v) {
if s != nil {
return copysub(&p.From, v, s, 1)
......@@ -641,7 +634,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
}
}
if info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
if p.Info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
if s != nil {
if copysub(&p.From, v, s, 1) != 0 {
return 1
......
This diff is collapsed.
......@@ -407,7 +407,6 @@ func subprop(r0 *gc.Flow) bool {
if !regtyp(v2) {
return false
}
var info gc.ProgInfo
for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
break
......@@ -416,12 +415,11 @@ func subprop(r0 *gc.Flow) bool {
if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
continue
}
info = proginfo(p)
if info.Flags&gc.Call != 0 {
if p.Info.Flags&gc.Call != 0 {
return false
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
if p.To.Type == v1.Type {
if p.To.Reg == v1.Reg {
copysub(&p.To, v1, v2, 1)
......
This diff is collapsed.
......@@ -737,12 +737,6 @@ type Graph struct {
/*
* interface to back end
*/
type ProgInfo struct {
Flags uint32 // the bits below
Reguse uint64 // registers implicitly used by this instruction
Regset uint64 // registers implicitly set by this instruction
Regindex uint64 // registers used by addressing mode
}
const (
// Pseudo-op, like TEXT, GLOBL, TYPE, PCDATA, FUNCDATA.
......@@ -823,7 +817,7 @@ type Arch struct {
Igen func(*Node, *Node, *Node)
Linkarchinit func()
Peep func(*obj.Prog)
Proginfo func(*obj.Prog) ProgInfo
Proginfo func(*obj.Prog) // fills in Prog.Info
Regalloc func(*Node, *Type, *Node)
Regfree func(*Node)
Regtyp func(*obj.Addr) bool
......
......@@ -422,6 +422,7 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
bb := newblock(firstp)
cfg = append(cfg, bb)
for p := firstp; p != nil; p = p.Link {
Thearch.Proginfo(p)
if p.To.Type == obj.TYPE_BRANCH {
if p.To.Val == nil {
Fatal("prog branch to nil")
......@@ -561,7 +562,6 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar Bvec, varkill Bvec, avarini
bvresetall(varkill)
bvresetall(avarinit)
info := Thearch.Proginfo(prog)
if prog.As == obj.ARET {
// Return instructions implicitly read all the arguments. For
// the sake of correctness, out arguments must be read. For the
......@@ -612,7 +612,7 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar Bvec, varkill Bvec, avarini
return
}
if info.Flags&(LeftRead|LeftWrite|LeftAddr) != 0 {
if prog.Info.Flags&(LeftRead|LeftWrite|LeftAddr) != 0 {
from := &prog.From
if from.Node != nil && from.Sym != nil && ((from.Node).(*Node)).Curfn == Curfn {
switch ((from.Node).(*Node)).Class &^ PHEAP {
......@@ -629,10 +629,10 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar Bvec, varkill Bvec, avarini
if ((from.Node).(*Node)).Addrtaken {
bvset(avarinit, pos)
} else {
if info.Flags&(LeftRead|LeftAddr) != 0 {
if prog.Info.Flags&(LeftRead|LeftAddr) != 0 {
bvset(uevar, pos)
}
if info.Flags&LeftWrite != 0 {
if prog.Info.Flags&LeftWrite != 0 {
if from.Node != nil && !Isfat(((from.Node).(*Node)).Type) {
bvset(varkill, pos)
}
......@@ -643,7 +643,7 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar Bvec, varkill Bvec, avarini
}
Next:
if info.Flags&(RightRead|RightWrite|RightAddr) != 0 {
if prog.Info.Flags&(RightRead|RightWrite|RightAddr) != 0 {
to := &prog.To
if to.Node != nil && to.Sym != nil && ((to.Node).(*Node)).Curfn == Curfn {
switch ((to.Node).(*Node)).Class &^ PHEAP {
......@@ -673,10 +673,10 @@ Next:
// It is not a read. It is equivalent to RightWrite except that
// having the RightAddr bit set keeps the registerizer from
// trying to substitute a register for the memory location.
if (info.Flags&RightRead != 0) || info.Flags&(RightAddr|RightWrite) == RightAddr {
if (prog.Info.Flags&RightRead != 0) || prog.Info.Flags&(RightAddr|RightWrite) == RightAddr {
bvset(uevar, pos)
}
if info.Flags&RightWrite != 0 {
if prog.Info.Flags&RightWrite != 0 {
if to.Node != nil && (!Isfat(((to.Node).(*Node)).Type) || prog.As == obj.AVARDEF) {
bvset(varkill, pos)
}
......
......@@ -355,15 +355,13 @@ func fixjmp(firstp *obj.Prog) {
var flowmark int
func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
var info ProgInfo
// Count and mark instructions to annotate.
nf := 0
for p := firstp; p != nil; p = p.Link {
p.Opt = nil // should be already, but just in case
info = Thearch.Proginfo(p)
if info.Flags&Skip != 0 {
Thearch.Proginfo(p)
if p.Info.Flags&Skip != 0 {
continue
}
p.Opt = &flowmark
......@@ -409,8 +407,7 @@ func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
var p *obj.Prog
for f := start; f != nil; f = f.Link {
p = f.Prog
info = Thearch.Proginfo(p)
if info.Flags&Break == 0 {
if p.Info.Flags&Break == 0 {
f1 = f.Link
f.S1 = f1
f1.P1 = f
......@@ -442,6 +439,7 @@ func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
func Flowend(graph *Graph) {
for f := graph.Start; f != nil; f = f.Link {
f.Prog.Info.Flags = 0 // drop cached proginfo
f.Prog.Opt = nil
}
}
......@@ -714,12 +712,8 @@ func mergetemp(firstp *obj.Prog) {
// We assume that the earliest reference to a temporary is its definition.
// This is not true of variables in general but our temporaries are all
// single-use (that's why we have so many!).
var p *obj.Prog
var info ProgInfo
for f := g.Start; f != nil; f = f.Link {
p = f.Prog
info = Thearch.Proginfo(p)
p := f.Prog
if p.From.Node != nil && ((p.From.Node).(*Node)).Opt != nil && p.To.Node != nil && ((p.To.Node).(*Node)).Opt != nil {
Fatal("double node %v", p)
}
......@@ -740,7 +734,7 @@ func mergetemp(firstp *obj.Prog) {
}
f.Data = v.use
v.use = f
if n == p.From.Node && (info.Flags&LeftAddr != 0) {
if n == p.From.Node && (p.Info.Flags&LeftAddr != 0) {
v.addr = 1
}
}
......@@ -753,9 +747,6 @@ func mergetemp(firstp *obj.Prog) {
nkill := 0
// Special case.
var p1 *obj.Prog
var info1 ProgInfo
var f *Flow
for i := 0; i < len(var_); i++ {
v = &var_[i]
if v.addr != 0 {
......@@ -763,11 +754,10 @@ func mergetemp(firstp *obj.Prog) {
}
// Used in only one instruction, which had better be a write.
f = v.use
f := v.use
if f != nil && f.Data.(*Flow) == nil {
p = f.Prog
info = Thearch.Proginfo(p)
if p.To.Node == v.node && (info.Flags&RightWrite != 0) && info.Flags&RightRead == 0 {
p := f.Prog
if p.To.Node == v.node && (p.Info.Flags&RightWrite != 0) && p.Info.Flags&RightRead == 0 {
p.As = obj.ANOP
p.To = obj.Addr{}
v.removed = 1
......@@ -785,14 +775,12 @@ func mergetemp(firstp *obj.Prog) {
// no jumps to the next instruction. Happens mainly in 386 compiler.
f = v.use
if f != nil && f.Link == f.Data.(*Flow) && (f.Data.(*Flow)).Data.(*Flow) == nil && Uniqp(f.Link) == f {
p = f.Prog
info = Thearch.Proginfo(p)
p1 = f.Link.Prog
info1 = Thearch.Proginfo(p1)
p := f.Prog
p1 := f.Link.Prog
const (
SizeAny = SizeB | SizeW | SizeL | SizeQ | SizeF | SizeD
)
if p.From.Node == v.node && p1.To.Node == v.node && (info.Flags&Move != 0) && (info.Flags|info1.Flags)&(LeftAddr|RightAddr) == 0 && info.Flags&SizeAny == info1.Flags&SizeAny {
if p.From.Node == v.node && p1.To.Node == v.node && (p.Info.Flags&Move != 0) && (p.Info.Flags|p1.Info.Flags)&(LeftAddr|RightAddr) == 0 && p.Info.Flags&SizeAny == p1.Info.Flags&SizeAny {
p1.From = p.From
Thearch.Excise(f)
v.removed = 1
......@@ -814,12 +802,12 @@ func mergetemp(firstp *obj.Prog) {
for i := 0; i < len(var_); i++ {
v = &var_[i]
gen++
for f = v.use; f != nil; f = f.Data.(*Flow) {
for f := v.use; f != nil; f = f.Data.(*Flow) {
mergewalk(v, f, uint32(gen))
}
if v.addr != 0 {
gen++
for f = v.use; f != nil; f = f.Data.(*Flow) {
for f := v.use; f != nil; f = f.Data.(*Flow) {
varkillwalk(v, f, uint32(gen))
}
}
......@@ -935,7 +923,7 @@ func mergetemp(firstp *obj.Prog) {
// Update node references to use merged temporaries.
for f := g.Start; f != nil; f = f.Link {
p = f.Prog
p := f.Prog
n, _ = p.From.Node.(*Node)
if n != nil {
v, _ = n.Opt.(*TempVar)
......@@ -1109,13 +1097,9 @@ func nilopt(firstp *obj.Prog) {
}
func nilwalkback(fcheck *Flow) {
var p *obj.Prog
var info ProgInfo
for f := fcheck; f != nil; f = Uniqp(f) {
p = f.Prog
info = Thearch.Proginfo(p)
if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
p := f.Prog
if (p.Info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
// Found initialization of value we're checking for nil.
// without first finding the check, so this one is unchecked.
return
......@@ -1146,8 +1130,7 @@ for(f1 = f0; f1 != nil; f1 = f1->p1) {
if(f1 != fcheck && p->as == ACHECKNIL && thearch.sameaddr(&p->from, &fcheck->prog->from))
break;
thearch.proginfo(&info, p);
if((info.flags & RightWrite) && thearch.sameaddr(&p->to, &fcheck->prog->from)) {
if((p.Info.flags & RightWrite) && thearch.sameaddr(&p->to, &fcheck->prog->from)) {
// Found initialization of value we're checking for nil.
// without first finding the check, so this one is unchecked.
fcheck->kill = 0;
......@@ -1168,10 +1151,8 @@ for(f = f0; f != f1; f = f->p1)
for(f2 = f->p2; f2 != nil; f2 = f2->p2link)
nilwalkback(fcheck, f2, gen);
*/
func nilwalkfwd(fcheck *Flow) {
var p *obj.Prog
var info ProgInfo
func nilwalkfwd(fcheck *Flow) {
// If the path down from rcheck dereferences the address
// (possibly with a small offset) before writing to memory
// and before any subsequent checks, it's okay to wait for
......@@ -1179,18 +1160,16 @@ func nilwalkfwd(fcheck *Flow) {
// avoid problems like:
// _ = *x // should panic
// for {} // no writes but infinite loop may be considered visible
var last *Flow
var last *Flow
for f := Uniqs(fcheck); f != nil; f = Uniqs(f) {
p = f.Prog
info = Thearch.Proginfo(p)
if (info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) {
p := f.Prog
if (p.Info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) {
fcheck.Data = &killed
return
}
if (info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) {
if (p.Info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) {
fcheck.Data = &killed
return
}
......@@ -1201,12 +1180,12 @@ func nilwalkfwd(fcheck *Flow) {
}
// Stop if value is lost.
if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
if (p.Info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
return
}
// Stop if memory write.
if (info.Flags&RightWrite != 0) && !Thearch.Regtyp(&p.To) {
if (p.Info.Flags&RightWrite != 0) && !Thearch.Regtyp(&p.To) {
return
}
......
......@@ -972,17 +972,11 @@ func regopt(firstp *obj.Prog) {
firstf = g.Start
var r *Reg
var info ProgInfo
var p *obj.Prog
var bit Bits
var z int
for f := firstf; f != nil; f = f.Link {
p = f.Prog
p := f.Prog
if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
continue
}
info = Thearch.Proginfo(p)
// Avoid making variables for direct-called functions.
if p.As == obj.ACALL && p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_EXTERN {
......@@ -990,30 +984,29 @@ func regopt(firstp *obj.Prog) {
}
// from vs to doesn't matter for registers.
r = f.Data.(*Reg)
r.use1.b[0] |= info.Reguse | info.Regindex
r.set.b[0] |= info.Regset
r := f.Data.(*Reg)
r.use1.b[0] |= p.Info.Reguse | p.Info.Regindex
r.set.b[0] |= p.Info.Regset
bit = mkvar(f, &p.From)
bit := mkvar(f, &p.From)
if bany(&bit) {
if info.Flags&LeftAddr != 0 {
if p.Info.Flags&LeftAddr != 0 {
setaddrs(bit)
}
if info.Flags&LeftRead != 0 {
for z = 0; z < BITS; z++ {
if p.Info.Flags&LeftRead != 0 {
for z := 0; z < BITS; z++ {
r.use1.b[z] |= bit.b[z]
}
}
if info.Flags&LeftWrite != 0 {
for z = 0; z < BITS; z++ {
if p.Info.Flags&LeftWrite != 0 {
for z := 0; z < BITS; z++ {
r.set.b[z] |= bit.b[z]
}
}
}
// Compute used register for reg
if info.Flags&RegRead != 0 {
if p.Info.Flags&RegRead != 0 {
r.use1.b[0] |= Thearch.RtoB(int(p.Reg))
}
......@@ -1025,16 +1018,16 @@ func regopt(firstp *obj.Prog) {
bit = mkvar(f, &p.To)
if bany(&bit) {
if info.Flags&RightAddr != 0 {
if p.Info.Flags&RightAddr != 0 {
setaddrs(bit)
}
if info.Flags&RightRead != 0 {
for z = 0; z < BITS; z++ {
if p.Info.Flags&RightRead != 0 {
for z := 0; z < BITS; z++ {
r.use2.b[z] |= bit.b[z]
}
}
if info.Flags&RightWrite != 0 {
for z = 0; z < BITS; z++ {
if p.Info.Flags&RightWrite != 0 {
for z := 0; z < BITS; z++ {
r.set.b[z] |= bit.b[z]
}
}
......@@ -1044,8 +1037,8 @@ func regopt(firstp *obj.Prog) {
for i := 0; i < nvar; i++ {
v := &var_[i]
if v.addr != 0 {
bit = blsh(uint(i))
for z = 0; z < BITS; z++ {
bit := blsh(uint(i))
for z := 0; z < BITS; z++ {
addrs.b[z] |= bit.b[z]
}
}
......@@ -1080,12 +1073,12 @@ func regopt(firstp *obj.Prog) {
for f := firstf; f != nil; f = f.Link {
f.Active = 0
r = f.Data.(*Reg)
r := f.Data.(*Reg)
r.act = zbits
}
for f := firstf; f != nil; f = f.Link {
p = f.Prog
p := f.Prog
if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) && ((p.To.Node).(*Node)).Opt != nil {
active++
walkvardef(p.To.Node.(*Node), f, active)
......@@ -1161,7 +1154,7 @@ loop2:
*/
mask := uint64((1 << uint(nreg)) - 1)
for f := firstf; f != nil; f = f.Link {
r = f.Data.(*Reg)
r := f.Data.(*Reg)
r.regu = (r.refbehind.b[0] | r.set.b[0]) & mask
r.set.b[0] &^= mask
r.use1.b[0] &^= mask
......@@ -1185,6 +1178,7 @@ loop2:
*/
f = firstf
var bit Bits
if f != nil {
r := f.Data.(*Reg)
for z := 0; z < BITS; z++ {
......@@ -1205,8 +1199,8 @@ loop2:
nregion = 0
var rgp *Rgn
for f := firstf; f != nil; f = f.Link {
r = f.Data.(*Reg)
for z = 0; z < BITS; z++ {
r := f.Data.(*Reg)
for z := 0; z < BITS; z++ {
bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
}
if bany(&bit) && f.Refset == 0 {
......@@ -1217,7 +1211,7 @@ loop2:
Thearch.Excise(f)
}
for z = 0; z < BITS; z++ {
for z := 0; z < BITS; z++ {
bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
}
for bany(&bit) {
......
......@@ -225,6 +225,18 @@ type Prog struct {
Printed uint8
Width int8
Mode int8
Info ProgInfo
}
// ProgInfo holds information about the instruction for use
// by clients such as the compiler. The exact meaning of this
// data is up to the client and is not interpreted by the cmd/internal/obj/... packages.
type ProgInfo struct {
Flags uint32 // flag bits
Reguse uint64 // registers implicitly used by this instruction
Regset uint64 // registers implicitly set by this instruction
Regindex uint64 // registers used by addressing mode
}
// Prog.as opcodes.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment