Commit 118b3fe7 authored by Matthew Dempsky

cmd/compile/internal/gc: refactor ACALL Prog creation

This abstracts creation of ACALL Progs into package gc. The main
benefit of this today is that it lets us refactor away a lot of
common boilerplate code in the architecture backends.

Later, once liveness analysis happens on the SSA graph, this will also
provide an easy insertion point for emitting the PCDATA Progs
immediately before call instructions.

Passes toolstash-check -all.

Change-Id: Ia15108ace97201cd84314f1ca916dfeb4f09d61c
Reviewed-on: https://go-review.googlesource.com/38081
Reviewed-by: Keith Randall <khr@golang.org>
parent 2e7c3b3f
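
Before reading the diff below, it may help to see the shape of the refactor in isolation: each backend used to hand-roll its CALL emission (including the deferreturn NOP special case), and after this change it registers one small arch hook and defers to a shared helper. The following is a minimal standalone sketch of that pattern; Prog, Arch, and call here are simplified stand-ins, not the real cmd/compile API.

package main

import "fmt"

// Prog is a simplified stand-in for obj.Prog, which carries far more state.
type Prog struct {
	As     string // opcode, e.g. "CALL"
	Target string // call target, for this sketch only
}

// Arch mirrors the role of gc.Thearch: per-architecture hooks set at Init.
type Arch struct {
	Name    string
	Ginsnop func() // emits one arch-specific hardware NOP
}

var thearch Arch

// call plays the role of gc.(*SSAGenState).Call: the deferreturn
// special case lives here once, instead of being copied into every
// backend's ssaGenValue.
func call(target string, isDeferreturn bool) *Prog {
	if isDeferreturn {
		// A real hardware NOP keeps stack traces pointing at a
		// related instruction; see the comment in the diff below.
		thearch.Ginsnop()
	}
	return &Prog{As: "CALL", Target: target}
}

func main() {
	// What a backend's Init now does: register its NOP emitter.
	thearch = Arch{Name: "amd64", Ginsnop: func() { fmt.Println("NOP") }}

	// What a backend's CALL cases now reduce to: one shared call.
	p := call("deferreturn(SB)", true)
	fmt.Println(p.As, p.Target)
}
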
@@ -22,6 +22,7 @@ func Init() {
 	gc.Thearch.MAXWIDTH = 1 << 50
 	gc.Thearch.Defframe = defframe
+	gc.Thearch.Ginsnop = ginsnop
 	gc.Thearch.Proginfo = proginfo
 	gc.Thearch.SSAMarkMoves = ssaMarkMoves
...
@@ -743,39 +743,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			q.To.Type = obj.TYPE_REG
 			q.To.Reg = r
 		}
-	case ssa.OpAMD64CALLstatic:
-		if v.Aux.(*obj.LSym) == gc.Deferreturn {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = v.Aux.(*obj.LSym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpAMD64CALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpAMD64CALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
+	case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
+		s.Call(v)
 	case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
 		ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
 		ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:
...
@@ -16,6 +16,7 @@ func Init() {
 	gc.Thearch.MAXWIDTH = (1 << 32) - 1
 	gc.Thearch.Defframe = defframe
+	gc.Thearch.Ginsnop = ginsnop
 	gc.Thearch.Proginfo = proginfo
 	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
...
@@ -625,41 +625,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.From.Offset = v.AuxInt
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = v.Reg()
-	case ssa.OpARMCALLstatic:
-		if v.Aux.(*obj.LSym) == gc.Deferreturn {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = v.Aux.(*obj.LSym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARMCALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARMCALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
+	case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
+		s.Call(v)
 	case ssa.OpARMDUFFZERO:
 		p := gc.Prog(obj.ADUFFZERO)
 		p.To.Type = obj.TYPE_MEM
...
@@ -16,6 +16,7 @@ func Init() {
 	gc.Thearch.MAXWIDTH = 1 << 50
 	gc.Thearch.Defframe = defframe
+	gc.Thearch.Ginsnop = ginsnop
 	gc.Thearch.Proginfo = proginfo
 	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
...
@@ -622,41 +622,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p4 := gc.Prog(arm64.ABLE)
 		p4.To.Type = obj.TYPE_BRANCH
 		gc.Patch(p4, p)
-	case ssa.OpARM64CALLstatic:
-		if v.Aux.(*obj.LSym) == gc.Deferreturn {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = v.Aux.(*obj.LSym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARM64CALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpARM64CALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
+	case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
+		s.Call(v)
 	case ssa.OpARM64LoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
 		p := gc.Prog(arm64.AMOVB)
...
@@ -365,10 +365,11 @@ type Arch struct {
 	REGSP    int
 	MAXWIDTH int64
+	Use387   bool // should 386 backend use 387 FP instructions instead of sse2.
 	Defframe func(*obj.Prog)
+	Ginsnop  func()
 	Proginfo func(*obj.Prog) ProgInfo
-	Use387   bool // should 8g use 387 FP instructions instead of sse2.
 
 	// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
 	SSAMarkMoves func(*SSAGenState, *ssa.Block)
...
@@ -4756,6 +4756,42 @@ func (s *SSAGenState) AddrScratch(a *obj.Addr) {
 	a.Offset = s.ScratchFpMem.Xoffset
 }
 
+func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
+	if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
+		// Deferred calls will appear to be returning to
+		// the CALL deferreturn(SB) that we are about to emit.
+		// However, the stack trace code will show the line
+		// of the instruction byte before the return PC.
+		// To avoid that being an unrelated instruction,
+		// insert an actual hardware NOP that will have the right line number.
+		// This is different from obj.ANOP, which is a virtual no-op
+		// that doesn't make it into the instruction stream.
+		Thearch.Ginsnop()
+	}
+
+	p := Prog(obj.ACALL)
+	if sym, ok := v.Aux.(*obj.LSym); ok {
+		p.To.Type = obj.TYPE_MEM
+		p.To.Name = obj.NAME_EXTERN
+		p.To.Sym = sym
+	} else {
+		// TODO(mdempsky): Can these differences be eliminated?
+		switch Thearch.LinkArch.Family {
+		case sys.AMD64, sys.I386, sys.PPC64, sys.S390X:
+			p.To.Type = obj.TYPE_REG
+		case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
+			p.To.Type = obj.TYPE_MEM
+		default:
+			Fatalf("unknown indirect call family")
+		}
+		p.To.Reg = v.Args[0].Reg()
+	}
+	if Maxarg < v.AuxInt {
+		Maxarg = v.AuxInt
+	}
+	return p
+}
+
 // fieldIdx finds the index of the field referred to by the ODOT node n.
 func fieldIdx(n *Node) int {
 	t := n.Left.Type
...
@@ -19,6 +19,7 @@ func Init() {
 	gc.Thearch.REGSP = mips.REGSP
 	gc.Thearch.MAXWIDTH = (1 << 31) - 1
 	gc.Thearch.Defframe = defframe
+	gc.Thearch.Ginsnop = ginsnop
 	gc.Thearch.Proginfo = proginfo
 	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
 	gc.Thearch.SSAGenValue = ssaGenValue
...
@@ -477,41 +477,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p6.Reg = mips.REG_R1
 		p6.To.Type = obj.TYPE_BRANCH
 		gc.Patch(p6, p2)
-	case ssa.OpMIPSCALLstatic:
-		if v.Aux.(*obj.LSym) == gc.Deferreturn {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = v.Aux.(*obj.LSym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPSCALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPSCALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
+	case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
+		s.Call(v)
 	case ssa.OpMIPSLoweredAtomicLoad:
 		gc.Prog(mips.ASYNC)
...
@@ -20,6 +20,7 @@ func Init() {
 	gc.Thearch.MAXWIDTH = 1 << 50
 	gc.Thearch.Defframe = defframe
+	gc.Thearch.Ginsnop = ginsnop
 	gc.Thearch.Proginfo = proginfo
 	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
...
@@ -480,41 +480,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p6.Reg = mips.REG_R1
 		p6.To.Type = obj.TYPE_BRANCH
 		gc.Patch(p6, p2)
-	case ssa.OpMIPS64CALLstatic:
-		if v.Aux.(*obj.LSym) == gc.Deferreturn {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = v.Aux.(*obj.LSym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPS64CALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpMIPS64CALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Offset = 0
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
+	case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter:
+		s.Call(v)
 	case ssa.OpMIPS64LoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
 		p := gc.Prog(mips.AMOVB)
...
@@ -19,6 +19,7 @@ func Init() {
 	gc.Thearch.MAXWIDTH = 1 << 50
 	gc.Thearch.Defframe = defframe
+	gc.Thearch.Ginsnop = ginsnop2
 	gc.Thearch.Proginfo = proginfo
 	gc.Thearch.SSAMarkMoves = ssaMarkMoves
...
@@ -98,3 +98,29 @@ func ginsnop() {
 	p.To.Type = obj.TYPE_REG
 	p.To.Reg = ppc64.REG_R0
 }
+
+func ginsnop2() {
+	// PPC64 is unusual because TWO nops are required
+	// (see gc/cgen.go, gc/plive.go -- copy of comment below)
+	//
+	// On ppc64, when compiling Go into position
+	// independent code on ppc64le we insert an
+	// instruction to reload the TOC pointer from the
+	// stack as well. See the long comment near
+	// jmpdefer in runtime/asm_ppc64.s for why.
+	// If the MOVD is not needed, insert a hardware NOP
+	// so that the same number of instructions are used
+	// on ppc64 in both shared and non-shared modes.
+	ginsnop()
+	if gc.Ctxt.Flag_shared {
+		p := gc.Prog(ppc64.AMOVD)
+		p.From.Type = obj.TYPE_MEM
+		p.From.Offset = 24
+		p.From.Reg = ppc64.REGSP
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = ppc64.REG_R2
+	} else {
+		ginsnop()
+	}
+}
@@ -941,45 +941,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		gc.Patch(p4, p)
 	case ssa.OpPPC64CALLstatic:
-		if v.Aux.(*obj.LSym) == gc.Deferreturn {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert two actual hardware NOPs that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			// PPC64 is unusual because TWO nops are required
-			// (see gc/cgen.go, gc/plive.go -- copy of comment below)
-			//
-			// On ppc64, when compiling Go into position
-			// independent code on ppc64le we insert an
-			// instruction to reload the TOC pointer from the
-			// stack as well. See the long comment near
-			// jmpdefer in runtime/asm_ppc64.s for why.
-			// If the MOVD is not needed, insert a hardware NOP
-			// so that the same number of instructions are used
-			// on ppc64 in both shared and non-shared modes.
-			ginsnop()
-			if gc.Ctxt.Flag_shared {
-				p := gc.Prog(ppc64.AMOVD)
-				p.From.Type = obj.TYPE_MEM
-				p.From.Offset = 24
-				p.From.Reg = ppc64.REGSP
-				p.To.Type = obj.TYPE_REG
-				p.To.Reg = ppc64.REG_R2
-			} else {
-				ginsnop()
-			}
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = v.Aux.(*obj.LSym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
+		s.Call(v)
 	case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter:
 		p := gc.Prog(ppc64.AMOVD)
...
@@ -1001,8 +963,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			q.To.Reg = ppc64.REG_R12
 		}
-		pp := gc.Prog(obj.ACALL)
-		pp.To.Type = obj.TYPE_REG
+		pp := s.Call(v)
 		pp.To.Reg = ppc64.REG_CTR
 		if gc.Ctxt.Flag_shared {
...
@@ -1018,10 +979,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			q.To.Reg = ppc64.REG_R2
 		}
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
 	case ssa.OpPPC64LoweredNilCheck:
 		// Issue a load which will fault if arg is nil.
 		p := gc.Prog(ppc64.AMOVBZ)
...
@@ -15,6 +15,7 @@ func Init() {
 	gc.Thearch.MAXWIDTH = 1 << 50
 	gc.Thearch.Defframe = defframe
+	gc.Thearch.Ginsnop = ginsnop
 	gc.Thearch.Proginfo = proginfo
 	gc.Thearch.SSAMarkMoves = ssaMarkMoves
...
@@ -481,39 +481,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.From.Reg = s390x.REGG
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = r
-	case ssa.OpS390XCALLstatic:
-		if v.Aux.(*obj.LSym) == gc.Deferreturn {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = v.Aux.(*obj.LSym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpS390XCALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.OpS390XCALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
+	case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter:
+		s.Call(v)
 	case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW,
 		ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
 		p := gc.Prog(v.Op.Asm())
...
@@ -26,6 +26,7 @@ func Init() {
 	gc.Thearch.MAXWIDTH = (1 << 32) - 1
 	gc.Thearch.Defframe = defframe
+	gc.Thearch.Ginsnop = ginsnop
 	gc.Thearch.Proginfo = proginfo
 	gc.Thearch.SSAMarkMoves = ssaMarkMoves
...
@@ -669,39 +669,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			q.To.Type = obj.TYPE_REG
 			q.To.Reg = r
 		}
-	case ssa.Op386CALLstatic:
-		if v.Aux.(*obj.LSym) == gc.Deferreturn {
-			// Deferred calls will appear to be returning to
-			// the CALL deferreturn(SB) that we are about to emit.
-			// However, the stack trace code will show the line
-			// of the instruction byte before the return PC.
-			// To avoid that being an unrelated instruction,
-			// insert an actual hardware NOP that will have the right line number.
-			// This is different from obj.ANOP, which is a virtual no-op
-			// that doesn't make it into the instruction stream.
-			ginsnop()
-		}
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_MEM
-		p.To.Name = obj.NAME_EXTERN
-		p.To.Sym = v.Aux.(*obj.LSym)
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.Op386CALLclosure:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
-	case ssa.Op386CALLinter:
-		p := gc.Prog(obj.ACALL)
-		p.To.Type = obj.TYPE_REG
-		p.To.Reg = v.Args[0].Reg()
-		if gc.Maxarg < v.AuxInt {
-			gc.Maxarg = v.AuxInt
-		}
+	case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
+		s.Call(v)
 	case ssa.Op386NEGL,
 		ssa.Op386BSWAPL,
 		ssa.Op386NOTL:
...
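
One wrinkle the shared helper absorbs, visible in the gc/ssa.go hunk above, is that the ports disagree on how an indirect call target is addressed: some encode it as a register operand, others as a memory-form operand through a register. The following standalone sketch condenses that dispatch; the names AddrType and indirectCallAddrType are illustrative stand-ins, not the real obj/sys API.

package main

import "fmt"

// AddrType stands in for obj.TYPE_REG / obj.TYPE_MEM.
type AddrType string

const (
	TypeReg AddrType = "TYPE_REG"
	TypeMem AddrType = "TYPE_MEM"
)

// indirectCallAddrType mirrors the switch in gc.(*SSAGenState).Call:
// the x86 family, ppc64, and s390x use a register operand, while the
// arm and mips ports use a memory-form operand.
func indirectCallAddrType(family string) (AddrType, error) {
	switch family {
	case "amd64", "386", "ppc64", "s390x":
		return TypeReg, nil
	case "arm", "arm64", "mips", "mips64":
		return TypeMem, nil
	}
	return "", fmt.Errorf("unknown indirect call family %q", family)
}

func main() {
	for _, f := range []string{"amd64", "arm64", "s390x"} {
		t, _ := indirectCallAddrType(f)
		fmt.Printf("%-6s -> %s\n", f, t)
	}
}
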