Commit 03d152f3 authored by Lynn Boger, committed by David Chase

[dev.ssa] cmd/compile: Add initial SSA configuration for PPC64

This adds the initial SSA implementation for PPC64.
The Go toolchain builds and all.bash runs correctly. A simple
hello.go builds but does not yet run.

Change-Id: I7cec211b934cd7a2dd75a6cdfaf9f71867063466
Reviewed-on: https://go-review.googlesource.com/24453
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent 68dc102e
@@ -4134,6 +4134,19 @@ func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
}
}
func AuxOffset(v *ssa.Value) (offset int64) {
if v.Aux == nil {
return 0
}
switch sym := v.Aux.(type) {
case *ssa.AutoSymbol:
n := sym.Node.(*Node)
return n.Xoffset
}
return 0
}
// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
func AddAux(a *obj.Addr, v *ssa.Value) {
AddAux2(a, v, v.AuxInt)
......
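A hedged reading of the pairing above: AuxOffset recovers the frame offset of a stack auto named by v.Aux, while AddAux folds v's AuxInt (and any symbol) into an address operand. A sketch of the intended use (the OpPPC64ADDconst case later in this CL is the first consumer):
// if v.Aux != nil {
//     p.From.Offset = gc.AuxOffset(v) // frame offset of the auto
// } else {
//     p.From.Offset = v.AuxInt
// }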
@@ -66,6 +66,11 @@ func Main() {
gc.Thearch.Doregbits = doregbits
gc.Thearch.Regnames = regnames
gc.Thearch.SSARegToReg = ssaRegToReg
gc.Thearch.SSAMarkMoves = ssaMarkMoves
gc.Thearch.SSAGenValue = ssaGenValue
gc.Thearch.SSAGenBlock = ssaGenBlock
initvariants()
initproginfo()
......
@@ -58,6 +58,8 @@ var progtable = [ppc64.ALAST & obj.AMask]obj.ProgInfo{
ppc64.ASRAD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
ppc64.ACMP & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
ppc64.ACMPU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
ppc64.ACMPW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
ppc64.ACMPWU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
ppc64.ATD & obj.AMask: {Flags: gc.SizeQ | gc.RightRead},
// Floating point.
......
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ppc64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
)
var ssaRegToReg = []int16{
ppc64.REGZERO,
ppc64.REGSP,
ppc64.REG_R2,
ppc64.REG_R3,
ppc64.REG_R4,
ppc64.REG_R5,
ppc64.REG_R6,
ppc64.REG_R7,
ppc64.REG_R8,
ppc64.REG_R9,
ppc64.REG_R10,
ppc64.REGCTXT,
ppc64.REG_R12,
ppc64.REG_R13,
ppc64.REG_R14,
ppc64.REG_R15,
ppc64.REG_R16,
ppc64.REG_R17,
ppc64.REG_R18,
ppc64.REG_R19,
ppc64.REG_R20,
ppc64.REG_R21,
ppc64.REG_R22,
ppc64.REG_R23,
ppc64.REG_R24,
ppc64.REG_R25,
ppc64.REG_R26,
ppc64.REG_R27,
ppc64.REG_R28,
ppc64.REG_R29,
ppc64.REGG,
ppc64.REGTMP,
}
// Associated condition bit
var condBits = map[ssa.Op]uint8{
ssa.OpPPC64Equal: ppc64.C_COND_EQ,
ssa.OpPPC64NotEqual: ppc64.C_COND_EQ,
ssa.OpPPC64LessThan: ppc64.C_COND_LT,
ssa.OpPPC64GreaterEqual: ppc64.C_COND_LT,
ssa.OpPPC64GreaterThan: ppc64.C_COND_GT,
ssa.OpPPC64LessEqual: ppc64.C_COND_GT,
}
// Is the condition bit set? 1=yes 0=no
var condBitSet = map[ssa.Op]uint8{
ssa.OpPPC64Equal: 1,
ssa.OpPPC64NotEqual: 0,
ssa.OpPPC64LessThan: 1,
ssa.OpPPC64GreaterEqual: 0,
ssa.OpPPC64GreaterThan: 1,
ssa.OpPPC64LessEqual: 0,
}
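// These two tables are not yet consumed by ssaGenValue in this change.
// A hedged sketch of how a later change might combine them; the helper
// below is hypothetical, not part of this CL:
// condInfo returns which CR bit a comparison pseudo-op tests and whether
// that bit must be set (true) or clear (false) for the condition to hold.
// E.g. OpPPC64GreaterEqual yields (C_COND_LT, false): "greater or equal"
// is "LT bit clear".
// func condInfo(op ssa.Op) (bit uint8, set bool) {
//     return condBits[op], condBitSet[op] == 1
// }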
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
// The body below is disabled for now; it is kept as a template to be
// enabled once flag liveness is tracked for PPC64.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
// flive := b.FlagsLiveAtEnd
// if b.Control != nil && b.Control.Type.IsFlags() {
// flive = true
// }
// for i := len(b.Values) - 1; i >= 0; i-- {
// v := b.Values[i]
// if flive && (v.Op == ssa.OpPPC64MOVWconst || v.Op == ssa.OpPPC64MOVDconst) {
// // The "mark" is any non-nil Aux value.
// v.Aux = v
// }
// if v.Type.IsFlags() {
// flive = false
// }
// for _, a := range v.Args {
// if a.Type.IsFlags() {
// flive = true
// }
// }
// }
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
s.SetLineno(v.Line)
switch v.Op {
case ssa.OpInitMem:
// memory arg needs no code
case ssa.OpArg:
// input args need no code
case ssa.OpSP, ssa.OpSB:
// nothing to do
case ssa.OpCopy:
case ssa.OpLoadReg:
// TODO: by type
p := gc.Prog(ppc64.AMOVD)
n, off := gc.AutoVar(v.Args[0])
p.From.Type = obj.TYPE_MEM
p.From.Node = n
p.From.Sym = gc.Linksym(n.Sym)
p.From.Offset = off
if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
p.From.Name = obj.NAME_PARAM
p.From.Offset += n.Xoffset
} else {
p.From.Name = obj.NAME_AUTO
}
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpStoreReg:
// TODO: by type
p := gc.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[0])
n, off := gc.AutoVar(v)
p.To.Type = obj.TYPE_MEM
p.To.Node = n
p.To.Sym = gc.Linksym(n.Sym)
p.To.Offset = off
if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
p.To.Name = obj.NAME_PARAM
p.To.Offset += n.Xoffset
} else {
p.To.Name = obj.NAME_AUTO
}
case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS, ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64FMUL, ssa.OpPPC64FMULS, ssa.OpPPC64FDIV, ssa.OpPPC64FDIVS, ssa.OpPPC64AND, ssa.OpPPC64OR, ssa.OpPPC64XOR:
r := gc.SSARegNum(v)
r1 := gc.SSARegNum(v.Args[0])
r2 := gc.SSARegNum(v.Args[1])
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.Reg = r2
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpPPC64NEG:
r := gc.SSARegNum(v)
p := gc.Prog(v.Op.Asm())
if r != gc.SSARegNum(v.Args[0]) {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst:
p := gc.Prog(v.Op.Asm())
p.Reg = gc.SSARegNum(v.Args[0])
p.From.Type = obj.TYPE_CONST
if v.Aux != nil {
p.From.Offset = gc.AuxOffset(v)
} else {
p.From.Offset = v.AuxInt
}
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpPPC64MOVDconst, ssa.OpPPC64MOVWconst, ssa.OpPPC64MOVHconst, ssa.OpPPC64MOVBconst, ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU:
// arg0 is compared against arg1: From holds arg0, To holds arg1.
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v.Args[1])
case ssa.OpPPC64CMPconst:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[0])
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg:
// Sign- or zero-extend in register to the required size.
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload, ssa.OpPPC64MOVBload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpPPC64MOVDstoreconst, ssa.OpPPC64MOVWstoreconst, ssa.OpPPC64MOVHstoreconst, ssa.OpPPC64MOVBstoreconst:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux2(&p.To, v, sc.Off())
case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[1])
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.To, v)
case ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[1])
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux(&p.To, v)
case ssa.OpPPC64CALLstatic:
// TODO: deferreturn
p := gc.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
if gc.Maxarg < v.AuxInt {
gc.Maxarg = v.AuxInt
}
case ssa.OpVarDef:
gc.Gvardef(v.Aux.(*gc.Node))
case ssa.OpVarKill:
gc.Gvarkill(v.Aux.(*gc.Node))
case ssa.OpVarLive:
gc.Gvarlive(v.Aux.(*gc.Node))
case ssa.OpPPC64Equal,
ssa.OpPPC64NotEqual,
ssa.OpPPC64LessThan,
ssa.OpPPC64LessEqual,
ssa.OpPPC64GreaterThan,
ssa.OpPPC64GreaterEqual:
v.Fatalf("pseudo-op made it to output: %s", v.LongString())
case ssa.OpPhi:
// just check to make sure regalloc and stackalloc did it right
if v.Type.IsMemory() {
return
}
f := v.Block.Func
loc := f.RegAlloc[v.ID]
for _, a := range v.Args {
if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
}
}
default:
v.Unimplementedf("genValue not implemented: %s", v.LongString())
}
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
s.SetLineno(b.Line)
switch b.Kind {
case ssa.BlockCall:
if b.Succs[0].Block() != next {
p := gc.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockRet:
gc.Prog(obj.ARET)
}
}
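The Branch records appended above are fixed up later by the generic gc driver, once the first Prog of every block is known; roughly (paraphrasing genssa, shown for orientation only):
// for _, br := range s.Branches {
//     br.P.To.Val = s.bstart[br.B.ID] // patch branch target to block start
// }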
@@ -155,6 +155,15 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
c.flagRegMask = flagRegMaskARM
c.FPReg = framepointerRegARM
c.hasGReg = true
case "ppc64le":
c.IntSize = 8
c.PtrSize = 8
c.lowerBlock = rewriteBlockPPC64
c.lowerValue = rewriteValuePPC64
c.registers = registersPPC64[:]
c.gpRegMask = gpRegMaskPPC64
c.fpRegMask = fpRegMaskPPC64
c.FPReg = framepointerRegPPC64
default:
fe.Unimplementedf(0, "arch %s not implemented", arch)
}
......
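NewConfig dispatches on the arch string alone, so this case is all the wiring needed for the backend to be selected when GOARCH=ppc64le (note that only the little-endian variant is listed). Roughly, the gc frontend reaches it via (paraphrased):
// ssaConfig = ssa.NewConfig(Thearch.Thestring, &ssaExp, Ctxt, Debug['N'] == 0)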
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Lowering arithmetic
(Add64 x y) -> (ADD x y)
(AddPtr x y) -> (ADD x y)
(Add32 x y) -> (ADD (SignExt32to64 x) (SignExt32to64 y))
(Add16 x y) -> (ADD (SignExt16to64 x) (SignExt16to64 y))
(Add8 x y) -> (ADD (SignExt8to64 x) (SignExt8to64 y))
(Add64F x y) -> (FADD x y)
(Add32F x y) -> (FADDS x y)
(Sub64 x y) -> (SUB x y)
(SubPtr x y) -> (SUB x y)
(Sub32 x y) -> (SUB x y)
(Sub16 x y) -> (SUB (SignExt16to64 x) (SignExt16to64 y))
(Sub8 x y) -> (SUB (SignExt8to64 x) (SignExt8to64 y))
(Sub32F x y) -> (FSUBS x y)
(Sub64F x y) -> (FSUB x y)
(Mul64 x y) -> (MULLD x y)
(Mul32 x y) -> (MULLW x y)
(Mul16 x y) -> (MULLW (SignExt16to32 x) (SignExt16to32 y))
(Mul8 x y) -> (MULLW (SignExt8to32 x) (SignExt8to32 y))
(Mul32F x y) -> (FMULS x y)
(Mul64F x y) -> (FMUL x y)
(Div32F x y) -> (FDIVS x y)
(Div64F x y) -> (FDIV x y)
// Lowering constants
(Const8 [val]) -> (MOVWconst [val])
(Const16 [val]) -> (MOVWconst [val])
(Const32 [val]) -> (MOVWconst [val])
(Const64 [val]) -> (MOVDconst [val])
(Const32F [val]) -> (FMOVSconst [val])
(Const64F [val]) -> (FMOVDconst [val])
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVBconst [b])
(Addr {sym} base) -> (ADDconst {sym} base)
(OffPtr [off] ptr) -> (ADD (MOVDconst <config.Frontend().TypeInt64()> [off]) ptr)
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(And64 x y) -> (AND x y)
(And32 x y) -> (AND (ZeroExt32to64 x) (ZeroExt32to64 y))
(And16 x y) -> (AND (ZeroExt16to64 x) (ZeroExt16to64 y))
(And8 x y) -> (AND (ZeroExt8to64 x) (ZeroExt8to64 y))
(Or64 x y) -> (OR x y)
(Or32 x y) -> (OR (ZeroExt32to64 x) (ZeroExt32to64 y))
(Or16 x y) -> (OR (ZeroExt16to64 x) (ZeroExt16to64 y))
(Or8 x y) -> (OR (ZeroExt8to64 x) (ZeroExt8to64 y))
(Xor64 x y) -> (XOR x y)
(Xor32 x y) -> (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))
(Xor16 x y) -> (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))
(Xor8 x y) -> (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))
(Neg64 x) -> (NEG x)
(Neg32 x) -> (NEG (ZeroExt32to64 x))
(Neg16 x) -> (NEG (ZeroExt16to64 x))
(Neg8 x) -> (NEG (ZeroExt8to64 x))
// Lowering comparisons
(Eq8 x y) -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (Equal (CMPW x y))
(Eq64 x y) -> (Equal (CMP x y))
(EqPtr x y) -> (Equal (CMP x y))
(Neq8 x y) -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) -> (NotEqual (CMPW x y))
(Neq64 x y) -> (NotEqual (CMP x y))
(NeqPtr x y) -> (NotEqual (CMP x y))
(Less8 x y) -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Less32 x y) -> (LessThan (CMPW x y))
(Less64 x y) -> (LessThan (CMP x y))
(Less8U x y) -> (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) -> (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) -> (LessThan (CMPWU x y))
(Less64U x y) -> (LessThan (CMPU x y))
(Leq8 x y) -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (LessEqual (CMPW x y))
(Leq64 x y) -> (LessEqual (CMP x y))
(Leq8U x y) -> (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (LessEqual (CMPWU x y))
(Leq64U x y) -> (LessEqual (CMPU x y))
(Greater8 x y) -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Greater32 x y) -> (GreaterThan (CMPW x y))
(Greater64 x y) -> (GreaterThan (CMP x y))
(Greater8U x y) -> (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Greater16U x y) -> (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Greater32U x y) -> (GreaterThan (CMPWU x y))
(Greater64U x y) -> (GreaterThan (CMPU x y))
(Geq8 x y) -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Geq32 x y) -> (GreaterEqual (CMPW x y))
(Geq64 x y) -> (GreaterEqual (CMP x y))
(Geq8U x y) -> (GreaterEqual (CMPU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Geq16U x y) -> (GreaterEqual (CMPU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Geq32U x y) -> (GreaterEqual (CMPU x y))
(Geq64U x y) -> (GreaterEqual (CMPU x y))
(Less64F x y) -> (LessThan (FCMPU x y))
(Leq64F x y) -> (LessEqual (FCMPU x y)) // ??
(Eq64F x y) -> (Equal (FCMPU x y))
(Neq64F x y) -> (NotEqual (FCMPU x y))
// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) -> (EQ cc yes no)
(If (NotEqual cc) yes no) -> (NE cc yes no)
(If (LessThan cc) yes no) -> (LT cc yes no)
(If (LessEqual cc) yes no) -> (LE cc yes no)
(If (GreaterThan cc) yes no) -> (GT cc yes no)
(If (GreaterEqual cc) yes no) -> (GE cc yes no)
(If cond yes no) -> (NE (CMPconst [0] cond) yes no)
// Absorb boolean tests into block
(NE (CMPconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
(NE (CMPconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
(NE (CMPconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
(NE (CMPconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
(NE (CMPconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
(NE (CMPconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
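// Illustrative walk-through (not a rewrite rule): for `if x < y` on int64,
// (Less64 x y) lowers to (LessThan (CMP x y)) above; the enclosing
// (If (LessThan cc) yes no) is then absorbed into (LT cc yes no), so the
// LessThan pseudo-op never needs to be materialized into a register.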
// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || (is8BitInt(t) && isSigned(t))) -> (MOVBload ptr mem)
(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) -> (MOVBZload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
(Store [8] ptr val mem) -> (MOVDstore ptr val mem)
(Store [4] ptr val mem) -> (MOVWstore ptr val mem)
(Store [2] ptr val mem) -> (MOVHstore ptr val mem)
(Store [1] ptr val mem) -> (MOVBstore ptr val mem)
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
(Zero [2] destptr mem) -> (MOVHstoreconst [0] destptr mem)
(Zero [4] destptr mem) -> (MOVWstoreconst [0] destptr mem)
(Zero [8] destptr mem) -> (MOVDstoreconst [0] destptr mem)
(Zero [3] destptr mem) ->
(MOVBstoreconst [makeValAndOff(0,2)] destptr
(MOVHstoreconst [0] destptr mem))
(Zero [5] destptr mem) ->
(MOVBstoreconst [makeValAndOff(0,4)] destptr
(MOVWstoreconst [0] destptr mem))
(Zero [6] destptr mem) ->
(MOVHstoreconst [makeValAndOff(0,4)] destptr
(MOVWstoreconst [0] destptr mem))
// Zero small numbers of words directly.
(Zero [16] destptr mem) ->
(MOVDstoreconst [makeValAndOff(0,8)] destptr
(MOVDstoreconst [0] destptr mem))
(Zero [24] destptr mem) ->
(MOVDstoreconst [makeValAndOff(0,16)] destptr
(MOVDstoreconst [makeValAndOff(0,8)] destptr
(MOVDstoreconst [0] destptr mem)))
(Zero [32] destptr mem) ->
(MOVDstoreconst [makeValAndOff(0,24)] destptr
(MOVDstoreconst [makeValAndOff(0,16)] destptr
(MOVDstoreconst [makeValAndOff(0,8)] destptr
(MOVDstoreconst [0] destptr mem))))
// Optimizations
(ADD (MOVDconst [c]) x) -> (ADDconst [c] x)
(ADD x (MOVDconst [c])) -> (ADDconst [c] x)
// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16 x) -> (MOVBreg x)
(SignExt8to32 x) -> (MOVBreg x)
(SignExt8to64 x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)
(SignExt16to64 x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)
(ZeroExt8to16 x) -> (MOVBZreg x)
(ZeroExt8to32 x) -> (MOVBZreg x)
(ZeroExt8to64 x) -> (MOVBZreg x)
(ZeroExt16to32 x) -> (MOVHZreg x)
(ZeroExt16to64 x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)
(Trunc16to8 x) -> (MOVBreg x)
(Trunc32to8 x) -> (MOVBreg x)
(Trunc32to16 x) -> (MOVHreg x)
(Trunc64to8 x) -> (MOVBreg x)
(Trunc64to16 x) -> (MOVHreg x)
(Trunc64to32 x) -> (MOVWreg x)
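// Note (illustrative): because extensions always go to 64 bits, truncation
// needs no instruction of its own: Trunc64to8 simply re-extends the low
// byte with MOVBreg, and later users see a correctly extended value.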
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import "strings"
var regNamesPPC64 = []string{
"R0", // REGZERO
"SP", // REGSP
"SB", // REGSB
"R3",
"R4",
"R5",
"R6",
"R7",
"R8",
"R9",
"R10",
"R11", // REGCTXT for closures
"R12",
"R13", // REGTLS
"R14",
"R15",
"R16",
"R17",
"R18",
"R19",
"R20",
"R21",
"R22",
"R23",
"R24",
"R25",
"R26",
"R27",
"R28",
"R29",
"R30", // REGG
"R31", // REGTMP
"F0",
"F1",
"F2",
"F3",
"F4",
"F5",
"F6",
"F7",
"F8",
"F9",
"F10",
"F11",
"F12",
"F13",
"F14",
"F15",
"F16",
"F17",
"F18",
"F19",
"F20",
"F21",
"F22",
"F23",
"F24",
"F25",
"F26",
"CR",
}
func init() {
// Make map from reg names to reg integers.
if len(regNamesPPC64) > 64 {
panic("too many registers")
}
num := map[string]int{}
for i, name := range regNamesPPC64 {
num[name] = i
}
buildReg := func(s string) regMask {
m := regMask(0)
for _, r := range strings.Split(s, " ") {
if n, ok := num[r]; ok {
m |= regMask(1) << uint(n)
continue
}
panic("register " + r + " not found")
}
return m
}
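// For example, buildReg("R3 R4") sets bits 3 and 4 of the mask (positions
// follow regNamesPPC64 order), so the masks below are just named register sets.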
var (
gp = buildReg("R3 R4 R5 R6 R7 R8 R9 R10 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29")
fp = buildReg("F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26")
sp = buildReg("SP")
// sb = buildReg("SB")
// gg = buildReg("R30")
cr = buildReg("CR")
// tmp = buildReg("R31")
// ctxt = buildReg("R11")
// tls = buildReg("R13")
gp01 = regInfo{inputs: []regMask{}, outputs: []regMask{gp}}
gp11 = regInfo{inputs: []regMask{gp | sp}, outputs: []regMask{gp}}
gp21 = regInfo{inputs: []regMask{gp | sp, gp | sp}, outputs: []regMask{gp}}
gp1cr = regInfo{inputs: []regMask{gp | sp}, outputs: []regMask{cr}}
gp2cr = regInfo{inputs: []regMask{gp | sp, gp | sp}, outputs: []regMask{cr}}
crgp = regInfo{inputs: []regMask{cr}, outputs: []regMask{gp}}
gpload = regInfo{inputs: []regMask{gp | sp}, outputs: []regMask{gp}}
gpstore = regInfo{inputs: []regMask{gp | sp, gp | sp}, outputs: []regMask{}}
gpstoreconst = regInfo{inputs: []regMask{gp | sp, 0}, outputs: []regMask{}}
fp01 = regInfo{inputs: []regMask{}, outputs: []regMask{fp}}
// fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
fp2cr = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{cr}}
fpload = regInfo{inputs: []regMask{gp | sp}, outputs: []regMask{fp}}
fpstore = regInfo{inputs: []regMask{fp, gp | sp}, outputs: []regMask{}}
callerSave = regMask(gp | fp)
)
ops := []opData{
{name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
{name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "SymOff"}, // arg0 + auxInt + aux.(*gc.Sym)
{name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1
{name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1
{name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1
{name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1
{name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1
{name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", commutative: true}, // arg0*arg1
{name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", commutative: true}, // arg0*arg1
{name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true}, // arg0*arg1
{name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0*arg1
{name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV"}, // arg0/arg1
{name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0/arg1
{name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0&arg1
{name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0&arg1 ??
{name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0|arg1
{name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int32"}, // arg0|arg1 ??
{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true}, // arg0^arg1
{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int32"}, // arg0|arg1 ??
{name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // ^arg0
{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // sign extend int8 to int64
{name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ"}, // zero extend uint8 to uint64
{name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // sign extend int16 to int64
{name: "MOVHZreg", argLength: 1, reg: gp11, asm: "MOVHZ"}, // zero extend uint16 to uint64
{name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // sign extend int32 to int64
{name: "MOVWZreg", argLength: 1, reg: gp11, asm: "MOVWZ"}, // zero extend uint32 to uint64
{name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", typ: "Int8"}, // sign extend int8 to int64
{name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", typ: "UInt8"}, // zero extend uint8 to uint64
{name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", typ: "Int16"}, // sign extend int16 to int64
{name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", typ: "UInt16"}, // zero extend uint16 to uint64
{name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", typ: "Int32"}, // sign extend int32 to int64
{name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", typ: "UInt32"}, // zero extend uint32 to uint64
{name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", typ: "UInt64"},
{name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", typ: "Fload64"},
{name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", typ: "Float32"},
{name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem"},
{name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem"},
{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem"},
{name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem"},
{name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem"},
{name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem"},
{name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
{name: "MOVHstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVH", aux: "SymValAndOff", typ: "Mem"}, // store low 2 bytes of ...
{name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem"}, // store low 4 bytes of ...
{name: "MOVDstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVD", aux: "SymValAndOff", typ: "Mem"}, // store 8 bytes of ...
{name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", rematerializeable: true}, //
{name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", rematerializeable: true}, // 32 low bits of auxint
{name: "MOVHconst", argLength: 0, reg: gp01, aux: "Int16", asm: "MOVH", rematerializeable: true}, // 16 low bits of auxint
{name: "MOVBconst", argLength: 0, reg: gp01, aux: "Int8", asm: "MOVB", rematerializeable: true}, // 8 low bits of auxint
{name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", rematerializeable: true}, //
{name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float32", asm: "FMOVS", rematerializeable: true}, //
{name: "FCMPU", argLength: 2, reg: fp2cr, asm: "FCMPU", typ: "Flags"},
{name: "CMP", argLength: 2, reg: gp2cr, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPU", argLength: 2, reg: gp2cr, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPW", argLength: 2, reg: gp2cr, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPWU", argLength: 2, reg: gp2cr, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPconst", argLength: 1, reg: gp1cr, asm: "CMP", aux: "Int32", typ: "Flags"},
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff"}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
// pseudo-ops
{name: "Equal", argLength: 1, reg: crgp}, // bool, true flags encode x==y false otherwise.
{name: "NotEqual", argLength: 1, reg: crgp}, // bool, true flags encode x!=y false otherwise.
{name: "LessThan", argLength: 1, reg: crgp}, // bool, true flags encode signed x<y false otherwise.
{name: "LessEqual", argLength: 1, reg: crgp}, // bool, true flags encode signed x<=y false otherwise.
{name: "GreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode signed x>y false otherwise.
{name: "GreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode signed x>=y false otherwise.
}
blocks := []blockData{
{name: "EQ"},
{name: "NE"},
{name: "LT"},
{name: "LE"},
{name: "GT"},
{name: "GE"},
{name: "ULT"},
{name: "ULE"},
{name: "UGT"},
{name: "UGE"},
}
archs = append(archs, arch{
name: "PPC64",
pkg: "cmd/internal/obj/ppc64",
genfile: "../../ppc64/ssa.go",
ops: ops,
blocks: blocks,
regnames: regNamesPPC64,
gpregmask: gp,
fpregmask: fp,
framepointerreg: int8(num["SP"]),
})
}
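The op table (opGen.go) and the rewriter (rewritePPC64.go) derived from this file and from PPC64.rules are generated, not hand-edited; assuming the usual gen workflow:
// From src/cmd/compile/internal/ssa/gen:
//   go run *.go
// regenerates ../opGen.go and, from PPC64.rules, ../rewritePPC64.go.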
@@ -185,6 +185,15 @@ const (
NOSCHED = 1 << 9
)
// Bit settings from the CR
const (
C_COND_LT = iota // 0 result is negative
C_COND_GT // 1 result is positive
C_COND_EQ // 2 result is zero
C_COND_SO // 3 summary overflow
)
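// Orientation note (illustrative): in branch-conditional encodings the BI
// field selects CR bit 4*crfield + cond, so testing EQ in CR0 uses
// BI = 4*0 + C_COND_EQ = 2. The assembler computes this; the constants
// above only name the bit positions within a CR field.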
const (
C_NONE = iota
C_REG
......