Commit 247fc4a9 authored by Vladimir Stefanovic, committed by Brad Fitzpatrick

cmd/compile/internal/ssa: add support for GOARCH=mips{,le}

Change-Id: I632d4aef7295778ba5018d98bcb06a68bcf07ce1
Reviewed-on: https://go-review.googlesource.com/31478
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
parent f72a629d
@@ -36,6 +36,7 @@ type Config struct {
use387 bool // GO386=387
OldArch bool // True for older versions of architecture, e.g. true for PPC64BE, false for PPC64LE
NeedsFpScratch bool // No direct move between GP and FP register sets
BigEndian bool // architecture is big-endian
DebugTest bool // default true unless $GOSSAHASH != ""; as a debugging aid, make new code conditional on this and use GOSSAHASH to binary search for failing cases
sparsePhiCutoff uint64 // Sparse phi location algorithm used above this #blocks*#variables score
curFunc *Func
@@ -204,6 +205,7 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
c.noDuffDevice = obj.GOOS == "darwin" // darwin linker cannot handle BR26 reloc with non-zero addend
case "ppc64":
c.OldArch = true
c.BigEndian = true
fallthrough
case "ppc64le":
c.IntSize = 8
@@ -219,7 +221,10 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
c.noDuffDevice = true // TODO: Resolve PPC64 DuffDevice (has zero, but not copy)
c.NeedsFpScratch = true
c.hasGReg = true
case "mips64", "mips64le":
case "mips64":
c.BigEndian = true
fallthrough
case "mips64le":
c.IntSize = 8
c.PtrSize = 8
c.RegSize = 8
@@ -245,6 +250,24 @@ func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config
c.LinkReg = linkRegS390X
c.hasGReg = true
c.noDuffDevice = true
c.BigEndian = true
case "mips":
c.BigEndian = true
fallthrough
case "mipsle":
c.IntSize = 4
c.PtrSize = 4
c.RegSize = 4
c.lowerBlock = rewriteBlockMIPS
c.lowerValue = rewriteValueMIPS
c.registers = registersMIPS[:]
c.gpRegMask = gpRegMaskMIPS
c.fpRegMask = fpRegMaskMIPS
c.specialRegMask = specialRegMaskMIPS
c.FPReg = framepointerRegMIPS
c.LinkReg = linkRegMIPS
c.hasGReg = true
c.noDuffDevice = true
default:
fe.Fatalf(0, "arch %s not implemented", arch)
}
......
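Each big-endian architecture in the switch sets only c.BigEndian = true and then falls through to its little-endian sibling, so the shared word sizes, register masks, and lowering hooks are written once per pair. A minimal, self-contained sketch of that fallthrough pattern (the config type and newConfig below are hypothetical stand-ins for ssa.Config and NewConfig, trimmed to a few fields):

package main

import "fmt"

// config is a hypothetical stand-in for ssa.Config, holding just the
// fields this sketch needs.
type config struct {
	BigEndian bool
	IntSize   int
	PtrSize   int
	RegSize   int
}

func newConfig(arch string) (*config, error) {
	c := &config{}
	switch arch {
	case "mips":
		c.BigEndian = true
		fallthrough // share the remaining settings with mipsle
	case "mipsle":
		c.IntSize, c.PtrSize, c.RegSize = 4, 4, 4
	case "mips64":
		c.BigEndian = true
		fallthrough // share the remaining settings with mips64le
	case "mips64le":
		c.IntSize, c.PtrSize, c.RegSize = 8, 8, 8
	default:
		return nil, fmt.Errorf("arch %s not implemented", arch)
	}
	return c, nil
}

func main() {
	for _, arch := range []string{"mips", "mipsle", "mips64", "mips64le"} {
		c, _ := newConfig(arch)
		fmt.Printf("%-8s BigEndian=%-5v RegSize=%d\n", arch, c.BigEndian, c.RegSize)
	}
}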
@@ -9,31 +9,57 @@
(Int64Hi (Int64Make hi _)) -> hi
(Int64Lo (Int64Make _ lo)) -> lo
// 64-bit integer loads, stores, and args are decomposed into 32-bit halves; the word order depends on config.BigEndian.
(Load <t> ptr mem) && is64BitInt(t) && t.IsSigned() ->
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() ->
(Int64Make
(Load <config.fe.TypeInt32()> (OffPtr <config.fe.TypeInt32().PtrTo()> [4] ptr) mem)
(Load <config.fe.TypeUInt32()> ptr mem))
(Load <t> ptr mem) && is64BitInt(t) && !t.IsSigned() ->
(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && !t.IsSigned() ->
(Int64Make
(Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem)
(Load <config.fe.TypeUInt32()> ptr mem))
(Store [8] dst (Int64Make hi lo) mem) ->
(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && t.IsSigned() ->
(Int64Make
(Load <config.fe.TypeInt32()> ptr mem)
(Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && !t.IsSigned() ->
(Int64Make
(Load <config.fe.TypeUInt32()> ptr mem)
(Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
(Store [8] dst (Int64Make hi lo) mem) && !config.BigEndian ->
(Store [4]
(OffPtr <hi.Type.PtrTo()> [4] dst)
hi
(Store [4] dst lo mem))
(Arg {n} [off]) && is64BitInt(v.Type) && v.Type.IsSigned() ->
(Store [8] dst (Int64Make hi lo) mem) && config.BigEndian ->
(Store [4]
(OffPtr <lo.Type.PtrTo()> [4] dst)
lo
(Store [4] dst hi mem))
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() ->
(Int64Make
(Arg <config.fe.TypeInt32()> {n} [off+4])
(Arg <config.fe.TypeUInt32()> {n} [off]))
(Arg {n} [off]) && is64BitInt(v.Type) && !v.Type.IsSigned() ->
(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() ->
(Int64Make
(Arg <config.fe.TypeUInt32()> {n} [off+4])
(Arg <config.fe.TypeUInt32()> {n} [off]))
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() ->
(Int64Make
(Arg <config.fe.TypeInt32()> {n} [off])
(Arg <config.fe.TypeUInt32()> {n} [off+4]))
(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() ->
(Int64Make
(Arg <config.fe.TypeUInt32()> {n} [off])
(Arg <config.fe.TypeUInt32()> {n} [off+4]))
(Add64 x y) ->
(Int64Make
(Add32withcarry <config.fe.TypeInt32()>
......
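The rules above split a 64-bit integer into two 32-bit words: on little-endian targets the low word lives at offset 0 and the high word at offset 4, and big-endian targets swap the two, which is exactly why the OffPtr [4] moves between the hi and lo operands. A quick standalone check of that layout, using only the standard library:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	x := uint64(0x1122334455667788)
	var buf [8]byte

	// Little-endian: low 32-bit word at offset 0, high word at offset 4.
	binary.LittleEndian.PutUint64(buf[:], x)
	fmt.Printf("LE: word@0=%#x word@4=%#x\n",
		binary.LittleEndian.Uint32(buf[0:4]), // 0x55667788 (lo)
		binary.LittleEndian.Uint32(buf[4:8])) // 0x11223344 (hi)

	// Big-endian: high 32-bit word at offset 0, low word at offset 4.
	binary.BigEndian.PutUint64(buf[:], x)
	fmt.Printf("BE: word@0=%#x word@4=%#x\n",
		binary.BigEndian.Uint32(buf[0:4]), // 0x11223344 (hi)
		binary.BigEndian.Uint32(buf[4:8])) // 0x55667788 (lo)
}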
@@ -195,13 +195,13 @@ func genOp() {
}
if v.faultOnNilArg0 {
fmt.Fprintln(w, "faultOnNilArg0: true,")
if v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "" {
if v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "Int32" && v.aux != "" {
log.Fatalf("faultOnNilArg0 with aux %s not allowed", v.aux)
}
}
if v.faultOnNilArg1 {
fmt.Fprintln(w, "faultOnNilArg1: true,")
if v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "" {
if v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "Int32" && v.aux != "" {
log.Fatalf("faultOnNilArg1 with aux %s not allowed", v.aux)
}
}
......
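The generator now accepts Int32 alongside the other aux kinds on faulting ops: only aux kinds that cannot change the faulting address may be combined with faultOnNilArg0/faultOnNilArg1. A hypothetical helper expressing the same whitelist (auxOKForFaultOnNil is not a real function in the generator, just a restatement of the inline checks above):

// auxOKForFaultOnNil reports whether an op with this aux kind may carry
// faultOnNilArg0/faultOnNilArg1: none of these aux kinds alter the
// address that would fault.
func auxOKForFaultOnNil(aux string) bool {
	switch aux {
	case "SymOff", "SymValAndOff", "Int64", "Int32", "":
		return true
	}
	return false
}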
@@ -184,6 +184,8 @@ func nilcheckelim2(f *Func) {
if v.Aux != nil || off < 0 || off >= minZeroPage {
continue
}
case auxInt32:
// MIPS uses this auxType for the atomic add constant. It does not affect the effective address.
case auxInt64:
// ARM uses this auxType for duffcopy/duffzero/alignment info.
// It does not affect the effective address.
......
@@ -199,12 +199,12 @@ func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && v.Type.IsSigned()
// cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned()
// result: (Int64Make (Arg <config.fe.TypeInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
for {
off := v.AuxInt
n := v.Aux
if !(is64BitInt(v.Type) && v.Type.IsSigned()) {
if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned()) {
break
}
v.reset(OpInt64Make)
@@ -219,12 +219,12 @@ func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
return true
}
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && !v.Type.IsSigned()
// cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned()
// result: (Int64Make (Arg <config.fe.TypeUInt32()> {n} [off+4]) (Arg <config.fe.TypeUInt32()> {n} [off]))
for {
off := v.AuxInt
n := v.Aux
if !(is64BitInt(v.Type) && !v.Type.IsSigned()) {
if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned()) {
break
}
v.reset(OpInt64Make)
@@ -238,6 +238,46 @@ func rewriteValuedec64_OpArg(v *Value, config *Config) bool {
v.AddArg(v1)
return true
}
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned()
// result: (Int64Make (Arg <config.fe.TypeInt32()> {n} [off]) (Arg <config.fe.TypeUInt32()> {n} [off+4]))
for {
off := v.AuxInt
n := v.Aux
if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned()) {
break
}
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeInt32())
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
v1.AuxInt = off + 4
v1.Aux = n
v.AddArg(v1)
return true
}
// match: (Arg {n} [off])
// cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned()
// result: (Int64Make (Arg <config.fe.TypeUInt32()> {n} [off]) (Arg <config.fe.TypeUInt32()> {n} [off+4]))
for {
off := v.AuxInt
n := v.Aux
if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned()) {
break
}
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpArg, config.fe.TypeUInt32())
v1.AuxInt = off + 4
v1.Aux = n
v.AddArg(v1)
return true
}
return false
}
func rewriteValuedec64_OpBswap64(v *Value, config *Config) bool {
@@ -744,13 +784,13 @@ func rewriteValuedec64_OpLoad(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && t.IsSigned()
// cond: is64BitInt(t) && !config.BigEndian && t.IsSigned()
// result: (Int64Make (Load <config.fe.TypeInt32()> (OffPtr <config.fe.TypeInt32().PtrTo()> [4] ptr) mem) (Load <config.fe.TypeUInt32()> ptr mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is64BitInt(t) && t.IsSigned()) {
if !(is64BitInt(t) && !config.BigEndian && t.IsSigned()) {
break
}
v.reset(OpInt64Make)
@@ -768,13 +808,13 @@ func rewriteValuedec64_OpLoad(v *Value, config *Config) bool {
return true
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && !t.IsSigned()
// cond: is64BitInt(t) && !config.BigEndian && !t.IsSigned()
// result: (Int64Make (Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem) (Load <config.fe.TypeUInt32()> ptr mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is64BitInt(t) && !t.IsSigned()) {
if !(is64BitInt(t) && !config.BigEndian && !t.IsSigned()) {
break
}
v.reset(OpInt64Make)
@@ -791,6 +831,54 @@ func rewriteValuedec64_OpLoad(v *Value, config *Config) bool {
v.AddArg(v2)
return true
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && config.BigEndian && t.IsSigned()
// result: (Int64Make (Load <config.fe.TypeInt32()> ptr mem) (Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is64BitInt(t) && config.BigEndian && t.IsSigned()) {
break
}
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt32())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeUInt32().PtrTo())
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Load <t> ptr mem)
// cond: is64BitInt(t) && config.BigEndian && !t.IsSigned()
// result: (Int64Make (Load <config.fe.TypeUInt32()> ptr mem) (Load <config.fe.TypeUInt32()> (OffPtr <config.fe.TypeUInt32().PtrTo()> [4] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(is64BitInt(t) && config.BigEndian && !t.IsSigned()) {
break
}
v.reset(OpInt64Make)
v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeUInt32())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeUInt32().PtrTo())
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
return false
}
func rewriteValuedec64_OpLrot64(v *Value, config *Config) bool {
@@ -2387,7 +2475,7 @@ func rewriteValuedec64_OpStore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Store [8] dst (Int64Make hi lo) mem)
// cond:
// cond: !config.BigEndian
// result: (Store [4] (OffPtr <hi.Type.PtrTo()> [4] dst) hi (Store [4] dst lo mem))
for {
if v.AuxInt != 8 {
@@ -2401,6 +2489,9 @@ func rewriteValuedec64_OpStore(v *Value, config *Config) bool {
hi := v_1.Args[0]
lo := v_1.Args[1]
mem := v.Args[2]
if !(!config.BigEndian) {
break
}
v.reset(OpStore)
v.AuxInt = 4
v0 := b.NewValue0(v.Line, OpOffPtr, hi.Type.PtrTo())
@@ -2416,6 +2507,39 @@ func rewriteValuedec64_OpStore(v *Value, config *Config) bool {
v.AddArg(v1)
return true
}
// match: (Store [8] dst (Int64Make hi lo) mem)
// cond: config.BigEndian
// result: (Store [4] (OffPtr <lo.Type.PtrTo()> [4] dst) lo (Store [4] dst hi mem))
for {
if v.AuxInt != 8 {
break
}
dst := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpInt64Make {
break
}
hi := v_1.Args[0]
lo := v_1.Args[1]
mem := v.Args[2]
if !(config.BigEndian) {
break
}
v.reset(OpStore)
v.AuxInt = 4
v0 := b.NewValue0(v.Line, OpOffPtr, lo.Type.PtrTo())
v0.AuxInt = 4
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(lo)
v1 := b.NewValue0(v.Line, OpStore, TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
v1.AddArg(hi)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
return false
}
func rewriteValuedec64_OpSub64(v *Value, config *Config) bool {
......
@@ -88,7 +88,7 @@ func schedule(f *Func) {
case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr ||
v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr ||
v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr ||
v.Op == OpS390XLoweredGetClosurePtr:
v.Op == OpS390XLoweredGetClosurePtr || v.Op == OpMIPSLoweredGetClosurePtr:
// We also score LoweredGetClosurePtr as early as possible to ensure that the
// context register is not stomped. LoweredGetClosurePtr should only appear
// in the entry block where there are no phi functions, so there is no
@@ -100,7 +100,7 @@ func schedule(f *Func) {
case v.Op == OpAMD64LoweredNilCheck || v.Op == OpPPC64LoweredNilCheck ||
v.Op == OpARMLoweredNilCheck || v.Op == OpARM64LoweredNilCheck ||
v.Op == Op386LoweredNilCheck || v.Op == OpMIPS64LoweredNilCheck ||
v.Op == OpS390XLoweredNilCheck:
v.Op == OpS390XLoweredNilCheck || v.Op == OpMIPSLoweredNilCheck:
// Nil checks must come before loads from the same address.
score[v.ID] = ScoreNilCheck
case v.Op == OpPhi:
......
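Scheduling within a block is score-driven: each value is assigned a score class and lower-scored values are emitted first, which is how the MIPS nil check is guaranteed to precede loads from the same address. A toy sketch of the idea (the score names are loosely modeled on ScorePhi and ScoreNilCheck, not the compiler's actual values):

package main

import (
	"fmt"
	"sort"
)

// Toy score classes; lower scores schedule earlier in a block.
const (
	scorePhi = iota
	scoreNilCheck
	scoreDefault
)

type value struct {
	name  string
	score int
}

func main() {
	block := []value{
		{"load p", scoreDefault},
		{"nilcheck p", scoreNilCheck},
		{"phi x", scorePhi},
	}
	// Stable sort keeps program order among values with equal scores.
	sort.SliceStable(block, func(i, j int) bool {
		return block[i].score < block[j].score
	})
	for _, v := range block {
		fmt.Println(v.name) // phi x, then nilcheck p, then load p
	}
}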