Commit 7fc56219 authored by Keith Randall

cmd/compile: define high bits of AuxInt

Previously, if an op used only the low bits of AuxInt, the high bits
were ignored and could be junk.  This CL defines the high bits to be
the sign extension of the low bits in all cases.
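
For example, an op whose AuxInt is logically 8 bits wide (such as Const8)
must now carry an int64 equal to the sign extension of its low byte. A
minimal Go sketch of the invariant (helper names here are illustrative,
not part of the CL; the validity test mirrors the auxInt8 check this CL
adds to the SSA checker):

```go
package main

import "fmt"

// canonAuxInt8 stores an 8-bit payload in a 64-bit AuxInt by
// sign-extending it, as the new rule requires.
func canonAuxInt8(x int8) int64 { return int64(x) }

// validAuxInt8 reports whether an AuxInt is canonical for an 8-bit op:
// the high 56 bits must be the sign extension of the low 8 bits.
func validAuxInt8(a int64) bool { return a == int64(int8(a)) }

func main() {
	fmt.Println(canonAuxInt8(1), validAuxInt8(0x1))    // 1 true
	fmt.Println(canonAuxInt8(-1), validAuxInt8(0x101)) // -1 false: 0x101 is not canonical
}
```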

There are 2 main benefits:
- Deterministic representation.  This helps with CSE.
  (Const8 [0x1]) and (Const8 [0x101]) used to be the same "value"
  but CSE couldn't see them as such.
- Testability.  We can check that all ops leave AuxInt in a state
  consistent with the new rule.  In the old scheme, it was hard
  to check whether a rule correctly used only the low-order bits.
Side benefits:
- ==0 and !=0 tests are easier.

Drawbacks:
- This differs from the runtime representation in registers,
  where it is important that we allow upper bits to be undefined
  (so we're not sign/zero-extending all the time).
- Ops that treat AuxInt as unsigned (shifts, mostly) need to be
  a bit more careful.
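
For example, an op that interprets an 8-bit AuxInt payload as unsigned
cannot use the stored int64 directly; it has to truncate to the payload
width before widening. A small illustrative sketch (not code from this CL):

```go
package main

import "fmt"

func main() {
	// The 8-bit payload 0xFF is stored canonically as the sign-extended
	// value -1 in the 64-bit AuxInt.
	b := byte(0xFF)
	aux := int64(int8(b)) // -1

	// Reading the whole AuxInt as unsigned gives the wrong answer;
	// truncating to the payload width first recovers the intended bits.
	fmt.Println(uint64(aux)) // 18446744073709551615 (wrong interpretation)
	fmt.Println(uint8(aux))  // 255 (intended unsigned payload)
}
```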

Change-Id: I9a685ff27e36dc03287c9ab1cecd6c0b4045c819
Reviewed-on: https://go-review.googlesource.com/21256
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
parent 18072adb
@@ -420,7 +420,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
r := gc.SSARegNum(v)
a := gc.SSARegNum(v.Args[0])
if r == a {
- if v.AuxInt2Int64() == 1 {
+ if v.AuxInt == 1 {
var asm obj.As
switch v.Op {
// Software optimization manual recommends add $1,reg.
@@ -439,7 +439,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
- } else if v.AuxInt2Int64() == -1 {
+ } else if v.AuxInt == -1 {
var asm obj.As
switch v.Op {
case ssa.OpAMD64ADDQconst:
@@ -456,7 +456,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
} else {
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
- p.From.Offset = v.AuxInt2Int64()
+ p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
@@ -474,7 +474,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := gc.Prog(asm)
p.From.Type = obj.TYPE_MEM
p.From.Reg = a
- p.From.Offset = v.AuxInt2Int64()
+ p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
@@ -494,7 +494,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Constant into AX, after arg0 movement in case arg0 is in AX
p := gc.Prog(moveByType(v.Type))
p.From.Type = obj.TYPE_CONST
- p.From.Offset = v.AuxInt2Int64()
+ p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX
@@ -516,7 +516,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
- p.From.Offset = v.AuxInt2Int64()
+ p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
// TODO: Teach doasm to compile the three-address multiply imul $c, r1, r2
@@ -531,7 +531,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// a = b + (- const), saves us 1 instruction. We can't fit
// - (-1 << 31) into 4 bytes offset in lea.
// We handle 2-address just fine below.
- if v.AuxInt2Int64() == -1<<31 || x == r {
+ if v.AuxInt == -1<<31 || x == r {
if x != r {
// This code compensates for the fact that the register allocator
// doesn't understand 2-address instructions yet. TODO: fix that.
@@ -543,10 +543,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
- p.From.Offset = v.AuxInt2Int64()
+ p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- } else if x == r && v.AuxInt2Int64() == -1 {
+ } else if x == r && v.AuxInt == -1 {
var asm obj.As
// x = x - (-1) is the same as x++
// See OpAMD64ADDQconst comments about inc vs add $1,reg
@@ -561,7 +561,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := gc.Prog(asm)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- } else if x == r && v.AuxInt2Int64() == 1 {
+ } else if x == r && v.AuxInt == 1 {
var asm obj.As
switch v.Op {
case ssa.OpAMD64SUBQconst:
@@ -587,7 +587,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := gc.Prog(asm)
p.From.Type = obj.TYPE_MEM
p.From.Reg = x
- p.From.Offset = -v.AuxInt2Int64()
+ p.From.Offset = -v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
}
@@ -614,7 +614,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
- p.From.Offset = v.AuxInt2Int64()
+ p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
@@ -661,18 +661,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = gc.SSARegNum(v.Args[0])
p.To.Type = obj.TYPE_CONST
- p.To.Offset = v.AuxInt2Int64()
+ p.To.Offset = v.AuxInt
case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
- p.From.Offset = v.AuxInt2Int64()
+ p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v.Args[0])
case ssa.OpAMD64MOVBconst, ssa.OpAMD64MOVWconst, ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
x := gc.SSARegNum(v)
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
- p.From.Offset = v.AuxInt2Int64()
+ p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = x
// If flags are live at this instruction, suppress the
@@ -804,17 +804,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
- i := sc.Val()
- switch v.Op {
- case ssa.OpAMD64MOVBstoreconst:
- i = int64(int8(i))
- case ssa.OpAMD64MOVWstoreconst:
- i = int64(int16(i))
- case ssa.OpAMD64MOVLstoreconst:
- i = int64(int32(i))
- case ssa.OpAMD64MOVQstoreconst:
- }
- p.From.Offset = i
+ p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = gc.SSARegNum(v.Args[0])
gc.AddAux2(&p.To, v, sc.Off())
@@ -822,18 +812,15 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val()
switch v.Op {
case ssa.OpAMD64MOVBstoreconstidx1:
- p.From.Offset = int64(int8(sc.Val()))
p.To.Scale = 1
case ssa.OpAMD64MOVWstoreconstidx2:
- p.From.Offset = int64(int16(sc.Val()))
p.To.Scale = 2
case ssa.OpAMD64MOVLstoreconstidx4:
- p.From.Offset = int64(int32(sc.Val()))
p.To.Scale = 4
case ssa.OpAMD64MOVQstoreconstidx8:
- p.From.Offset = sc.Val()
p.To.Scale = 8
}
p.To.Type = obj.TYPE_MEM
...
@@ -85,7 +85,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpARMMOVWconst:
p := gc.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
- p.From.Offset = v.AuxInt2Int64()
+ p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = gc.SSARegNum(v)
case ssa.OpARMCMP:
...
@@ -171,7 +171,27 @@ func checkFunc(f *Func) {
canHaveAuxInt := false
switch opcodeTable[v.Op].auxType {
case auxNone:
- case auxBool, auxInt8, auxInt16, auxInt32, auxInt64, auxFloat64:
+ case auxBool:
+ if v.AuxInt < 0 || v.AuxInt > 1 {
+ f.Fatalf("bad bool AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt8:
+ if v.AuxInt != int64(int8(v.AuxInt)) {
+ f.Fatalf("bad int8 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt16:
+ if v.AuxInt != int64(int16(v.AuxInt)) {
+ f.Fatalf("bad int16 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt32:
+ if v.AuxInt != int64(int32(v.AuxInt)) {
+ f.Fatalf("bad int32 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt64, auxFloat64:
canHaveAuxInt = true
case auxFloat32:
canHaveAuxInt = true
...
@@ -2,13 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
- // x86 register conventions:
- // - Integer types live in the low portion of registers. Upper portions are junk.
- // - Boolean types use the low-order byte of a register. Upper bytes are junk.
- // - We do not use AH,BH,CH,DH registers.
- // - Floating-point types will live in the low natural slot of an sse2 register.
- // Unused portions are junk.
// Lowering arithmetic
(Add64 x y) -> (ADDQ x y)
(AddPtr x y) -> (ADDQ x y)
@@ -1162,33 +1155,33 @@
// generic constant folding
// TODO: more of this
(ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d])
- (ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [c+d])
+ (ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))])
- (ADDWconst [c] (MOVWconst [d])) -> (MOVWconst [c+d])
+ (ADDWconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int16(c+d))])
- (ADDBconst [c] (MOVBconst [d])) -> (MOVBconst [c+d])
+ (ADDBconst [c] (MOVBconst [d])) -> (MOVBconst [int64(int8(c+d))])
(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x)
- (ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [c+d] x)
+ (ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x)
- (ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [c+d] x)
+ (ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [int64(int16(c+d))] x)
- (ADDBconst [c] (ADDBconst [d] x)) -> (ADDBconst [c+d] x)
+ (ADDBconst [c] (ADDBconst [d] x)) -> (ADDBconst [int64(int8(c+d))] x)
- (SUBQconst [c] (MOVQconst [d])) -> (MOVQconst [d-c])
+ (SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c])
- (SUBLconst [c] (MOVLconst [d])) -> (MOVLconst [d-c])
+ (SUBLconst (MOVLconst [d]) [c]) -> (MOVLconst [int64(int32(d-c))])
- (SUBWconst [c] (MOVWconst [d])) -> (MOVWconst [d-c])
+ (SUBWconst (MOVWconst [d]) [c]) -> (MOVWconst [int64(int16(d-c))])
- (SUBBconst [c] (MOVBconst [d])) -> (MOVBconst [d-c])
+ (SUBBconst (MOVBconst [d]) [c]) -> (MOVBconst [int64(int8(d-c))])
- (SUBQconst [c] (SUBQconst [d] x)) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
+ (SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x)
- (SUBLconst [c] (SUBLconst [d] x)) -> (ADDLconst [-c-d] x)
+ (SUBLconst (SUBLconst x [d]) [c]) -> (ADDLconst [int64(int32(-c-d))] x)
- (SUBWconst [c] (SUBWconst [d] x)) -> (ADDWconst [-c-d] x)
+ (SUBWconst (SUBWconst x [d]) [c]) -> (ADDWconst [int64(int16(-c-d))] x)
- (SUBBconst [c] (SUBBconst [d] x)) -> (ADDBconst [-c-d] x)
+ (SUBBconst (SUBBconst x [d]) [c]) -> (ADDBconst [int64(int8(-c-d))] x)
(SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARLconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARWconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(SARBconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)])
(NEGQ (MOVQconst [c])) -> (MOVQconst [-c])
- (NEGL (MOVLconst [c])) -> (MOVLconst [-c])
+ (NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))])
- (NEGW (MOVWconst [c])) -> (MOVWconst [-c])
+ (NEGW (MOVWconst [c])) -> (MOVWconst [int64(int16(-c))])
- (NEGB (MOVBconst [c])) -> (MOVBconst [-c])
+ (NEGB (MOVBconst [c])) -> (MOVBconst [int64(int8(-c))])
(MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d])
- (MULLconst [c] (MOVLconst [d])) -> (MOVLconst [c*d])
+ (MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))])
- (MULWconst [c] (MOVWconst [d])) -> (MOVWconst [c*d])
+ (MULWconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int16(c*d))])
- (MULBconst [c] (MOVBconst [d])) -> (MOVBconst [c*d])
+ (MULBconst [c] (MOVBconst [d])) -> (MOVBconst [int64(int8(c*d))])
(ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d])
(ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d])
(ANDWconst [c] (MOVWconst [d])) -> (MOVWconst [c&d])
...
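
The folding rules above keep results canonical by doing the arithmetic in
64 bits, then truncating to the op's width and re-sign-extending. A small
Go sketch of that pattern for the 8-bit add rule (the helper name is
illustrative, not part of the generated rewriter):

```go
package main

import "fmt"

// foldADDBconst mirrors (ADDBconst [c] (MOVBconst [d])) ->
// (MOVBconst [int64(int8(c+d))]): add in 64 bits, truncate to 8 bits,
// and sign-extend so the folded constant stays canonical.
func foldADDBconst(c, d int64) int64 { return int64(int8(c + d)) }

func main() {
	fmt.Println(foldADDBconst(100, 100)) // -56: the 8-bit sum wraps
	fmt.Println(foldADDBconst(1, -1))    // 0
}
```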
@@ -6,6 +6,25 @@ package main
import "strings"
+ // Notes:
+ // - Integer types live in the low portion of registers. Upper portions are junk.
+ // - Boolean types use the low-order byte of a register. 0=false, 1=true.
+ // Upper bytes are junk.
+ // - Floating-point types live in the low natural slot of an sse2 register.
+ // Unused portions are junk.
+ // - We do not use AH,BH,CH,DH registers.
+ // - When doing sub-register operations, we try to write the whole
+ // destination register to avoid a partial-register write.
+ // - Unused portions of AuxInt (or the Val portion of ValAndOff) are
+ // filled by sign-extending the used portion. Users of AuxInt which interpret
+ // AuxInt as unsigned (e.g. shifts) must be careful.
+ // Suffixes encode the bit width of various instructions.
+ // Q (quad word) = 64 bit
+ // L (long word) = 32 bit
+ // W (word) = 16 bit
+ // B (byte) = 8 bit
// copied from ../../amd64/reg.go
var regNamesAMD64 = []string{
"AX",
@@ -129,7 +148,6 @@ func init() {
gpfp = regInfo{inputs: gponly, outputs: fponly}
fp11 = regInfo{inputs: fponly, outputs: fponly}
fp2flags = regInfo{inputs: []regMask{fp, fp}, outputs: flagsonly}
- // fp1flags = regInfo{inputs: fponly, outputs: flagsonly}
fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly}
fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: fponly}
@@ -137,12 +155,7 @@ func init() {
fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}}
fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}}
)
- // TODO: most ops clobber flags
- // Suffixes encode the bit width of various instructions.
- // Q = 64 bit, L = 32 bit, W = 16 bit, B = 8 bit
- // TODO: 2-address instructions. Mark ops as needing matching input/output regs.
var AMD64ops = []opData{
// fp ops
{name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true}, // fp32 add
...
@@ -4,6 +4,19 @@
package main
+ // Generic opcodes typically specify a width. The inputs and outputs
+ // of that op are the given number of bits wide. There is no notion of
+ // "sign", so Add32 can be used both for signed and unsigned 32-bit
+ // addition.
+ // Signed/unsigned is explicit with the extension ops
+ // (SignExt*/ZeroExt*) and implicit as the arg to some opcodes
+ // (e.g. the second argument to shifts is unsigned). If not mentioned,
+ // all args take signed inputs, or don't care whether their inputs
+ // are signed or unsigned.
+ // Unused portions of AuxInt are filled by sign-extending the used portion.
+ // Users of AuxInt which interpret AuxInt as unsigned (e.g. shifts) must be careful.
var genericOps = []opData{
// 2-input arithmetic
// Types must be consistent with Go typing. Add, for example, must take two values
@@ -15,7 +28,6 @@ var genericOps = []opData{
{name: "AddPtr", argLength: 2}, // For address calculations. arg0 is a pointer and arg1 is an int.
{name: "Add32F", argLength: 2},
{name: "Add64F", argLength: 2},
- // TODO: Add64C, Add128C
{name: "Sub8", argLength: 2}, // arg0 - arg1
{name: "Sub16", argLength: 2},
@@ -35,8 +47,8 @@ var genericOps = []opData{
{name: "Div32F", argLength: 2}, // arg0 / arg1
{name: "Div64F", argLength: 2},
- {name: "Hmul8", argLength: 2}, // (arg0 * arg1) >> width
+ {name: "Hmul8", argLength: 2}, // (arg0 * arg1) >> width, signed
- {name: "Hmul8u", argLength: 2},
+ {name: "Hmul8u", argLength: 2}, // (arg0 * arg1) >> width, unsigned
{name: "Hmul16", argLength: 2},
{name: "Hmul16u", argLength: 2},
{name: "Hmul32", argLength: 2},
@@ -47,8 +59,8 @@ var genericOps = []opData{
// Weird special instruction for strength reduction of divides.
{name: "Avg64u", argLength: 2}, // (uint64(arg0) + uint64(arg1)) / 2, correct to all 64 bits.
- {name: "Div8", argLength: 2}, // arg0 / arg1
+ {name: "Div8", argLength: 2}, // arg0 / arg1, signed
- {name: "Div8u", argLength: 2},
+ {name: "Div8u", argLength: 2}, // arg0 / arg1, unsigned
{name: "Div16", argLength: 2},
{name: "Div16u", argLength: 2},
{name: "Div32", argLength: 2},
@@ -56,8 +68,8 @@ var genericOps = []opData{
{name: "Div64", argLength: 2},
{name: "Div64u", argLength: 2},
- {name: "Mod8", argLength: 2}, // arg0 % arg1
+ {name: "Mod8", argLength: 2}, // arg0 % arg1, signed
- {name: "Mod8u", argLength: 2},
+ {name: "Mod8u", argLength: 2}, // arg0 % arg1, unsigned
{name: "Mod16", argLength: 2},
{name: "Mod16u", argLength: 2},
{name: "Mod32", argLength: 2},
@@ -81,6 +93,7 @@ var genericOps = []opData{
{name: "Xor64", argLength: 2, commutative: true},
// For shifts, AxB means the shifted value has A bits and the shift amount has B bits.
+ // Shift amounts are considered unsigned.
{name: "Lsh8x8", argLength: 2}, // arg0 << arg1
{name: "Lsh8x16", argLength: 2},
{name: "Lsh8x32", argLength: 2},
@@ -178,8 +191,8 @@ var genericOps = []opData{
{name: "Neq32F", argLength: 2},
{name: "Neq64F", argLength: 2},
- {name: "Less8", argLength: 2}, // arg0 < arg1
+ {name: "Less8", argLength: 2}, // arg0 < arg1, signed
- {name: "Less8U", argLength: 2},
+ {name: "Less8U", argLength: 2}, // arg0 < arg1, unsigned
{name: "Less16", argLength: 2},
{name: "Less16U", argLength: 2},
{name: "Less32", argLength: 2},
@@ -189,8 +202,8 @@ var genericOps = []opData{
{name: "Less32F", argLength: 2},
{name: "Less64F", argLength: 2},
- {name: "Leq8", argLength: 2}, // arg0 <= arg1
+ {name: "Leq8", argLength: 2}, // arg0 <= arg1, signed
- {name: "Leq8U", argLength: 2},
+ {name: "Leq8U", argLength: 2}, // arg0 <= arg1, unsigned
{name: "Leq16", argLength: 2},
{name: "Leq16U", argLength: 2},
{name: "Leq32", argLength: 2},
@@ -200,8 +213,8 @@ var genericOps = []opData{
{name: "Leq32F", argLength: 2},
{name: "Leq64F", argLength: 2},
- {name: "Greater8", argLength: 2}, // arg0 > arg1
+ {name: "Greater8", argLength: 2}, // arg0 > arg1, signed
- {name: "Greater8U", argLength: 2},
+ {name: "Greater8U", argLength: 2}, // arg0 > arg1, unsigned
{name: "Greater16", argLength: 2},
{name: "Greater16U", argLength: 2},
{name: "Greater32", argLength: 2},
@@ -211,8 +224,8 @@ var genericOps = []opData{
{name: "Greater32F", argLength: 2},
{name: "Greater64F", argLength: 2},
- {name: "Geq8", argLength: 2}, // arg0 <= arg1
+ {name: "Geq8", argLength: 2}, // arg0 <= arg1, signed
- {name: "Geq8U", argLength: 2},
+ {name: "Geq8U", argLength: 2}, // arg0 <= arg1, unsigned
{name: "Geq16", argLength: 2},
{name: "Geq16U", argLength: 2},
{name: "Geq32", argLength: 2},
@@ -223,7 +236,7 @@ var genericOps = []opData{
{name: "Geq64F", argLength: 2},
// 1-input ops
- {name: "Not", argLength: 1}, // !arg0
+ {name: "Not", argLength: 1}, // !arg0, boolean
{name: "Neg8", argLength: 1}, // -arg0
{name: "Neg16", argLength: 1},
@@ -266,9 +279,9 @@ var genericOps = []opData{
{name: "ConstBool", aux: "Bool"}, // auxint is 0 for false and 1 for true
{name: "ConstString", aux: "String"}, // value is aux.(string)
{name: "ConstNil", typ: "BytePtr"}, // nil pointer
- {name: "Const8", aux: "Int8"}, // value is low 8 bits of auxint
+ {name: "Const8", aux: "Int8"}, // auxint is sign-extended 8 bits
- {name: "Const16", aux: "Int16"}, // value is low 16 bits of auxint
+ {name: "Const16", aux: "Int16"}, // auxint is sign-extended 16 bits
- {name: "Const32", aux: "Int32"}, // value is low 32 bits of auxint
+ {name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits
{name: "Const64", aux: "Int64"}, // value is auxint
{name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly prepresentable as float 32
{name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
@@ -337,16 +350,16 @@ var genericOps = []opData{
// Automatically inserted safety checks
{name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil
- {name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1
+ {name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0.
- {name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1
+ {name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0.
{name: "NilCheck", argLength: 2, typ: "Void"}, // arg0=ptr, arg1=mem. Panics if arg0 is nil, returns void.
// Pseudo-ops
{name: "GetG", argLength: 1}, // runtime.getg() (read g pointer). arg0=mem
{name: "GetClosurePtr"}, // get closure pointer from dedicated register
// Indexing operations
{name: "ArrayIndex", aux: "Int64", argLength: 1}, // arg0=array, auxint=index. Returns a[i]
{name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type
{name: "OffPtr", argLength: 1, aux: "Int64"}, // arg0 + auxint (arg0 and result are pointers)
...
@@ -111,13 +111,6 @@ func canMergeSym(x, y interface{}) bool {
return x == nil || y == nil
}
- func inBounds8(idx, len int64) bool { return int8(idx) >= 0 && int8(idx) < int8(len) }
- func inBounds16(idx, len int64) bool { return int16(idx) >= 0 && int16(idx) < int16(len) }
- func inBounds32(idx, len int64) bool { return int32(idx) >= 0 && int32(idx) < int32(len) }
- func inBounds64(idx, len int64) bool { return idx >= 0 && idx < len }
- func sliceInBounds32(idx, len int64) bool { return int32(idx) >= 0 && int32(idx) <= int32(len) }
- func sliceInBounds64(idx, len int64) bool { return idx >= 0 && idx <= len }
// nlz returns the number of leading zeros.
func nlz(x int64) int64 {
// log2(0) == 1, so nlz(0) == 64
...
@@ -840,7 +840,7 @@ func rewriteValueAMD64_OpAMD64ADDBconst(v *Value, config *Config) bool {
}
// match: (ADDBconst [c] (MOVBconst [d]))
// cond:
- // result: (MOVBconst [c+d])
+ // result: (MOVBconst [int64(int8(c+d))])
for {
c := v.AuxInt
v_0 := v.Args[0]
@@ -849,12 +849,12 @@ func rewriteValueAMD64_OpAMD64ADDBconst(v *Value, config *Config) bool {
}
d := v_0.AuxInt
v.reset(OpAMD64MOVBconst)
- v.AuxInt = c + d
+ v.AuxInt = int64(int8(c + d))
return true
}
// match: (ADDBconst [c] (ADDBconst [d] x))
// cond:
- // result: (ADDBconst [c+d] x)
+ // result: (ADDBconst [int64(int8(c+d))] x)
for {
c := v.AuxInt
v_0 := v.Args[0]
@@ -864,7 +864,7 @@ func rewriteValueAMD64_OpAMD64ADDBconst(v *Value, config *Config) bool {
d := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpAMD64ADDBconst)
- v.AuxInt = c + d
+ v.AuxInt = int64(int8(c + d))
v.AddArg(x)
return true
}
@@ -939,7 +939,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
}
// match: (ADDLconst [c] (MOVLconst [d]))
// cond:
- // result: (MOVLconst [c+d])
+ // result: (MOVLconst [int64(int32(c+d))])
for {
c := v.AuxInt
v_0 := v.Args[0]
@@ -948,12 +948,12 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
}
d := v_0.AuxInt
v.reset(OpAMD64MOVLconst)
- v.AuxInt = c + d
+ v.AuxInt = int64(int32(c + d))
return true
}
// match: (ADDLconst [c] (ADDLconst [d] x))
// cond:
- // result: (ADDLconst [c+d] x)
+ // result: (ADDLconst [int64(int32(c+d))] x)
for {
c := v.AuxInt
v_0 := v.Args[0]
@@ -963,7 +963,7 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
d := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpAMD64ADDLconst)
- v.AuxInt = c + d
+ v.AuxInt = int64(int32(c + d))
v.AddArg(x)
return true
}
@@ -1461,7 +1461,7 @@ func rewriteValueAMD64_OpAMD64ADDWconst(v *Value, config *Config) bool {
}
// match: (ADDWconst [c] (MOVWconst [d]))
// cond:
- // result: (MOVWconst [c+d])
+ // result: (MOVWconst [int64(int16(c+d))])
for {
c := v.AuxInt
v_0 := v.Args[0]
@@ -1470,12 +1470,12 @@ func rewriteValueAMD64_OpAMD64ADDWconst(v *Value, config *Config) bool {
}
d := v_0.AuxInt
v.reset(OpAMD64MOVWconst)
- v.AuxInt = c + d
+ v.AuxInt = int64(int16(c + d))
return true
}
// match: (ADDWconst [c] (ADDWconst [d] x))
// cond:
- // result: (ADDWconst [c+d] x)
+ // result: (ADDWconst [int64(int16(c+d))] x)
for {
c := v.AuxInt
v_0 := v.Args[0]
@@ -1485,7 +1485,7 @@ func rewriteValueAMD64_OpAMD64ADDWconst(v *Value, config *Config) bool {
d := v_0.AuxInt
x := v_0.Args[0]
v.reset(OpAMD64ADDWconst)
- v.AuxInt = c + d
+ v.AuxInt = int64(int16(c + d))
v.AddArg(x)
return true
}
@@ -9218,7 +9218,7 @@ func rewriteValueAMD64_OpAMD64MULBconst(v *Value, config *Config) bool {
_ = b
// match: (MULBconst [c] (MOVBconst [d]))
// cond:
- // result: (MOVBconst [c*d])
+ // result: (MOVBconst [int64(int8(c*d))])
for {
c := v.AuxInt
v_0 := v.Args[0]
@@ -9227,7 +9227,7 @@ func rewriteValueAMD64_OpAMD64MULBconst(v *Value, config *Config) bool {
}
d := v_0.AuxInt
v.reset(OpAMD64MOVBconst)
- v.AuxInt = c * d
+ v.AuxInt = int64(int8(c * d))
return true
}
return false
@@ -9272,7 +9272,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool {
_ = b
// match: (MULLconst [c] (MOVLconst [d]))
// cond:
- // result: (MOVLconst [c*d])
+ // result: (MOVLconst [int64(int32(c*d))])
for {
c := v.AuxInt
v_0 := v.Args[0]
@@ -9281,7 +9281,7 @@ func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool {
}
d := v_0.AuxInt
v.reset(OpAMD64MOVLconst)
- v.AuxInt = c * d
+ v.AuxInt = int64(int32(c * d))
return true
}
return false
@@ -9491,7 +9491,7 @@ func rewriteValueAMD64_OpAMD64MULWconst(v *Value, config *Config) bool {
_ = b
// match: (MULWconst [c] (MOVWconst [d]))
// cond:
- // result: (MOVWconst [c*d])
+ // result: (MOVWconst [int64(int16(c*d))])
for {
c := v.AuxInt
v_0 := v.Args[0]
@@ -9500,7 +9500,7 @@ func rewriteValueAMD64_OpAMD64MULWconst(v *Value, config *Config) bool {
}
d := v_0.AuxInt
v.reset(OpAMD64MOVWconst)
- v.AuxInt = c * d
+ v.AuxInt = int64(int16(c * d))
return true
}
return false
@@ -10096,7 +10096,7 @@ func rewriteValueAMD64_OpAMD64NEGB(v *Value, config *Config) bool {
_ = b
// match: (NEGB (MOVBconst [c]))
// cond:
- // result: (MOVBconst [-c])
+ // result: (MOVBconst [int64(int8(-c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVBconst {
@@ -10104,7 +10104,7 @@ func rewriteValueAMD64_OpAMD64NEGB(v *Value, config *Config) bool {
}
c := v_0.AuxInt
v.reset(OpAMD64MOVBconst)
- v.AuxInt = -c
+ v.AuxInt = int64(int8(-c))
return true
}
return false
@@ -10114,7 +10114,7 @@ func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
_ = b
// match: (NEGL (MOVLconst [c]))
// cond:
- // result: (MOVLconst [-c])
+ // result: (MOVLconst [int64(int32(-c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
@@ -10122,7 +10122,7 @@ func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool {
}
c := v_0.AuxInt
v.reset(OpAMD64MOVLconst)
- v.AuxInt = -c
+ v.AuxInt = int64(int32(-c))
return true
}
return false
@@ -10150,7 +10150,7 @@ func rewriteValueAMD64_OpAMD64NEGW(v *Value, config *Config) bool {
_ = b
// match: (NEGW (MOVWconst [c]))
// cond:
- // result: (MOVWconst [-c])
+ // result: (MOVWconst [int64(int16(-c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVWconst {
@@ -10158,7 +10158,7 @@ func rewriteValueAMD64_OpAMD64NEGW(v *Value, config *Config) bool {
}
c := v_0.AuxInt
v.reset(OpAMD64MOVWconst)
- v.AuxInt = -c
+ v.AuxInt = int64(int16(-c))
return true
}
return false
@@ -14591,33 +14591,33 @@ func rewriteValueAMD64_OpAMD64SUBBconst(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
- // match: (SUBBconst [c] (MOVBconst [d]))
+ // match: (SUBBconst (MOVBconst [d]) [c])
// cond:
- // result: (MOVBconst [d-c])
+ // result: (MOVBconst [int64(int8(d-c))])
for {
- c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVBconst {
break
}
d := v_0.AuxInt
+ c := v.AuxInt
v.reset(OpAMD64MOVBconst)
- v.AuxInt = d - c
+ v.AuxInt = int64(int8(d - c))
return true
}
- // match: (SUBBconst [c] (SUBBconst [d] x))
+ // match: (SUBBconst (SUBBconst x [d]) [c])
// cond:
- // result: (ADDBconst [-c-d] x)
+ // result: (ADDBconst [int64(int8(-c-d))] x)
for {
- c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SUBBconst {
break
}
- d := v_0.AuxInt
x := v_0.Args[0]
+ d := v_0.AuxInt
+ c := v.AuxInt
v.reset(OpAMD64ADDBconst)
- v.AuxInt = -c - d
+ v.AuxInt = int64(int8(-c - d))
v.AddArg(x)
return true
}
@@ -14689,33 +14689,33 @@ func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
- // match: (SUBLconst [c] (MOVLconst [d]))
+ // match: (SUBLconst (MOVLconst [d]) [c])
// cond:
- // result: (MOVLconst [d-c])
+ // result: (MOVLconst [int64(int32(d-c))])
for {
- c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVLconst {
break
}
d := v_0.AuxInt
+ c := v.AuxInt
v.reset(OpAMD64MOVLconst)
- v.AuxInt = d - c
+ v.AuxInt = int64(int32(d - c))
return true
}
- // match: (SUBLconst [c] (SUBLconst [d] x))
+ // match: (SUBLconst (SUBLconst x [d]) [c])
// cond:
- // result: (ADDLconst [-c-d] x)
+ // result: (ADDLconst [int64(int32(-c-d))] x)
for {
- c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SUBLconst {
break
}
- d := v_0.AuxInt
x := v_0.Args[0]
+ d := v_0.AuxInt
+ c := v.AuxInt
v.reset(OpAMD64ADDLconst)
- v.AuxInt = -c - d
+ v.AuxInt = int64(int32(-c - d))
v.AddArg(x)
return true
}
@@ -14792,31 +14792,31 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
- // match: (SUBQconst [c] (MOVQconst [d]))
+ // match: (SUBQconst (MOVQconst [d]) [c])
// cond:
// result: (MOVQconst [d-c])
for {
- c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVQconst {
break
}
d := v_0.AuxInt
+ c := v.AuxInt
v.reset(OpAMD64MOVQconst)
v.AuxInt = d - c
return true
}
- // match: (SUBQconst [c] (SUBQconst [d] x))
+ // match: (SUBQconst (SUBQconst x [d]) [c])
// cond: is32Bit(-c-d)
// result: (ADDQconst [-c-d] x)
for {
- c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SUBQconst {
break
}
- d := v_0.AuxInt
x := v_0.Args[0]
+ d := v_0.AuxInt
+ c := v.AuxInt
if !(is32Bit(-c - d)) {
break
}
@@ -14893,33 +14893,33 @@ func rewriteValueAMD64_OpAMD64SUBWconst(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
- // match: (SUBWconst [c] (MOVWconst [d]))
+ // match: (SUBWconst (MOVWconst [d]) [c])
// cond:
- // result: (MOVWconst [d-c])
+ // result: (MOVWconst [int64(int16(d-c))])
for {
- c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64MOVWconst {
break
}
d := v_0.AuxInt
+ c := v.AuxInt
v.reset(OpAMD64MOVWconst)
- v.AuxInt = d - c
+ v.AuxInt = int64(int16(d - c))
return true
}
- // match: (SUBWconst [c] (SUBWconst [d] x))
+ // match: (SUBWconst (SUBWconst x [d]) [c])
// cond:
- // result: (ADDWconst [-c-d] x)
+ // result: (ADDWconst [int64(int16(-c-d))] x)
for {
- c := v.AuxInt
v_0 := v.Args[0]
if v_0.Op != OpAMD64SUBWconst {
break
}
- d := v_0.AuxInt
x := v_0.Args[0]
+ d := v_0.AuxInt
+ c := v.AuxInt
v.reset(OpAMD64ADDWconst)
- v.AuxInt = -c - d
+ v.AuxInt = int64(int16(-c - d))
v.AddArg(x)
return true
}
...
@@ -81,24 +81,6 @@ func (v *Value) AuxInt32() int32 {
return int32(v.AuxInt)
}
- // AuxInt2Int64 is used to sign extend the lower bits of AuxInt according to
- // the size of AuxInt specified in the opcode table.
- func (v *Value) AuxInt2Int64() int64 {
- switch opcodeTable[v.Op].auxType {
- case auxInt64:
- return v.AuxInt
- case auxInt32:
- return int64(int32(v.AuxInt))
- case auxInt16:
- return int64(int16(v.AuxInt))
- case auxInt8:
- return int64(int8(v.AuxInt))
- default:
- v.Fatalf("op %s doesn't have an aux int field", v.Op)
- return -1
- }
- }
func (v *Value) AuxFloat() float64 {
if opcodeTable[v.Op].auxType != auxFloat32 && opcodeTable[v.Op].auxType != auxFloat64 {
v.Fatalf("op %s doesn't have a float aux field", v.Op)
...