Commit 9514285d authored by Michael Hudson-Doyle

cmd/compile/internal/x86: avoid CX in a couple of places in the int64 code

I want to use CX as a scratch register in position-independent code, and these
uses are easy to remove.

Change-Id: I9e3cb470d7f0000d85786c30bd769d9ec86d532a
Reviewed-on: https://go-review.googlesource.com/16382
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Run-TryBot: Ian Lance Taylor <iant@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent cf125a36
@@ -113,19 +113,18 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		gins(x86.ASUBL, &lo2, &ax)
 		gins(x86.ASBBL, &hi2, &dx)
-		// let's call the next two EX and FX.
 	case gc.OMUL:
-		var ex gc.Node
+		// let's call the next three EX, FX and GX
+		var ex, fx, gx gc.Node
 		gc.Regalloc(&ex, gc.Types[gc.TPTR32], nil)
-		var fx gc.Node
 		gc.Regalloc(&fx, gc.Types[gc.TPTR32], nil)
+		gc.Regalloc(&gx, gc.Types[gc.TPTR32], nil)
-		// load args into DX:AX and EX:CX.
+		// load args into DX:AX and EX:GX.
 		gins(x86.AMOVL, &lo1, &ax)
 		gins(x86.AMOVL, &hi1, &dx)
-		gins(x86.AMOVL, &lo2, &cx)
+		gins(x86.AMOVL, &lo2, &gx)
 		gins(x86.AMOVL, &hi2, &ex)
 		// if DX and EX are zero, use 32 x 32 -> 64 unsigned multiply.
@@ -133,25 +132,26 @@ func cgen64(n *gc.Node, res *gc.Node) {
 		gins(x86.AORL, &ex, &fx)
 		p1 := gc.Gbranch(x86.AJNE, nil, 0)
-		gins(x86.AMULL, &cx, nil) // implicit &ax
+		gins(x86.AMULL, &gx, nil) // implicit &ax
 		p2 := gc.Gbranch(obj.AJMP, nil, 0)
 		gc.Patch(p1, gc.Pc)
 		// full 64x64 -> 64, from 32x32 -> 64.
-		gins(x86.AIMULL, &cx, &dx)
+		gins(x86.AIMULL, &gx, &dx)
 		gins(x86.AMOVL, &ax, &fx)
 		gins(x86.AIMULL, &ex, &fx)
 		gins(x86.AADDL, &dx, &fx)
-		gins(x86.AMOVL, &cx, &dx)
+		gins(x86.AMOVL, &gx, &dx)
 		gins(x86.AMULL, &dx, nil) // implicit &ax
 		gins(x86.AADDL, &fx, &dx)
 		gc.Patch(p2, gc.Pc)
 		gc.Regfree(&ex)
 		gc.Regfree(&fx)
+		gc.Regfree(&gx)
 		// We only rotate by a constant c in [0,64).
 		// if c >= 32:
 		//	lo, hi = hi, lo
 		//	c -= 32
...
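For reference, the OMUL case shown in the two hunks above computes the low 64 bits of a 64x64 product from 32x32 -> 64 pieces: lo1*lo2 plus the two 32-bit cross terms hi1*lo2 and lo1*hi2 shifted into the high word, with a single unsigned MULL as the fast path when both high words are zero. Below is a minimal Go sketch of that arithmetic; mul64 and its parameter names are illustrative helpers written for this note, not compiler code.

package main

import "fmt"

// mul64 multiplies two 64-bit values given as 32-bit halves and returns the
// low 64 bits of the product as (lo, hi), the way the sequence leaves DX:AX.
func mul64(lo1, hi1, lo2, hi2 uint32) (lo, hi uint32) {
	// Fast path: both high words are zero, so one 32x32 -> 64 unsigned
	// multiply (the lone MULL) is the whole answer.
	if hi1|hi2 == 0 {
		p := uint64(lo1) * uint64(lo2)
		return uint32(p), uint32(p >> 32)
	}
	// General case: the low 64 bits of the product are
	//   lo1*lo2 + ((hi1*lo2 + lo1*hi2) << 32)
	// which the IMULL/ADDL/MULL/ADDL sequence accumulates in DX:AX.
	cross := hi1*lo2 + lo1*hi2 // only the low 32 bits of each term survive
	p := uint64(lo1) * uint64(lo2)
	return uint32(p), uint32(p>>32) + cross
}

func main() {
	x, y := uint64(0x1234567890abcdef), uint64(0xfedcba0987654321)
	lo, hi := mul64(uint32(x), uint32(x>>32), uint32(y), uint32(y>>32))
	fmt.Printf("%#016x\n", uint64(hi)<<32|uint64(lo))
	fmt.Printf("%#016x\n", x*y) // same low 64 bits
}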
@@ -952,15 +952,13 @@ func gmove(f *gc.Node, t *gc.Node) {
 	} else {
 		// Implementation of conversion-free x = y for int64 or uint64 x.
 		// This is generated by the code that copies small values out of closures,
-		// and that code has DX live, so avoid DX and use CX instead.
+		// and that code has DX live, so avoid DX and just use AX twice.
 		var r1 gc.Node
 		gc.Nodreg(&r1, gc.Types[gc.TUINT32], x86.REG_AX)
-		var r2 gc.Node
-		gc.Nodreg(&r2, gc.Types[gc.TUINT32], x86.REG_CX)
 		gins(x86.AMOVL, &flo, &r1)
-		gins(x86.AMOVL, &fhi, &r2)
 		gins(x86.AMOVL, &r1, &tlo)
-		gins(x86.AMOVL, &r2, &thi)
+		gins(x86.AMOVL, &fhi, &r1)
+		gins(x86.AMOVL, &r1, &thi)
 	}
 	splitclean()
...
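The gmove hunk trades the second scratch register (previously CX) for reusing AX twice: the low half of the value moves through AX first, then the high half. Here is a minimal Go sketch of that data movement, assuming the 64-bit value has already been split into 32-bit halves the way flo/fhi and tlo/thi are in the compiler; the pair type and the copy64 helper are illustrative only.

package main

import "fmt"

// pair stands in for the two 32-bit halves that gmove's splitting produces.
type pair struct{ lo, hi uint32 }

// copy64 mirrors the new instruction sequence: a single scratch word (AX in
// the generated code) carries the low half and then the high half, so the
// second register that used to be CX is no longer needed.
func copy64(dst, src *pair) {
	scratch := src.lo // MOVL flo, AX
	dst.lo = scratch  // MOVL AX, tlo
	scratch = src.hi  // MOVL fhi, AX
	dst.hi = scratch  // MOVL AX, thi
}

func main() {
	src := pair{lo: 0x90abcdef, hi: 0x12345678}
	var dst pair
	copy64(&dst, &src)
	fmt.Printf("%#x%08x\n", dst.hi, dst.lo) // 0x1234567890abcdef
}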