Commit 4a7aba77 authored by Keith Randall

cmd/compile: better job of naming compound types

Compound AUTO types weren't named previously.  That was because live
variable analysis (plive.go) doesn't handle spilling to compound types.
It can't handle them because there is no valid place to put VARDEFs when
regalloc is spilling compound types.

compound types = multiword builtin types: complex, string, slice, and
interface.
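
For reference, here is a minimal sketch of the word-level layout of these
types, written as ordinary Go structs purely for illustration (the field
names are informal, not the runtime's own headers; T stands for an
arbitrary element type):

type stringWords struct {
    ptr *byte // word 0: pointer to the backing bytes
    len int   // word 1: length
}

type sliceWords struct {
    ptr *byte // word 0: pointer to the backing array (really *T)
    len int   // word 1: length
    cap int   // word 2: capacity
}

type ifaceWords struct {
    tab  *byte // word 0: itab pointer (type pointer for empty interfaces)
    data *byte // word 1: data pointer
}

type complex128Words struct {
    real, imag float64 // complex64 uses two float32 halves instead
}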

Instead, we split named AUTOs into individual one-word variables.  For
example, a string s gets split into a byte ptr s.ptr and an integer
s.len.  Those two variables can be spilled to / restored from
independently.  As a result, live variable analysis can handle them
because they are one-word objects.
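
Concretely, the SplitString/SplitSlice/SplitInterface/SplitComplex helpers
added below produce the following part names and types for a named,
non-addrtaken AUTO (T and I stand for arbitrary element and interface
types):

    var s string      // -> s.ptr (*uint8), s.len (int)
    var v []T         // -> v.ptr (*T), v.len (int), v.cap (int)
    var i I           // -> i.itab (*uint8), i.data (*uint8)
    var e interface{} // -> e.type (*uint8), e.data (*uint8)
    var c complex128  // -> c.real (float64), c.imag (float64)
    var c2 complex64  // -> c2.real (float32), c2.imag (float32)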

This CL will change how AUTOs are described in DWARF information.
Consider the code:

func f(s string, i int) int {
    x := s[i:i+5]
    g()
    return lookup(x)
}

The old compiler would spill x to two consecutive slots on the stack,
both named x (at offsets 0 and 8).  The new compiler spills the pointer
of x to a slot named x.ptr.  It doesn't spill x.len at all, as it is a
constant (5) and can be rematerialized for the call to lookup.
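
The new names also show up in the re-enabled liveness tests in this CL.
For example, this case from the updated test now expects the split name
of the spilled string part:

func f13() {
    s := g14()
    s = h13(s, g13(s)) // ERROR "live at call to g13: s.ptr$"
}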

So compound objects may not be spilled in their entirety, and even if
they are, they won't necessarily be contiguous.  Such is the price of
optimization.

Re-enable live variable analysis tests.  One test remains disabled; it
fails because of #14904.

Change-Id: I8ef2b5ab91e43a0d2136bfc231c05d100ec0b801
Reviewed-on: https://go-review.googlesource.com/21233
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
parent e55896b9
......@@ -3762,12 +3762,6 @@ func (s *state) addNamedValue(n *Node, v *ssa.Value) {
// Don't track autotmp_ variables.
return
}
if n.Class == PAUTO && (v.Type.IsString() || v.Type.IsSlice() || v.Type.IsInterface()) {
// TODO: can't handle auto compound objects with pointers yet.
// The live variable analysis barfs because we don't put VARDEF
// pseudos in the right place when we spill to these nodes.
return
}
if n.Class == PPARAMOUT {
// Don't track named output values. This prevents return values
// from being assigned too early. See #14591 and #14762. TODO: allow this.
......@@ -4175,6 +4169,96 @@ func (e *ssaExport) Auto(t ssa.Type) ssa.GCNode {
return n
}
func (e *ssaExport) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := Ptrto(Types[TUINT8])
lenType := Types[TINT]
if n.Class == PAUTO && !n.Addrtaken {
// Split this string up into two separate variables.
p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
l := e.namedAuto(n.Sym.Name+".len", lenType)
return ssa.LocalSlot{p, ptrType, 0}, ssa.LocalSlot{l, lenType, 0}
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{n, ptrType, name.Off}, ssa.LocalSlot{n, lenType, name.Off + int64(Widthptr)}
}
func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
t := Ptrto(Types[TUINT8])
if n.Class == PAUTO && !n.Addrtaken {
// Split this interface up into two separate variables.
f := ".itab"
if isnilinter(n.Type) {
f = ".type"
}
c := e.namedAuto(n.Sym.Name+f, t)
d := e.namedAuto(n.Sym.Name+".data", t)
return ssa.LocalSlot{c, t, 0}, ssa.LocalSlot{d, t, 0}
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{n, t, name.Off}, ssa.LocalSlot{n, t, name.Off + int64(Widthptr)}
}
func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
ptrType := Ptrto(n.Type.Type)
lenType := Types[TINT]
if n.Class == PAUTO && !n.Addrtaken {
// Split this slice up into three separate variables.
p := e.namedAuto(n.Sym.Name+".ptr", ptrType)
l := e.namedAuto(n.Sym.Name+".len", lenType)
c := e.namedAuto(n.Sym.Name+".cap", lenType)
return ssa.LocalSlot{p, ptrType, 0}, ssa.LocalSlot{l, lenType, 0}, ssa.LocalSlot{c, lenType, 0}
}
// Return the three parts of the larger variable.
return ssa.LocalSlot{n, ptrType, name.Off},
ssa.LocalSlot{n, lenType, name.Off + int64(Widthptr)},
ssa.LocalSlot{n, lenType, name.Off + int64(2*Widthptr)}
}
func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
s := name.Type.Size() / 2
var t *Type
if s == 8 {
t = Types[TFLOAT64]
} else {
t = Types[TFLOAT32]
}
if n.Class == PAUTO && !n.Addrtaken {
// Split this complex up into two separate variables.
c := e.namedAuto(n.Sym.Name+".real", t)
d := e.namedAuto(n.Sym.Name+".imag", t)
return ssa.LocalSlot{c, t, 0}, ssa.LocalSlot{d, t, 0}
}
// Return the two parts of the larger variable.
return ssa.LocalSlot{n, t, name.Off}, ssa.LocalSlot{n, t, name.Off + s}
}
// namedAuto returns a new AUTO variable with the given name and type.
func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode {
t := typ.(*Type)
s := Lookup(name)
n := Nod(ONAME, nil, nil)
s.Def = n
s.Def.Used = true
n.Sym = s
n.Type = t
n.Class = PAUTO
n.Addable = true
n.Ullman = 1
n.Esc = EscNever
n.Xoffset = 0
n.Name.Curfn = Curfn
Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
dowidth(t)
e.mustImplement = true
return n
}
func (e *ssaExport) CanSSA(t ssa.Type) bool {
return canSSAType(t.(*Type))
}
......
......@@ -97,6 +97,13 @@ type Frontend interface {
// The SSA compiler uses this function to allocate space for spills.
Auto(Type) GCNode
// Given the name for a compound type, returns the name we should use
// for the parts of that compound type.
SplitString(LocalSlot) (LocalSlot, LocalSlot)
SplitInterface(LocalSlot) (LocalSlot, LocalSlot)
SplitSlice(LocalSlot) (LocalSlot, LocalSlot, LocalSlot)
SplitComplex(LocalSlot) (LocalSlot, LocalSlot)
// Line returns a string describing the given line number.
Line(int32) string
}
......
......@@ -31,8 +31,7 @@ func decomposeBuiltIn(f *Func) {
} else {
elemType = f.Config.fe.TypeFloat32()
}
rName := LocalSlot{name.N, elemType, name.Off}
iName := LocalSlot{name.N, elemType, name.Off + elemType.Size()}
rName, iName := f.Config.fe.SplitComplex(name)
f.Names = append(f.Names, rName, iName)
for _, v := range f.NamedValues[name] {
r := v.Block.NewValue1(v.Line, OpComplexReal, elemType, v)
......@@ -43,8 +42,7 @@ func decomposeBuiltIn(f *Func) {
case t.IsString():
ptrType := f.Config.fe.TypeBytePtr()
lenType := f.Config.fe.TypeInt()
ptrName := LocalSlot{name.N, ptrType, name.Off}
lenName := LocalSlot{name.N, lenType, name.Off + f.Config.PtrSize}
ptrName, lenName := f.Config.fe.SplitString(name)
f.Names = append(f.Names, ptrName, lenName)
for _, v := range f.NamedValues[name] {
ptr := v.Block.NewValue1(v.Line, OpStringPtr, ptrType, v)
......@@ -55,9 +53,7 @@ func decomposeBuiltIn(f *Func) {
case t.IsSlice():
ptrType := f.Config.fe.TypeBytePtr()
lenType := f.Config.fe.TypeInt()
ptrName := LocalSlot{name.N, ptrType, name.Off}
lenName := LocalSlot{name.N, lenType, name.Off + f.Config.PtrSize}
capName := LocalSlot{name.N, lenType, name.Off + 2*f.Config.PtrSize}
ptrName, lenName, capName := f.Config.fe.SplitSlice(name)
f.Names = append(f.Names, ptrName, lenName, capName)
for _, v := range f.NamedValues[name] {
ptr := v.Block.NewValue1(v.Line, OpSlicePtr, ptrType, v)
......@@ -69,8 +65,7 @@ func decomposeBuiltIn(f *Func) {
}
case t.IsInterface():
ptrType := f.Config.fe.TypeBytePtr()
typeName := LocalSlot{name.N, ptrType, name.Off}
dataName := LocalSlot{name.N, ptrType, name.Off + f.Config.PtrSize}
typeName, dataName := f.Config.fe.SplitInterface(name)
f.Names = append(f.Names, typeName, dataName)
for _, v := range f.NamedValues[name] {
typ := v.Block.NewValue1(v.Line, OpITab, ptrType, v)
......
......@@ -31,6 +31,23 @@ func (DummyFrontend) StringData(s string) interface{} {
func (DummyFrontend) Auto(t Type) GCNode {
return nil
}
func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
return LocalSlot{s.N, d.TypeBytePtr(), s.Off}, LocalSlot{s.N, d.TypeInt(), s.Off + 8}
}
func (d DummyFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
return LocalSlot{s.N, d.TypeBytePtr(), s.Off}, LocalSlot{s.N, d.TypeBytePtr(), s.Off + 8}
}
func (d DummyFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
return LocalSlot{s.N, s.Type.ElemType().PtrTo(), s.Off},
LocalSlot{s.N, d.TypeInt(), s.Off + 8},
LocalSlot{s.N, d.TypeInt(), s.Off + 16}
}
func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
if s.Type.Size() == 16 {
return LocalSlot{s.N, d.TypeFloat64(), s.Off}, LocalSlot{s.N, d.TypeFloat64(), s.Off + 8}
}
return LocalSlot{s.N, d.TypeFloat32(), s.Off}, LocalSlot{s.N, d.TypeFloat32(), s.Off + 4}
}
func (DummyFrontend) Line(line int32) string {
return "unknown.go:0"
}
......
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains rules to decompose builtin compound types
// (complex,string,slice,interface) into their constituent
// types. These rules work together with the decomposeBuiltIn
// pass which handles phis of these types.
// complex ops
(ComplexReal (ComplexMake real _ )) -> real
(ComplexImag (ComplexMake _ imag )) -> imag
(Load <t> ptr mem) && t.IsComplex() && t.Size() == 8 ->
(ComplexMake
(Load <config.fe.TypeFloat32()> ptr mem)
(Load <config.fe.TypeFloat32()>
(OffPtr <config.fe.TypeFloat32().PtrTo()> [4] ptr)
mem)
)
(Store [8] dst (ComplexMake real imag) mem) ->
(Store [4]
(OffPtr <config.fe.TypeFloat32().PtrTo()> [4] dst)
imag
(Store [4] dst real mem))
(Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 ->
(ComplexMake
(Load <config.fe.TypeFloat64()> ptr mem)
(Load <config.fe.TypeFloat64()>
(OffPtr <config.fe.TypeFloat64().PtrTo()> [8] ptr)
mem)
)
(Store [16] dst (ComplexMake real imag) mem) ->
(Store [8]
(OffPtr <config.fe.TypeFloat64().PtrTo()> [8] dst)
imag
(Store [8] dst real mem))
// string ops
(StringPtr (StringMake ptr _)) -> ptr
(StringLen (StringMake _ len)) -> len
(Load <t> ptr mem) && t.IsString() ->
(StringMake
(Load <config.fe.TypeBytePtr()> ptr mem)
(Load <config.fe.TypeInt()>
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
mem))
(Store [2*config.PtrSize] dst (StringMake ptr len) mem) ->
(Store [config.PtrSize]
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst)
len
(Store [config.PtrSize] dst ptr mem))
// slice ops
(SlicePtr (SliceMake ptr _ _ )) -> ptr
(SliceLen (SliceMake _ len _)) -> len
(SliceCap (SliceMake _ _ cap)) -> cap
(Load <t> ptr mem) && t.IsSlice() ->
(SliceMake
(Load <t.ElemType().PtrTo()> ptr mem)
(Load <config.fe.TypeInt()>
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
mem)
(Load <config.fe.TypeInt()>
(OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr)
mem))
(Store [3*config.PtrSize] dst (SliceMake ptr len cap) mem) ->
(Store [config.PtrSize]
(OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] dst)
cap
(Store [config.PtrSize]
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst)
len
(Store [config.PtrSize] dst ptr mem)))
// interface ops
(ITab (IMake itab _)) -> itab
(IData (IMake _ data)) -> data
(Load <t> ptr mem) && t.IsInterface() ->
(IMake
(Load <config.fe.TypeBytePtr()> ptr mem)
(Load <config.fe.TypeBytePtr()>
(OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr)
mem))
(Store [2*config.PtrSize] dst (IMake itab data) mem) ->
(Store [config.PtrSize]
(OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst)
data
(Store [config.PtrSize] dst itab mem))
......@@ -444,7 +444,6 @@
(EqSlice x y) -> (EqPtr (SlicePtr x) (SlicePtr y))
(NeqSlice x y) -> (NeqPtr (SlicePtr x) (SlicePtr y))
// Load of store of same address, with compatibly typed value and same size
(Load <t1> p1 (Store [w] p2 x _)) && isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && w == t1.Size() -> x
......@@ -452,7 +451,6 @@
(OffPtr (OffPtr p [b]) [a]) -> (OffPtr p [a+b])
(OffPtr p [0]) && v.Type.Compare(p.Type) == CMPeq -> p
// indexing operations
// Note: bounds check has already been done
(ArrayIndex <t> [0] x:(Load ptr mem)) -> @x.Block (Load <t> ptr mem)
......@@ -523,40 +521,14 @@
f1
(Store [t.FieldType(0).Size()] dst f0 mem))))
// complex ops
(ComplexReal (ComplexMake real _ )) -> real
(ComplexImag (ComplexMake _ imag )) -> imag
(Load <t> ptr mem) && t.IsComplex() && t.Size() == 8 ->
(ComplexMake
(Load <config.fe.TypeFloat32()> ptr mem)
(Load <config.fe.TypeFloat32()>
(OffPtr <config.fe.TypeFloat32().PtrTo()> [4] ptr)
mem)
)
(Store [8] dst (ComplexMake real imag) mem) ->
(Store [4]
(OffPtr <config.fe.TypeFloat32().PtrTo()> [4] dst)
imag
(Store [4] dst real mem))
(Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 ->
(ComplexMake
(Load <config.fe.TypeFloat64()> ptr mem)
(Load <config.fe.TypeFloat64()>
(OffPtr <config.fe.TypeFloat64().PtrTo()> [8] ptr)
mem)
)
(Store [16] dst (ComplexMake real imag) mem) ->
(Store [8]
(OffPtr <config.fe.TypeFloat64().PtrTo()> [8] dst)
imag
(Store [8] dst real mem))
// un-SSAable values use mem->mem copies
(Store [size] dst (Load <t> src mem) mem) && !config.fe.CanSSA(t) -> (Move [size] dst src mem)
(Store [size] dst (Load <t> src mem) (VarDef {x} mem)) && !config.fe.CanSSA(t) -> (Move [size] dst src (VarDef {x} mem))
// string ops
// Decomposing StringMake and lowering of StringPtr and StringLen
// happens in a later pass, dec, so that these operations are available
// to otherpasses for optimizations.
// to other passes for optimizations.
(StringPtr (StringMake (Const64 <t> [c]) _)) -> (Const64 <t> [c])
(StringLen (StringMake _ (Const64 <t> [c]))) -> (Const64 <t> [c])
(ConstString {s}) && config.PtrSize == 4 && s.(string) == "" ->
......@@ -573,81 +545,32 @@
(Addr <config.fe.TypeBytePtr()> {config.fe.StringData(s.(string))}
(SB))
(Const64 <config.fe.TypeInt()> [int64(len(s.(string)))]))
(Load <t> ptr mem) && t.IsString() ->
(StringMake
(Load <config.fe.TypeBytePtr()> ptr mem)
(Load <config.fe.TypeInt()>
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
mem))
(Store [2*config.PtrSize] dst (StringMake ptr len) mem) ->
(Store [config.PtrSize]
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst)
len
(Store [config.PtrSize] dst ptr mem))
// slice ops
// Decomposing SliceMake, and lowering of SlicePtr, SliceLen, and SliceCap
// happens in a later pass, dec, so that these operations are available
// to other passes for optimizations.
(SlicePtr (SliceMake (Const64 <t> [c]) _ _)) -> (Const64 <t> [c])
// Only a few slice rules are provided here. See dec.rules for
// a more comprehensive set.
(SliceLen (SliceMake _ (Const64 <t> [c]) _)) -> (Const64 <t> [c])
(SliceCap (SliceMake _ _ (Const64 <t> [c]))) -> (Const64 <t> [c])
(SlicePtr (SliceMake (SlicePtr x) _ _)) -> (SlicePtr x)
(SliceLen (SliceMake _ (SliceLen x) _)) -> (SliceLen x)
(SliceCap (SliceMake _ _ (SliceCap x))) -> (SliceCap x)
(SliceCap (SliceMake _ _ (SliceLen x))) -> (SliceLen x)
(ConstSlice) && config.PtrSize == 4 ->
(SliceMake
(ConstNil <config.fe.TypeBytePtr()>)
(ConstNil <v.Type.ElemType().PtrTo()>)
(Const32 <config.fe.TypeInt()> [0])
(Const32 <config.fe.TypeInt()> [0]))
(ConstSlice) && config.PtrSize == 8 ->
(SliceMake
(ConstNil <config.fe.TypeBytePtr()>)
(ConstNil <v.Type.ElemType().PtrTo()>)
(Const64 <config.fe.TypeInt()> [0])
(Const64 <config.fe.TypeInt()> [0]))
(Load <t> ptr mem) && t.IsSlice() ->
(SliceMake
(Load <config.fe.TypeBytePtr()> ptr mem)
(Load <config.fe.TypeInt()>
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
mem)
(Load <config.fe.TypeInt()>
(OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr)
mem))
(Store [3*config.PtrSize] dst (SliceMake ptr len cap) mem) ->
(Store [config.PtrSize]
(OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] dst)
cap
(Store [config.PtrSize]
(OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst)
len
(Store [config.PtrSize] dst ptr mem)))
// interface ops
(ITab (IMake itab _)) -> itab
(IData (IMake _ data)) -> data
(ConstInterface) ->
(IMake
(ConstNil <config.fe.TypeBytePtr()>)
(ConstNil <config.fe.TypeBytePtr()>))
(Load <t> ptr mem) && t.IsInterface() ->
(IMake
(Load <config.fe.TypeBytePtr()> ptr mem)
(Load <config.fe.TypeBytePtr()>
(OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr)
mem))
(Store [2*config.PtrSize] dst (IMake itab data) mem) ->
(Store [config.PtrSize]
(OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst)
data
(Store [config.PtrSize] dst itab mem))
// un-SSAable values use mem->mem copies
(Store [size] dst (Load <t> src mem) mem) && !config.fe.CanSSA(t) -> (Move [size] dst src mem)
(Store [size] dst (Load <t> src mem) (VarDef {x} mem)) && !config.fe.CanSSA(t) -> (Move [size] dst src (VarDef {x} mem))
(Check (NilCheck (GetG _) _) next) -> (Plain nil next)
......@@ -668,7 +591,7 @@
(Arg {n} [off]) && v.Type.IsSlice() ->
(SliceMake
(Arg <config.fe.TypeBytePtr()> {n} [off])
(Arg <v.Type.ElemType().PtrTo()> {n} [off])
(Arg <config.fe.TypeInt()> {n} [off+config.PtrSize])
(Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
......
......@@ -8,12 +8,24 @@ import "math"
var _ = math.MinInt8 // in case not otherwise used
func rewriteValuedec(v *Value, config *Config) bool {
switch v.Op {
case OpComplexImag:
return rewriteValuedec_OpComplexImag(v, config)
case OpComplexReal:
return rewriteValuedec_OpComplexReal(v, config)
case OpIData:
return rewriteValuedec_OpIData(v, config)
case OpITab:
return rewriteValuedec_OpITab(v, config)
case OpLoad:
return rewriteValuedec_OpLoad(v, config)
case OpSliceCap:
return rewriteValuedec_OpSliceCap(v, config)
case OpSliceLen:
return rewriteValuedec_OpSliceLen(v, config)
case OpSlicePtr:
return rewriteValuedec_OpSlicePtr(v, config)
case OpStore:
return rewriteValuedec_OpStore(v, config)
case OpStringLen:
return rewriteValuedec_OpStringLen(v, config)
case OpStringPtr:
......@@ -21,6 +33,214 @@ func rewriteValuedec(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpComplexImag(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ComplexImag (ComplexMake _ imag ))
// cond:
// result: imag
for {
v_0 := v.Args[0]
if v_0.Op != OpComplexMake {
break
}
imag := v_0.Args[1]
v.reset(OpCopy)
v.Type = imag.Type
v.AddArg(imag)
return true
}
return false
}
func rewriteValuedec_OpComplexReal(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ComplexReal (ComplexMake real _ ))
// cond:
// result: real
for {
v_0 := v.Args[0]
if v_0.Op != OpComplexMake {
break
}
real := v_0.Args[0]
v.reset(OpCopy)
v.Type = real.Type
v.AddArg(real)
return true
}
return false
}
func rewriteValuedec_OpIData(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (IData (IMake _ data))
// cond:
// result: data
for {
v_0 := v.Args[0]
if v_0.Op != OpIMake {
break
}
data := v_0.Args[1]
v.reset(OpCopy)
v.Type = data.Type
v.AddArg(data)
return true
}
return false
}
func rewriteValuedec_OpITab(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ITab (IMake itab _))
// cond:
// result: itab
for {
v_0 := v.Args[0]
if v_0.Op != OpIMake {
break
}
itab := v_0.Args[0]
v.reset(OpCopy)
v.Type = itab.Type
v.AddArg(itab)
return true
}
return false
}
func rewriteValuedec_OpLoad(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 8
// result: (ComplexMake (Load <config.fe.TypeFloat32()> ptr mem) (Load <config.fe.TypeFloat32()> (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsComplex() && t.Size() == 8) {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat32())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat32())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat32().PtrTo())
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 16
// result: (ComplexMake (Load <config.fe.TypeFloat64()> ptr mem) (Load <config.fe.TypeFloat64()> (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsComplex() && t.Size() == 16) {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat64())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat64())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat64().PtrTo())
v2.AuxInt = 8
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Load <t> ptr mem)
// cond: t.IsString()
// result: (StringMake (Load <config.fe.TypeBytePtr()> ptr mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsString()) {
break
}
v.reset(OpStringMake)
v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeBytePtr())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Load <t> ptr mem)
// cond: t.IsSlice()
// result: (SliceMake (Load <t.ElemType().PtrTo()> ptr mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsSlice()) {
break
}
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Line, OpLoad, t.ElemType().PtrTo())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
v3 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt())
v4 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v4.AuxInt = 2 * config.PtrSize
v4.AddArg(ptr)
v3.AddArg(v4)
v3.AddArg(mem)
v.AddArg(v3)
return true
}
// match: (Load <t> ptr mem)
// cond: t.IsInterface()
// result: (IMake (Load <config.fe.TypeBytePtr()> ptr mem) (Load <config.fe.TypeBytePtr()> (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsInterface()) {
break
}
v.reset(OpIMake)
v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeBytePtr())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeBytePtr())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeBytePtr().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
return false
}
func rewriteValuedec_OpSliceCap(v *Value, config *Config) bool {
b := v.Block
_ = b
......@@ -78,6 +298,170 @@ func rewriteValuedec_OpSlicePtr(v *Value, config *Config) bool {
}
return false
}
func rewriteValuedec_OpStore(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (Store [8] dst (ComplexMake real imag) mem)
// cond:
// result: (Store [4] (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] dst) imag (Store [4] dst real mem))
for {
if v.AuxInt != 8 {
break
}
dst := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpComplexMake {
break
}
real := v_1.Args[0]
imag := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
v.AuxInt = 4
v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat32().PtrTo())
v0.AuxInt = 4
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
v1 := b.NewValue0(v.Line, OpStore, TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Store [16] dst (ComplexMake real imag) mem)
// cond:
// result: (Store [8] (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] dst) imag (Store [8] dst real mem))
for {
if v.AuxInt != 16 {
break
}
dst := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpComplexMake {
break
}
real := v_1.Args[0]
imag := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
v.AuxInt = 8
v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat64().PtrTo())
v0.AuxInt = 8
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
v1 := b.NewValue0(v.Line, OpStore, TypeMem)
v1.AuxInt = 8
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Store [2*config.PtrSize] dst (StringMake ptr len) mem)
// cond:
// result: (Store [config.PtrSize] (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store [config.PtrSize] dst ptr mem))
for {
if v.AuxInt != 2*config.PtrSize {
break
}
dst := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpStringMake {
break
}
ptr := v_1.Args[0]
len := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
v.AuxInt = config.PtrSize
v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(len)
v1 := b.NewValue0(v.Line, OpStore, TypeMem)
v1.AuxInt = config.PtrSize
v1.AddArg(dst)
v1.AddArg(ptr)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Store [3*config.PtrSize] dst (SliceMake ptr len cap) mem)
// cond:
// result: (Store [config.PtrSize] (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] dst) cap (Store [config.PtrSize] (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store [config.PtrSize] dst ptr mem)))
for {
if v.AuxInt != 3*config.PtrSize {
break
}
dst := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpSliceMake {
break
}
ptr := v_1.Args[0]
len := v_1.Args[1]
cap := v_1.Args[2]
mem := v.Args[2]
v.reset(OpStore)
v.AuxInt = config.PtrSize
v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v0.AuxInt = 2 * config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(cap)
v1 := b.NewValue0(v.Line, OpStore, TypeMem)
v1.AuxInt = config.PtrSize
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(len)
v3 := b.NewValue0(v.Line, OpStore, TypeMem)
v3.AuxInt = config.PtrSize
v3.AddArg(dst)
v3.AddArg(ptr)
v3.AddArg(mem)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
// match: (Store [2*config.PtrSize] dst (IMake itab data) mem)
// cond:
// result: (Store [config.PtrSize] (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst) data (Store [config.PtrSize] dst itab mem))
for {
if v.AuxInt != 2*config.PtrSize {
break
}
dst := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpIMake {
break
}
itab := v_1.Args[0]
data := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
v.AuxInt = config.PtrSize
v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeBytePtr().PtrTo())
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(data)
v1 := b.NewValue0(v.Line, OpStore, TypeMem)
v1.AuxInt = config.PtrSize
v1.AddArg(dst)
v1.AddArg(itab)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
return false
}
func rewriteValuedec_OpStringLen(v *Value, config *Config) bool {
b := v.Block
_ = b
......
......@@ -42,10 +42,6 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
return rewriteValuegeneric_OpCom64(v, config)
case OpCom8:
return rewriteValuegeneric_OpCom8(v, config)
case OpComplexImag:
return rewriteValuegeneric_OpComplexImag(v, config)
case OpComplexReal:
return rewriteValuegeneric_OpComplexReal(v, config)
case OpConstInterface:
return rewriteValuegeneric_OpConstInterface(v, config)
case OpConstSlice:
......@@ -108,10 +104,6 @@ func rewriteValuegeneric(v *Value, config *Config) bool {
return rewriteValuegeneric_OpGreater8(v, config)
case OpGreater8U:
return rewriteValuegeneric_OpGreater8U(v, config)
case OpIData:
return rewriteValuegeneric_OpIData(v, config)
case OpITab:
return rewriteValuegeneric_OpITab(v, config)
case OpIsInBounds:
return rewriteValuegeneric_OpIsInBounds(v, config)
case OpIsSliceInBounds:
......@@ -1020,7 +1012,7 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
}
// match: (Arg {n} [off])
// cond: v.Type.IsSlice()
// result: (SliceMake (Arg <config.fe.TypeBytePtr()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]) (Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
// result: (SliceMake (Arg <v.Type.ElemType().PtrTo()> {n} [off]) (Arg <config.fe.TypeInt()> {n} [off+config.PtrSize]) (Arg <config.fe.TypeInt()> {n} [off+2*config.PtrSize]))
for {
n := v.Aux
off := v.AuxInt
......@@ -1028,7 +1020,7 @@ func rewriteValuegeneric_OpArg(v *Value, config *Config) bool {
break
}
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Line, OpArg, config.fe.TypeBytePtr())
v0 := b.NewValue0(v.Line, OpArg, v.Type.ElemType().PtrTo())
v0.Aux = n
v0.AuxInt = off
v.AddArg(v0)
......@@ -1310,44 +1302,6 @@ func rewriteValuegeneric_OpCom8(v *Value, config *Config) bool {
}
return false
}
func rewriteValuegeneric_OpComplexImag(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ComplexImag (ComplexMake _ imag ))
// cond:
// result: imag
for {
v_0 := v.Args[0]
if v_0.Op != OpComplexMake {
break
}
imag := v_0.Args[1]
v.reset(OpCopy)
v.Type = imag.Type
v.AddArg(imag)
return true
}
return false
}
func rewriteValuegeneric_OpComplexReal(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ComplexReal (ComplexMake real _ ))
// cond:
// result: real
for {
v_0 := v.Args[0]
if v_0.Op != OpComplexMake {
break
}
real := v_0.Args[0]
v.reset(OpCopy)
v.Type = real.Type
v.AddArg(real)
return true
}
return false
}
func rewriteValuegeneric_OpConstInterface(v *Value, config *Config) bool {
b := v.Block
_ = b
......@@ -1369,13 +1323,13 @@ func rewriteValuegeneric_OpConstSlice(v *Value, config *Config) bool {
_ = b
// match: (ConstSlice)
// cond: config.PtrSize == 4
// result: (SliceMake (ConstNil <config.fe.TypeBytePtr()>) (Const32 <config.fe.TypeInt()> [0]) (Const32 <config.fe.TypeInt()> [0]))
// result: (SliceMake (ConstNil <v.Type.ElemType().PtrTo()>) (Const32 <config.fe.TypeInt()> [0]) (Const32 <config.fe.TypeInt()> [0]))
for {
if !(config.PtrSize == 4) {
break
}
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Line, OpConstNil, config.fe.TypeBytePtr())
v0 := b.NewValue0(v.Line, OpConstNil, v.Type.ElemType().PtrTo())
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpConst32, config.fe.TypeInt())
v1.AuxInt = 0
......@@ -1387,13 +1341,13 @@ func rewriteValuegeneric_OpConstSlice(v *Value, config *Config) bool {
}
// match: (ConstSlice)
// cond: config.PtrSize == 8
// result: (SliceMake (ConstNil <config.fe.TypeBytePtr()>) (Const64 <config.fe.TypeInt()> [0]) (Const64 <config.fe.TypeInt()> [0]))
// result: (SliceMake (ConstNil <v.Type.ElemType().PtrTo()>) (Const64 <config.fe.TypeInt()> [0]) (Const64 <config.fe.TypeInt()> [0]))
for {
if !(config.PtrSize == 8) {
break
}
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Line, OpConstNil, config.fe.TypeBytePtr())
v0 := b.NewValue0(v.Line, OpConstNil, v.Type.ElemType().PtrTo())
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpConst64, config.fe.TypeInt())
v1.AuxInt = 0
......@@ -2680,44 +2634,6 @@ func rewriteValuegeneric_OpGreater8U(v *Value, config *Config) bool {
}
return false
}
func rewriteValuegeneric_OpIData(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (IData (IMake _ data))
// cond:
// result: data
for {
v_0 := v.Args[0]
if v_0.Op != OpIMake {
break
}
data := v_0.Args[1]
v.reset(OpCopy)
v.Type = data.Type
v.AddArg(data)
return true
}
return false
}
func rewriteValuegeneric_OpITab(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (ITab (IMake itab _))
// cond:
// result: itab
for {
v_0 := v.Args[0]
if v_0.Op != OpIMake {
break
}
itab := v_0.Args[0]
v.reset(OpCopy)
v.Type = itab.Type
v.AddArg(itab)
return true
}
return false
}
func rewriteValuegeneric_OpIsInBounds(v *Value, config *Config) bool {
b := v.Block
_ = b
......@@ -3567,133 +3483,6 @@ func rewriteValuegeneric_OpLoad(v *Value, config *Config) bool {
v.AddArg(v5)
return true
}
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 8
// result: (ComplexMake (Load <config.fe.TypeFloat32()> ptr mem) (Load <config.fe.TypeFloat32()> (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsComplex() && t.Size() == 8) {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat32())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat32())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat32().PtrTo())
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Load <t> ptr mem)
// cond: t.IsComplex() && t.Size() == 16
// result: (ComplexMake (Load <config.fe.TypeFloat64()> ptr mem) (Load <config.fe.TypeFloat64()> (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsComplex() && t.Size() == 16) {
break
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat64())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeFloat64())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat64().PtrTo())
v2.AuxInt = 8
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Load <t> ptr mem)
// cond: t.IsString()
// result: (StringMake (Load <config.fe.TypeBytePtr()> ptr mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsString()) {
break
}
v.reset(OpStringMake)
v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeBytePtr())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Load <t> ptr mem)
// cond: t.IsSlice()
// result: (SliceMake (Load <config.fe.TypeBytePtr()> ptr mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem) (Load <config.fe.TypeInt()> (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsSlice()) {
break
}
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeBytePtr())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
v3 := b.NewValue0(v.Line, OpLoad, config.fe.TypeInt())
v4 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v4.AuxInt = 2 * config.PtrSize
v4.AddArg(ptr)
v3.AddArg(v4)
v3.AddArg(mem)
v.AddArg(v3)
return true
}
// match: (Load <t> ptr mem)
// cond: t.IsInterface()
// result: (IMake (Load <config.fe.TypeBytePtr()> ptr mem) (Load <config.fe.TypeBytePtr()> (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr) mem))
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
if !(t.IsInterface()) {
break
}
v.reset(OpIMake)
v0 := b.NewValue0(v.Line, OpLoad, config.fe.TypeBytePtr())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Line, OpLoad, config.fe.TypeBytePtr())
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeBytePtr().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
return false
}
func rewriteValuegeneric_OpLsh16x16(v *Value, config *Config) bool {
......@@ -7735,25 +7524,6 @@ func rewriteValuegeneric_OpSliceLen(v *Value, config *Config) bool {
func rewriteValuegeneric_OpSlicePtr(v *Value, config *Config) bool {
b := v.Block
_ = b
// match: (SlicePtr (SliceMake (Const64 <t> [c]) _ _))
// cond:
// result: (Const64 <t> [c])
for {
v_0 := v.Args[0]
if v_0.Op != OpSliceMake {
break
}
v_0_0 := v_0.Args[0]
if v_0_0.Op != OpConst64 {
break
}
t := v_0_0.Type
c := v_0_0.AuxInt
v.reset(OpConst64)
v.Type = t
v.AuxInt = c
return true
}
// match: (SlicePtr (SliceMake (SlicePtr x) _ _))
// cond:
// result: (SlicePtr x)
......@@ -7920,165 +7690,6 @@ func rewriteValuegeneric_OpStore(v *Value, config *Config) bool {
v.AddArg(v1)
return true
}
// match: (Store [8] dst (ComplexMake real imag) mem)
// cond:
// result: (Store [4] (OffPtr <config.fe.TypeFloat32().PtrTo()> [4] dst) imag (Store [4] dst real mem))
for {
if v.AuxInt != 8 {
break
}
dst := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpComplexMake {
break
}
real := v_1.Args[0]
imag := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
v.AuxInt = 4
v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat32().PtrTo())
v0.AuxInt = 4
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
v1 := b.NewValue0(v.Line, OpStore, TypeMem)
v1.AuxInt = 4
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Store [16] dst (ComplexMake real imag) mem)
// cond:
// result: (Store [8] (OffPtr <config.fe.TypeFloat64().PtrTo()> [8] dst) imag (Store [8] dst real mem))
for {
if v.AuxInt != 16 {
break
}
dst := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpComplexMake {
break
}
real := v_1.Args[0]
imag := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
v.AuxInt = 8
v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeFloat64().PtrTo())
v0.AuxInt = 8
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
v1 := b.NewValue0(v.Line, OpStore, TypeMem)
v1.AuxInt = 8
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Store [2*config.PtrSize] dst (StringMake ptr len) mem)
// cond:
// result: (Store [config.PtrSize] (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store [config.PtrSize] dst ptr mem))
for {
if v.AuxInt != 2*config.PtrSize {
break
}
dst := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpStringMake {
break
}
ptr := v_1.Args[0]
len := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
v.AuxInt = config.PtrSize
v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(len)
v1 := b.NewValue0(v.Line, OpStore, TypeMem)
v1.AuxInt = config.PtrSize
v1.AddArg(dst)
v1.AddArg(ptr)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Store [3*config.PtrSize] dst (SliceMake ptr len cap) mem)
// cond:
// result: (Store [config.PtrSize] (OffPtr <config.fe.TypeInt().PtrTo()> [2*config.PtrSize] dst) cap (Store [config.PtrSize] (OffPtr <config.fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store [config.PtrSize] dst ptr mem)))
for {
if v.AuxInt != 3*config.PtrSize {
break
}
dst := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpSliceMake {
break
}
ptr := v_1.Args[0]
len := v_1.Args[1]
cap := v_1.Args[2]
mem := v.Args[2]
v.reset(OpStore)
v.AuxInt = config.PtrSize
v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v0.AuxInt = 2 * config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(cap)
v1 := b.NewValue0(v.Line, OpStore, TypeMem)
v1.AuxInt = config.PtrSize
v2 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeInt().PtrTo())
v2.AuxInt = config.PtrSize
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(len)
v3 := b.NewValue0(v.Line, OpStore, TypeMem)
v3.AuxInt = config.PtrSize
v3.AddArg(dst)
v3.AddArg(ptr)
v3.AddArg(mem)
v1.AddArg(v3)
v.AddArg(v1)
return true
}
// match: (Store [2*config.PtrSize] dst (IMake itab data) mem)
// cond:
// result: (Store [config.PtrSize] (OffPtr <config.fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst) data (Store [config.PtrSize] dst itab mem))
for {
if v.AuxInt != 2*config.PtrSize {
break
}
dst := v.Args[0]
v_1 := v.Args[1]
if v_1.Op != OpIMake {
break
}
itab := v_1.Args[0]
data := v_1.Args[1]
mem := v.Args[2]
v.reset(OpStore)
v.AuxInt = config.PtrSize
v0 := b.NewValue0(v.Line, OpOffPtr, config.fe.TypeBytePtr().PtrTo())
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(data)
v1 := b.NewValue0(v.Line, OpStore, TypeMem)
v1.AuxInt = config.PtrSize
v1.AddArg(dst)
v1.AddArg(itab)
v1.AddArg(mem)
v.AddArg(v1)
return true
}
// match: (Store [size] dst (Load <t> src mem) mem)
// cond: !config.fe.CanSSA(t)
// result: (Move [size] dst src mem)
......
......@@ -187,7 +187,7 @@ func (s *stackAllocState) stackalloc() {
if name.N != nil && v.Type.Equal(name.Type) {
for _, id := range s.interfere[v.ID] {
h := f.getHome(id)
if h != nil && h.(LocalSlot) == name {
if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off {
// A variable can interfere with itself.
// It is rare, but it can happen.
goto noname
......
// +build amd64
// errorcheck -0 -l -live -wb=0
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// liveness tests with inlining disabled.
// see also live2.go.
package main
func printnl()
//go:noescape
func printpointer(**int)
//go:noescape
func printintpointer(*int)
//go:noescape
func printstringpointer(*string)
//go:noescape
func printstring(string)
//go:noescape
func printbytepointer(*byte)
func printint(int)
func f1() {
var x *int
printpointer(&x) // ERROR "live at call to printpointer: x$"
printpointer(&x) // ERROR "live at call to printpointer: x$"
}
func f2(b bool) {
if b {
printint(0) // nothing live here
return
}
var x *int
printpointer(&x) // ERROR "live at call to printpointer: x$"
printpointer(&x) // ERROR "live at call to printpointer: x$"
}
func f3(b1, b2 bool) {
// Because x and y are ambiguously live, they appear
// live throughout the function, to avoid being poisoned
// in GODEBUG=gcdead=1 mode.
printint(0) // ERROR "live at call to printint: x y$"
if b1 == false {
printint(0) // ERROR "live at call to printint: x y$"
return
}
if b2 {
var x *int
printpointer(&x) // ERROR "live at call to printpointer: x y$"
printpointer(&x) // ERROR "live at call to printpointer: x y$"
} else {
var y *int
printpointer(&y) // ERROR "live at call to printpointer: x y$"
printpointer(&y) // ERROR "live at call to printpointer: x y$"
}
printint(0) // ERROR "f3: x \(type \*int\) is ambiguously live$" "f3: y \(type \*int\) is ambiguously live$" "live at call to printint: x y$"
}
// The old algorithm treated x as live on all code that
// could flow to a return statement, so it included the
// function entry and code above the declaration of x
// but would not include an indirect use of x in an infinite loop.
// Check that these cases are handled correctly.
func f4(b1, b2 bool) { // x not live here
if b2 {
printint(0) // x not live here
return
}
var z **int
x := new(int)
*x = 42
z = &x
printint(**z) // ERROR "live at call to printint: x$"
if b2 {
printint(1) // x not live here
return
}
for {
printint(**z) // ERROR "live at call to printint: x$"
}
}
func f5(b1 bool) {
var z **int
if b1 {
x := new(int)
*x = 42
z = &x
} else {
y := new(int)
*y = 54
z = &y
}
printint(**z) // ERROR "f5: x \(type \*int\) is ambiguously live$" "f5: y \(type \*int\) is ambiguously live$" "live at call to printint: x y$"
}
// confusion about the _ result used to cause spurious "live at entry to f6: _".
func f6() (_, y string) {
y = "hello"
return
}
// confusion about addressed results used to cause "live at entry to f7: x".
func f7() (x string) {
_ = &x
x = "hello"
return
}
// ignoring block returns used to cause "live at entry to f8: x, y".
func f8() (x, y string) {
return g8()
}
func g8() (string, string)
// ignoring block assignments used to cause "live at entry to f9: x"
// issue 7205
var i9 interface{}
func f9() bool {
g8()
x := i9
return x != interface{}(99.0i) // ERROR "live at call to convT2E: x.data x.type$"
}
// liveness formerly confused by UNDEF followed by RET,
// leading to "live at entry to f10: ~r1" (unnamed result).
func f10() string {
panic(1)
}
// liveness formerly confused by select, thinking runtime.selectgo
// can return to next instruction; it always jumps elsewhere.
// note that you have to use at least two cases in the select
// to get a true select; smaller selects compile to optimized helper functions.
var c chan *int
var b bool
// this used to have a spurious "live at entry to f11a: ~r0"
func f11a() *int {
select { // ERROR "live at call to newselect: autotmp_[0-9]+$" "live at call to selectgo: autotmp_[0-9]+$"
case <-c: // ERROR "live at call to selectrecv: autotmp_[0-9]+$"
return nil
case <-c: // ERROR "live at call to selectrecv: autotmp_[0-9]+$"
return nil
}
}
func f11b() *int {
p := new(int)
if b {
// At this point p is dead: the code here cannot
// get to the bottom of the function.
// This used to have a spurious "live at call to printint: p".
printint(1) // nothing live here!
select { // ERROR "live at call to newselect: autotmp_[0-9]+$" "live at call to selectgo: autotmp_[0-9]+$"
case <-c: // ERROR "live at call to selectrecv: autotmp_[0-9]+$"
return nil
case <-c: // ERROR "live at call to selectrecv: autotmp_[0-9]+$"
return nil
}
}
println(*p)
return nil
}
var sink *int
func f11c() *int {
p := new(int)
sink = p // prevent stack allocation, otherwise p is rematerializeable
if b {
// Unlike previous, the cases in this select fall through,
// so we can get to the println, so p is not dead.
printint(1) // ERROR "live at call to printint: p$"
select { // ERROR "live at call to newselect: autotmp_[0-9]+ p$" "live at call to selectgo: autotmp_[0-9]+ p$"
case <-c: // ERROR "live at call to selectrecv: autotmp_[0-9]+ p$"
case <-c: // ERROR "live at call to selectrecv: autotmp_[0-9]+ p$"
}
}
println(*p)
return nil
}
// similarly, select{} does not fall through.
// this used to have a spurious "live at entry to f12: ~r0".
func f12() *int {
if b {
select {}
} else {
return nil
}
}
// incorrectly placed VARDEF annotations can cause missing liveness annotations.
// this used to be missing the fact that s is live during the call to g13 (because it is
// needed for the call to h13).
func f13() {
s := g14()
s = h13(s, g13(s)) // ERROR "live at call to g13: s.ptr$"
}
func g13(string) string
func h13(string, string) string
// more incorrectly placed VARDEF.
func f14() {
x := g14()
printstringpointer(&x) // ERROR "live at call to printstringpointer: x$"
}
func g14() string
func f15() {
var x string
_ = &x
x = g15() // ERROR "live at call to g15: x$"
printstring(x) // ERROR "live at call to printstring: x$"
}
func g15() string
// Checking that various temporaries do not persist or cause
// ambiguously live values that must be zeroed.
// The exact temporary names are inconsequential but we are
// trying to check that there is only one at any given site,
// and also that none show up in "ambiguously live" messages.
var m map[string]int
func f16() {
if b {
delete(m, "hi") // ERROR "live at call to mapdelete: autotmp_[0-9]+$"
}
delete(m, "hi") // ERROR "live at call to mapdelete: autotmp_[0-9]+$"
delete(m, "hi") // ERROR "live at call to mapdelete: autotmp_[0-9]+$"
}
var m2s map[string]*byte
var m2 map[[2]string]*byte
var x2 [2]string
var bp *byte
func f17a() {
// value temporary only
if b {
m2[x2] = nil // ERROR "live at call to mapassign1: autotmp_[0-9]+$"
}
m2[x2] = nil // ERROR "live at call to mapassign1: autotmp_[0-9]+$"
m2[x2] = nil // ERROR "live at call to mapassign1: autotmp_[0-9]+$"
}
func f17b() {
// key temporary only
if b {
m2s["x"] = bp // ERROR "live at call to mapassign1: autotmp_[0-9]+$"
}
m2s["x"] = bp // ERROR "live at call to mapassign1: autotmp_[0-9]+$"
m2s["x"] = bp // ERROR "live at call to mapassign1: autotmp_[0-9]+$"
}
func f17c() {
// key and value temporaries
if b {
m2s["x"] = nil // ERROR "live at call to mapassign1: autotmp_[0-9]+ autotmp_[0-9]+$"
}
m2s["x"] = nil // ERROR "live at call to mapassign1: autotmp_[0-9]+ autotmp_[0-9]+$"
m2s["x"] = nil // ERROR "live at call to mapassign1: autotmp_[0-9]+ autotmp_[0-9]+$"
}
func g18() [2]string
func f18() {
// key temporary for mapaccess.
// temporary introduced by orderexpr.
var z *byte
if b {
z = m2[g18()] // ERROR "live at call to mapaccess1: autotmp_[0-9]+$"
}
z = m2[g18()] // ERROR "live at call to mapaccess1: autotmp_[0-9]+$"
z = m2[g18()] // ERROR "live at call to mapaccess1: autotmp_[0-9]+$"
printbytepointer(z)
}
var ch chan *byte
func f19() {
// dest temporary for channel receive.
var z *byte
if b {
z = <-ch // ERROR "live at call to chanrecv1: autotmp_[0-9]+$"
}
z = <-ch // ERROR "live at call to chanrecv1: autotmp_[0-9]+$"
z = <-ch // ERROR "live at call to chanrecv1: autotmp_[0-9]+$"
printbytepointer(z)
}
func f20() {
// src temporary for channel send
if b {
ch <- nil // ERROR "live at call to chansend1: autotmp_[0-9]+$"
}
ch <- nil // ERROR "live at call to chansend1: autotmp_[0-9]+$"
ch <- nil // ERROR "live at call to chansend1: autotmp_[0-9]+$"
}
func f21() {
// key temporary for mapaccess using array literal key.
var z *byte
if b {
z = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess1: autotmp_[0-9]+$"
}
z = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess1: autotmp_[0-9]+$"
z = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess1: autotmp_[0-9]+$"
printbytepointer(z)
}
func f23() {
// key temporary for two-result map access using array literal key.
var z *byte
var ok bool
if b {
z, ok = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess2: autotmp_[0-9]+$"
}
z, ok = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess2: autotmp_[0-9]+$"
z, ok = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess2: autotmp_[0-9]+$"
printbytepointer(z)
print(ok)
}
func f24() {
// key temporary for map access using array literal key.
// value temporary too.
if b {
m2[[2]string{"x", "y"}] = nil // ERROR "live at call to mapassign1: autotmp_[0-9]+ autotmp_[0-9]+$"
}
m2[[2]string{"x", "y"}] = nil // ERROR "live at call to mapassign1: autotmp_[0-9]+ autotmp_[0-9]+$"
m2[[2]string{"x", "y"}] = nil // ERROR "live at call to mapassign1: autotmp_[0-9]+ autotmp_[0-9]+$"
}
// defer should not cause spurious ambiguously live variables
func f25(b bool) {
defer g25()
if b {
return
}
var x string
_ = &x
x = g15() // ERROR "live at call to g15: x$"
printstring(x) // ERROR "live at call to printstring: x$"
} // ERROR "live at call to deferreturn: x$"
func g25()
// non-escaping ... slices passed to function call should die on return,
// so that the temporaries do not stack and do not cause ambiguously
// live variables.
func f26(b bool) {
if b {
print26((*int)(nil), (*int)(nil), (*int)(nil)) // ERROR "live at call to print26: autotmp_[0-9]+$"
}
print26((*int)(nil), (*int)(nil), (*int)(nil)) // ERROR "live at call to print26: autotmp_[0-9]+$"
print26((*int)(nil), (*int)(nil), (*int)(nil)) // ERROR "live at call to print26: autotmp_[0-9]+$"
printnl()
}
//go:noescape
func print26(...interface{})
// non-escaping closures passed to function call should die on return
func f27(b bool) {
x := 0
if b {
call27(func() { x++ }) // ERROR "live at call to call27: autotmp_[0-9]+$"
}
call27(func() { x++ }) // ERROR "live at call to call27: autotmp_[0-9]+$"
call27(func() { x++ }) // ERROR "live at call to call27: autotmp_[0-9]+$"
printnl()
}
// but defer does escape to later execution in the function
func f27defer(b bool) {
x := 0
if b {
defer call27(func() { x++ }) // ERROR "live at call to deferproc: autotmp_[0-9]+$" "live at call to deferreturn: autotmp_[0-9]+$"
}
defer call27(func() { x++ }) // ERROR "f27defer: autotmp_[0-9]+ \(type struct { F uintptr; x \*int }\) is ambiguously live$" "live at call to deferproc: autotmp_[0-9]+ autotmp_[0-9]+$" "live at call to deferreturn: autotmp_[0-9]+ autotmp_[0-9]+$"
printnl() // ERROR "live at call to printnl: autotmp_[0-9]+ autotmp_[0-9]+$"
} // ERROR "live at call to deferreturn: autotmp_[0-9]+ autotmp_[0-9]+$"
// and newproc (go) escapes to the heap
func f27go(b bool) {
x := 0
if b {
go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newproc: &x$"
}
go call27(func() { x++ }) // ERROR "live at call to newobject: &x$"
printnl()
}
//go:noescape
func call27(func())
// concatstring slice should die on return
var s1, s2, s3, s4, s5, s6, s7, s8, s9, s10 string
func f28(b bool) {
if b {
printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) // ERROR "live at call to concatstrings: autotmp_[0-9]+$" "live at call to printstring: autotmp_[0-9]+$"
}
printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) // ERROR "live at call to concatstrings: autotmp_[0-9]+$" "live at call to printstring: autotmp_[0-9]+$"
printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) // ERROR "live at call to concatstrings: autotmp_[0-9]+$" "live at call to printstring: autotmp_[0-9]+$"
}
// map iterator should die on end of range loop
func f29(b bool) {
if b {
for k := range m { // ERROR "live at call to mapiterinit: autotmp_[0-9]+$" "live at call to mapiternext: autotmp_[0-9]+$"
printstring(k) // ERROR "live at call to printstring: autotmp_[0-9]+$"
}
}
for k := range m { // ERROR "live at call to mapiterinit: autotmp_[0-9]+$" "live at call to mapiternext: autotmp_[0-9]+$"
printstring(k) // ERROR "live at call to printstring: autotmp_[0-9]+$"
}
for k := range m { // ERROR "live at call to mapiterinit: autotmp_[0-9]+$" "live at call to mapiternext: autotmp_[0-9]+$"
printstring(k) // ERROR "live at call to printstring: autotmp_[0-9]+$"
}
}
// copy of array of pointers should die at end of range loop
var ptrarr [10]*int
func f30(b bool) {
// two live temps during print(p):
// the copy of ptrarr and the internal iterator pointer.
if b {
for _, p := range ptrarr {
printintpointer(p) // ERROR "live at call to printintpointer: autotmp_[0-9]+ autotmp_[0-9]+$"
}
}
for _, p := range ptrarr {
printintpointer(p) // ERROR "live at call to printintpointer: autotmp_[0-9]+ autotmp_[0-9]+$"
}
for _, p := range ptrarr {
printintpointer(p) // ERROR "live at call to printintpointer: autotmp_[0-9]+ autotmp_[0-9]+$"
}
}
// conversion to interface should not leave temporary behind
func f31(b1, b2, b3 bool) {
if b1 {
g31("a") // ERROR "live at call to convT2E: autotmp_[0-9]+$" "live at call to g31: autotmp_[0-9]+$"
}
if b2 {
h31("b") // ERROR "live at call to convT2E: autotmp_[0-9]+ autotmp_[0-9]+$" "live at call to h31: autotmp_[0-9]+$" "live at call to newobject: autotmp_[0-9]+$"
}
if b3 {
panic("asdf") // ERROR "live at call to convT2E: autotmp_[0-9]+$" "live at call to gopanic: autotmp_[0-9]+$"
}
print(b3)
}
func g31(interface{})
func h31(...interface{})
// non-escaping partial functions passed to function call should die on return
type T32 int
func (t *T32) Inc() { // ERROR "live at entry to \(\*T32\).Inc: t$"
*t++
}
var t32 T32
func f32(b bool) {
if b {
call32(t32.Inc) // ERROR "live at call to call32: autotmp_[0-9]+$"
}
call32(t32.Inc) // ERROR "live at call to call32: autotmp_[0-9]+$"
call32(t32.Inc) // ERROR "live at call to call32: autotmp_[0-9]+$"
}
//go:noescape
func call32(func())
// temporaries introduced during if conditions and && || expressions
// should die once the condition has been acted upon.
var m33 map[interface{}]int
func f33() {
if m33[nil] == 0 { // ERROR "live at call to mapaccess1: autotmp_[0-9]+$"
printnl()
return
} else {
printnl()
}
printnl()
}
func f34() {
if m33[nil] == 0 { // ERROR "live at call to mapaccess1: autotmp_[0-9]+$"
printnl()
return
}
printnl()
}
func f35() {
if m33[nil] == 0 && m33[nil] == 0 { // ERROR "live at call to mapaccess1: autotmp_[0-9]+$"
printnl()
return
}
printnl()
}
func f36() {
if m33[nil] == 0 || m33[nil] == 0 { // ERROR "live at call to mapaccess1: autotmp_[0-9]+$"
printnl()
return
}
printnl()
}
func f37() {
if (m33[nil] == 0 || m33[nil] == 0) && m33[nil] == 0 { // ERROR "live at call to mapaccess1: autotmp_[0-9]+$"
printnl()
return
}
printnl()
}
// select temps should disappear in the case bodies
var c38 chan string
func fc38() chan string
func fi38(int) *string
func fb38() *bool
func f38(b bool) {
// we don't care what temps are printed on the lines with output.
// we care that the println lines have no live variables
// and therefore no output.
if b {
select { // ERROR "live at call to newselect: autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+$" "live at call to selectgo: autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+$"
case <-fc38(): // ERROR "live at call to selectrecv: autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+$"
printnl()
case fc38() <- *fi38(1): // ERROR "live at call to fc38: autotmp_[0-9]+$" "live at call to fi38: autotmp_[0-9]+ autotmp_[0-9]+$" "live at call to selectsend: autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+$"
printnl()
case *fi38(2) = <-fc38(): // ERROR "live at call to fc38: autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+$" "live at call to fi38: autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+$" "live at call to selectrecv: autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+$"
printnl()
case *fi38(3), *fb38() = <-fc38(): // ERROR "live at call to fb38: autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+$" "live at call to fc38: autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+$" "live at call to fi38: autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+$" "live at call to selectrecv2: autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+ autotmp_[0-9]+$"
printnl()
}
printnl()
}
printnl()
}
// issue 8097: mishandling of x = x during return.
func f39() (x []int) {
x = []int{1}
printnl() // ERROR "live at call to printnl: autotmp_[0-9]+$"
return x
}
func f39a() (x []int) {
x = []int{1}
printnl() // ERROR "live at call to printnl: autotmp_[0-9]+$"
return
}
// TODO: Reenable after #14904 is fixed.
//func f39b() (x [10]*int) {
// x = [10]*int{}
// x[0] = new(int) // E.R.R.O.R. "live at call to newobject: x$"
// printnl() // E.R.R.O.R. "live at call to printnl: x$"
// return x
//}
func f39c() (x [10]*int) {
x = [10]*int{}
x[0] = new(int) // ERROR "live at call to newobject: x$"
printnl() // ERROR "live at call to printnl: x$"
return
}
// issue 8142: lost 'addrtaken' bit on inlined variables.
// no inlining in this test, so just checking that non-inlined works.
type T40 struct {
m map[int]int
}
func newT40() *T40 {
ret := T40{}
ret.m = make(map[int]int) // ERROR "live at call to makemap: &ret$"
return &ret
}
func bad40() {
t := newT40()
_ = t
printnl()
}
func good40() {
ret := T40{}
ret.m = make(map[int]int) // ERROR "live at call to makemap: autotmp_[0-9]+ ret$"
t := &ret
printnl() // ERROR "live at call to printnl: autotmp_[0-9]+ ret$"
_ = t
}