Commit e396667b authored by Keith Randall

[release-branch.go1.8] cmd/compile: zero ambiguously live variables at VARKILLs

This is a redo of CL 41076 backported to the 1.8 release branch.
There were major conflicts, so I had to basically rewrite it again
from scratch.  The way Progs are allocated changed.  Liveness analysis
and Prog generation got reordered.  Liveness analysis changed from
running on gc.BasicBlock to ssa.Block.  All that makes the logic quite
a bit different.

Please review carefully.

From CL 41076:

At VARKILLs, zero a variable if it is ambiguously live.
After the VARKILL anything this variable references
might be collected. If it were to become live again later,
the GC will see references to already-collected objects.

We don't know a variable is ambiguously live until very
late in compilation (after lowering, register allocation, ...),
so it is hard to generate the code in an arch-independent way.
We also have to be careful not to clobber any registers.
Fortunately, this almost never happens so performance is ~irrelevant.

There are only 2 instances where this triggers in the stdlib.

Fixes #20029

Change-Id: Ibb757eec58ee07f40df5e561b19d315684dc4bda
Reviewed-on: https://go-review.googlesource.com/43998
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
parent daf6706f
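For context, a minimal sketch of the kind of ambiguously live variable this change protects (illustrative only; it mirrors the fixed-bug test included at the bottom of this change): the hidden map iterator (hiter) of the inner range loop is killed when `continue outer` leaves the loop, yet its stack slot can still be reported live at a later safepoint, so a collection there could follow stale pointers unless the slot is zeroed.

// Illustrative sketch, not part of the CL: the inner loop's hiter is the
// ambiguously live variable. Its VARKILL fires at `continue outer`, but the
// slot stays in the stack maps, so the collector must never see stale
// pointers in it.
func iterate(m map[int]int) {
outer:
	for i := 0; i < 2; i++ {
		for k := range m {
			if k == 1 {
				continue outer
			}
		}
	}
}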
@@ -27,4 +27,5 @@ func Init() {
	gc.Thearch.SSAMarkMoves = ssaMarkMoves
	gc.Thearch.SSAGenValue = ssaGenValue
	gc.Thearch.SSAGenBlock = ssaGenBlock
	gc.Thearch.ZeroAuto = zeroAuto
}
@@ -166,6 +166,27 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32, x0 *uint32) *obj.Prog {
	return p
}

func zeroAuto(n *gc.Node, pp *obj.Prog) {
	// Note: this code must not clobber any registers.
	op := x86.AMOVQ
	if gc.Widthptr == 4 {
		op = x86.AMOVL
	}
	sym := gc.Linksym(n.Sym)
	size := n.Type.Size()
	for i := int64(0); i < size; i += int64(gc.Widthptr) {
		p := gc.AddAsmAfter(op, pp)
		pp = p
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_AUTO
		p.To.Reg = x86.REG_SP
		p.To.Offset = n.Xoffset + i
		p.To.Sym = sym
	}
}

func ginsnop() {
	// This is actually not the x86 NOP anymore,
	// but at the point where it gets used, AX is dead
......
@@ -21,4 +21,5 @@ func Init() {
	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
	gc.Thearch.SSAGenValue = ssaGenValue
	gc.Thearch.SSAGenBlock = ssaGenBlock
	gc.Thearch.ZeroAuto = zeroAuto
}
@@ -92,6 +92,27 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
	return p
}

func zeroAuto(n *gc.Node, pp *obj.Prog) {
	// Note: this code must not clobber any registers.
	sym := gc.Linksym(n.Sym)
	size := n.Type.Size()
	p := gc.Prog(arm.AMOVW)
	p.From.Type = obj.TYPE_CONST
	p.From.Offset = 0
	p.To.Type = obj.TYPE_REG
	p.To.Reg = arm.REGTMP
	for i := int64(0); i < size; i += 4 {
		p := gc.AddAsmAfter(arm.AMOVW, pp)
		pp = p
		p.From.Type = obj.TYPE_REG
		p.From.Reg = arm.REGTMP
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_AUTO
		p.To.Reg = arm.REGSP
		p.To.Offset = n.Xoffset + i
		p.To.Sym = sym
	}
}

func ginsnop() {
	p := gc.Prog(arm.AAND)
......
@@ -21,4 +21,5 @@ func Init() {
	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
	gc.Thearch.SSAGenValue = ssaGenValue
	gc.Thearch.SSAGenBlock = ssaGenBlock
	gc.Thearch.ZeroAuto = zeroAuto
}
@@ -103,6 +103,23 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	return p
}

func zeroAuto(n *gc.Node, pp *obj.Prog) {
	// Note: this code must not clobber any registers.
	sym := gc.Linksym(n.Sym)
	size := n.Type.Size()
	for i := int64(0); i < size; i += 8 {
		p := gc.AddAsmAfter(arm64.AMOVD, pp)
		pp = p
		p.From.Type = obj.TYPE_REG
		p.From.Reg = arm64.REGZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_AUTO
		p.To.Reg = arm64.REGSP
		p.To.Offset = n.Xoffset + i
		p.To.Sym = sym
	}
}

func ginsnop() {
	p := gc.Prog(arm64.AHINT)
	p.From.Type = obj.TYPE_CONST
......
@@ -361,6 +361,12 @@ type Arch struct {
	// SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
	// for all values in the block before SSAGenBlock.
	SSAGenBlock func(s *SSAGenState, b, next *ssa.Block)

	// ZeroAuto emits code to zero the given auto stack variable.
	// Code is added immediately after pp.
	// ZeroAuto must not use any non-temporary registers.
	// ZeroAuto will only be called for variables which contain a pointer.
	ZeroAuto func(n *Node, pp *obj.Prog)
}

var pcloc int32
......
@@ -72,6 +72,15 @@ func Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
	return q
}

func AddAsmAfter(as obj.As, p *obj.Prog) *obj.Prog {
	q := Ctxt.NewProg()
	Clearp(q)
	q.As = as
	q.Link = p.Link
	p.Link = q
	return q
}

func ggloblnod(nam *Node) {
	s := Linksym(nam.Sym)
	s.Gotype = Linksym(ngotype(nam))
......
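A small self-contained sketch of what AddAsmAfter does, and of the emit-and-advance pattern the per-arch zeroAuto implementations build on (toy types for illustration only, not the real obj.Prog):

package main

import "fmt"

// prog is a toy stand-in for obj.Prog: just an opcode string and a link.
type prog struct {
	as   string
	link *prog
}

// addAsmAfter mirrors gc.AddAsmAfter: allocate a node and splice it into
// the singly linked instruction list immediately after p.
func addAsmAfter(as string, p *prog) *prog {
	q := &prog{as: as}
	q.link = p.link
	p.link = q
	return q
}

func main() {
	varkill := &prog{as: "VARKILL x"}
	varkill.link = &prog{as: "RET"}

	// Emit two zero stores after the VARKILL, advancing pp each time
	// (as the zeroAuto implementations do) so they stay in program order.
	pp := varkill
	for i := 0; i < 2; i++ {
		pp = addAsmAfter(fmt.Sprintf("MOV $0, x+%d(SP)", 8*i), pp)
	}

	for p := varkill; p != nil; p = p.link {
		fmt.Println(p.as)
	}
	// Prints: VARKILL x, then the two zero stores, then RET.
}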
@@ -120,7 +120,30 @@ func Gvarlive(n *Node) {
}

func removevardef(firstp *obj.Prog) {
	// At VARKILLs, zero variable if it is ambiguously live.
	// After the VARKILL anything this variable references
	// might be collected. If it were to become live again later,
	// the GC will see references to already-collected objects.
	// See issue 20029.
	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.AVARKILL {
			continue
		}
		n := p.To.Node.(*Node)
		if !n.Name.Needzero {
			continue
		}
		if n.Class != PAUTO {
			Fatalf("zero of variable which isn't PAUTO %v", n)
		}
		if n.Type.Size()%int64(Widthptr) != 0 {
			Fatalf("zero of variable not a multiple of ptr size %v", n)
		}
		Thearch.ZeroAuto(n, p)
	}

	for p := firstp; p != nil; p = p.Link {
		for p.Link != nil && (p.Link.As == obj.AVARDEF || p.Link.As == obj.AVARKILL || p.Link.As == obj.AVARLIVE) {
			p.Link = p.Link.Link
		}
......
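Note the ordering in this hunk: Thearch.ZeroAuto splices the zero stores immediately after each qualifying AVARKILL, and the pre-existing loop below then unlinks the AVARDEF/AVARKILL/AVARLIVE pseudo-instructions, so only the real stores survive into the emitted code.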
@@ -23,4 +23,5 @@ func Init() {
	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
	gc.Thearch.SSAGenValue = ssaGenValue
	gc.Thearch.SSAGenBlock = ssaGenBlock
	gc.Thearch.ZeroAuto = zeroAuto
}
@@ -92,6 +92,23 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	return p
}

func zeroAuto(n *gc.Node, pp *obj.Prog) {
	// Note: this code must not clobber any registers.
	sym := gc.Linksym(n.Sym)
	size := n.Type.Size()
	for i := int64(0); i < size; i += 4 {
		p := gc.AddAsmAfter(mips.AMOVW, pp)
		pp = p
		p.From.Type = obj.TYPE_REG
		p.From.Reg = mips.REGZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_AUTO
		p.To.Reg = mips.REGSP
		p.To.Offset = n.Xoffset + i
		p.To.Sym = sym
	}
}

func ginsnop() {
	p := gc.Prog(mips.ANOR)
	p.From.Type = obj.TYPE_REG
......
@@ -25,4 +25,5 @@ func Init() {
	gc.Thearch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
	gc.Thearch.SSAGenValue = ssaGenValue
	gc.Thearch.SSAGenBlock = ssaGenBlock
	gc.Thearch.ZeroAuto = zeroAuto
}
@@ -95,6 +95,23 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	return p
}

func zeroAuto(n *gc.Node, pp *obj.Prog) {
	// Note: this code must not clobber any registers.
	sym := gc.Linksym(n.Sym)
	size := n.Type.Size()
	for i := int64(0); i < size; i += 8 {
		p := gc.AddAsmAfter(mips.AMOVV, pp)
		pp = p
		p.From.Type = obj.TYPE_REG
		p.From.Reg = mips.REGZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_AUTO
		p.To.Reg = mips.REGSP
		p.To.Offset = n.Xoffset + i
		p.To.Sym = sym
	}
}

func ginsnop() {
	p := gc.Prog(mips.ANOR)
	p.From.Type = obj.TYPE_REG
......
@@ -24,6 +24,7 @@ func Init() {
	gc.Thearch.SSAMarkMoves = ssaMarkMoves
	gc.Thearch.SSAGenValue = ssaGenValue
	gc.Thearch.SSAGenBlock = ssaGenBlock
	gc.Thearch.ZeroAuto = zeroAuto
	initvariants()
	initproginfo()
......
@@ -90,6 +90,23 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	return p
}

func zeroAuto(n *gc.Node, pp *obj.Prog) {
	// Note: this code must not clobber any registers.
	sym := gc.Linksym(n.Sym)
	size := n.Type.Size()
	for i := int64(0); i < size; i += 8 {
		p := gc.AddAsmAfter(ppc64.AMOVD, pp)
		pp = p
		p.From.Type = obj.TYPE_REG
		p.From.Reg = ppc64.REGZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_AUTO
		p.To.Reg = ppc64.REGSP
		p.To.Offset = n.Xoffset + i
		p.To.Sym = sym
	}
}

func ginsnop() {
	p := gc.Prog(ppc64.AOR)
	p.From.Type = obj.TYPE_REG
......
@@ -20,4 +20,5 @@ func Init() {
	gc.Thearch.SSAMarkMoves = ssaMarkMoves
	gc.Thearch.SSAGenValue = ssaGenValue
	gc.Thearch.SSAGenBlock = ssaGenBlock
	gc.Thearch.ZeroAuto = zeroAuto
}
@@ -143,6 +143,19 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
	return p
}

func zeroAuto(n *gc.Node, pp *obj.Prog) {
	// Note: this code must not clobber any registers.
	p := gc.AddAsmAfter(s390x.ACLEAR, pp)
	pp = p
	p.From.Type = obj.TYPE_CONST
	p.From.Offset = n.Type.Size()
	p.To.Type = obj.TYPE_MEM
	p.To.Name = obj.NAME_AUTO
	p.To.Reg = s390x.REGSP
	p.To.Offset = n.Xoffset
	p.To.Sym = gc.Linksym(n.Sym)
}

func ginsnop() {
	p := gc.Prog(s390x.AOR)
	p.From.Type = obj.TYPE_REG
......
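Unlike the other ports, the s390x version needs neither a loop nor a scratch register: a single CLEAR Prog whose constant operand is the variable's size wipes the whole stack slot in one go.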
@@ -31,4 +31,5 @@ func Init() {
	gc.Thearch.SSAMarkMoves = ssaMarkMoves
	gc.Thearch.SSAGenValue = ssaGenValue
	gc.Thearch.SSAGenBlock = ssaGenBlock
	gc.Thearch.ZeroAuto = zeroAuto
}
@@ -84,6 +84,23 @@ func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
	return p
}

func zeroAuto(n *gc.Node, pp *obj.Prog) {
	// Note: this code must not clobber any registers.
	sym := gc.Linksym(n.Sym)
	size := n.Type.Size()
	for i := int64(0); i < size; i += 4 {
		p := gc.AddAsmAfter(x86.AMOVL, pp)
		pp = p
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_AUTO
		p.To.Reg = x86.REG_SP
		p.To.Offset = n.Xoffset + i
		p.To.Sym = sym
	}
}

func ginsnop() {
	p := gc.Prog(x86.AXCHGL)
	p.From.Type = obj.TYPE_REG
......
// run

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Issue 20029: make sure we zero at VARKILLs of
// ambiguously live variables.
// The ambiguously live variable here is the hiter
// for the inner range loop.

package main

import "runtime"

func f(m map[int]int) {
outer:
	for i := 0; i < 10; i++ {
		for k := range m {
			if k == 5 {
				continue outer
			}
		}
		runtime.GC()
		break
	}
	runtime.GC()
}

func main() {
	m := map[int]int{1: 2, 2: 3, 3: 4}
	f(m)
}
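The // run header marks this as a test that the test harness compiles and executes; it passes as long as the program runs to completion. The explicit runtime.GC calls force collections at the points where, before this fix, the dead map-iterator slot could still be reported live and hand the collector references to already-collected objects.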