Commit 80590711 authored by Dmitry Vyukov

runtime: bound defer pools

The unbounded list-based defer pool can grow infinitely.
This can happen if a goroutine routinely allocates a defer,
blocks on one P, and is then unblocked, scheduled, and
frees the defer on another P.
The scenario was reported on the golang-nuts list.
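
For illustration, a minimal hypothetical repro of that scenario (the names
and numbers below are made up, not taken from the report): each worker
allocates a _defer, blocks on a channel send, and is often resumed on a
different P, so the _defer is freed into that other P's pool.

package main

import "runtime"

func dummy() {}

// work allocates a _defer on whatever P runs it, then blocks.
// By the time the send completes the goroutine may have migrated,
// and the _defer is freed into the new P's pool.
func work(in, out chan int) {
	for v := range in {
		func() {
			defer dummy() // allocates a _defer
			out <- v      // block; may resume on another P
		}()
	}
}

func main() {
	runtime.GOMAXPROCS(4)
	in, out := make(chan int), make(chan int)
	for i := 0; i < 100; i++ {
		go work(in, out)
	}
	for i := 0; i < 1000000; i++ {
		in <- i
		<-out
	}
}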

We've been here several times: any unbounded local cache
is bad and eventually grows without limit. This change
introduces a central defer pool; the per-P pools become
fixed-size, with the sole purpose of amortizing accesses
to the central pool.
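
A standalone sketch of that two-level scheme (illustrative names — node,
local, central, localCap — none of which are the runtime's identifiers;
a toy model of the technique, not the actual implementation): each worker
owns a fixed-capacity cache, backed by one mutex-protected central free
list. A worker refills its cache to half capacity when it runs dry and
spills half of it when it fills up.

package pool

import "sync"

// node stands in for the runtime's _defer: an intrusively linked object.
type node struct {
	link *node
}

const localCap = 32 // mirrors the per-P [32]*_defer buffer

// local is a fixed-capacity per-worker cache; like a P's pool it is
// owned by a single worker at a time, so it needs no locking of its own.
type local struct {
	buf [localCap]*node
	n   int
}

// central is the shared backstop: a mutex-protected linked list
// (cf. sched.deferlock / sched.deferpool).
var central struct {
	sync.Mutex
	head *node
}

// get pops from the local cache, refilling it to half capacity from
// the central list when it is empty (cf. newdefer).
func (l *local) get() *node {
	if l.n == 0 {
		central.Lock()
		for l.n < localCap/2 && central.head != nil {
			d := central.head
			central.head = d.link
			d.link = nil
			l.buf[l.n] = d
			l.n++
		}
		central.Unlock()
	}
	if l.n == 0 {
		return new(node) // both caches empty: allocate fresh
	}
	l.n--
	d := l.buf[l.n]
	l.buf[l.n] = nil // don't let the cache pin the object
	return d
}

// put pushes onto the local cache, first transferring half of it to
// the central list when it is full (cf. freedefer); the local cache
// therefore never exceeds localCap entries.
func (l *local) put(d *node) {
	if l.n == localCap {
		var first, last *node
		for l.n > localCap/2 {
			l.n--
			x := l.buf[l.n]
			l.buf[l.n] = nil
			if first == nil {
				first = x
			} else {
				last.link = x
			}
			last = x
		}
		central.Lock()
		last.link = central.head
		central.head = first
		central.Unlock()
	}
	l.buf[l.n] = d
	l.n++
}

Moving half rather than all entries on each boundary crossing gives
hysteresis: a worker oscillating at the boundary touches the central
lock once per localCap/2 operations instead of on every call.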

Change-Id: Iadcfb113ccecf912e1b64afc07926f0de9de2248
Reviewed-on: https://go-review.googlesource.com/3741
Reviewed-by: Keith Randall <khr@golang.org>
parent 71be0138
@@ -25,6 +25,21 @@ func clearpools() {
 		poolcleanup()
 	}
 
+	// Clear central defer pools.
+	// Leave per-P pools alone, they have strictly bounded size.
+	lock(&sched.deferlock)
+	for i := range sched.deferpool {
+		// disconnect cached list before dropping it on the floor,
+		// so that a dangling ref to one entry does not pin all of them.
+		var d, dlink *_defer
+		for d = sched.deferpool[i]; d != nil; d = dlink {
+			dlink = d.link
+			d.link = nil
+		}
+		sched.deferpool[i] = nil
+	}
+	unlock(&sched.deferlock)
+
 	for _, p := range &allp {
 		if p == nil {
 			break
@@ -43,18 +58,6 @@ func clearpools() {
 			}
 			c.sudogcache = nil
 		}
-
-		// clear defer pools
-		for i := range p.deferpool {
-			// disconnect cached list before dropping it on the floor,
-			// so that a dangling ref to one entry does not pin all of them.
-			var d, dlink *_defer
-			for d = p.deferpool[i]; d != nil; d = dlink {
-				dlink = d.link
-				d.link = nil
-			}
-			p.deferpool[i] = nil
-		}
 	}
 }
@@ -166,9 +166,20 @@ func newdefer(siz int32) *_defer {
 	mp := acquirem()
 	if sc < uintptr(len(p{}.deferpool)) {
 		pp := mp.p
-		d = pp.deferpool[sc]
-		if d != nil {
-			pp.deferpool[sc] = d.link
+		if len(pp.deferpool[sc]) == 0 {
+			lock(&sched.deferlock)
+			for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
+				d := sched.deferpool[sc]
+				sched.deferpool[sc] = d.link
+				d.link = nil
+				pp.deferpool[sc] = append(pp.deferpool[sc], d)
+			}
+			unlock(&sched.deferlock)
+		}
+		if ln := len(pp.deferpool[sc]); ln > 0 {
+			d = pp.deferpool[sc][ln-1]
+			pp.deferpool[sc][ln-1] = nil
+			pp.deferpool[sc] = pp.deferpool[sc][:ln-1]
 		}
 	}
 	if d == nil {
@@ -214,9 +225,28 @@ func freedefer(d *_defer) {
 	if sc < uintptr(len(p{}.deferpool)) {
 		mp := acquirem()
 		pp := mp.p
+		if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
+			// Transfer half of local cache to the central cache.
+			var first, last *_defer
+			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
+				ln := len(pp.deferpool[sc])
+				d := pp.deferpool[sc][ln-1]
+				pp.deferpool[sc][ln-1] = nil
+				pp.deferpool[sc] = pp.deferpool[sc][:ln-1]
+				if first == nil {
+					first = d
+				} else {
+					last.link = d
+				}
+				last = d
+			}
+			lock(&sched.deferlock)
+			last.link = sched.deferpool[sc]
+			sched.deferpool[sc] = first
+			unlock(&sched.deferlock)
+		}
 		*d = _defer{}
-		d.link = pp.deferpool[sc]
-		pp.deferpool[sc] = d
+		pp.deferpool[sc] = append(pp.deferpool[sc], d)
 		releasem(mp)
 	}
 }
@@ -2540,6 +2540,9 @@ func procresize(new int32) *p {
 			p = newP()
 			p.id = i
 			p.status = _Pgcstop
+			for i := range p.deferpool {
+				p.deferpool[i] = p.deferpoolbuf[i][:0]
+			}
 			atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(p))
 		}
 		if p.mcache == nil {
@@ -2578,6 +2581,13 @@ func procresize(new int32) *p {
 			}
 			sched.runqsize++
 		}
+		for i := range p.deferpool {
+			for j := range p.deferpoolbuf[i] {
+				p.deferpoolbuf[i][j] = nil
+			}
+			p.deferpool[i] = p.deferpoolbuf[i][:0]
+		}
 		freemcache(p.mcache)
 		p.mcache = nil
 		gfpurge(p)
@@ -314,7 +314,9 @@ type p struct {
 	syscalltick uint32 // incremented on every system call
 	m           *m     // back-link to associated m (nil if idle)
 	mcache      *mcache
-	deferpool   [5]*_defer // pool of available defer structs of different sizes (see panic.c)
+	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
+	deferpoolbuf [5][32]*_defer
+
 	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
 	goidcache uint64
@@ -365,6 +367,9 @@ type schedt struct {
 	gfree  *g
 	ngfree int32
 
+	deferlock mutex
+	deferpool [5]*_defer // central pool of available defer structs of different sizes
+
 	gcwaiting uint32 // gc is waiting to run
 	stopwait  int32
 	stopnote  note