Commit 70d7e96c authored by Austin Clements

runtime: avoid tracking spans with no objects with mcentral

Lazy mcache flushing (golang.org/cl/134783) made it so that moving a
span from an mcache to an mcentral was sometimes responsible for
sweeping the span. However, it did a "preserving" sweep, which meant
it retained ownership, even if the sweeper swept all objects in the
span. As a result, we could put a completely unused span back in the
mcentral.
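To make the ownership problem concrete, here is a toy model of the old ordering. None of these are the runtime's real types: span, central, and the boolean sweep parameter are invented stand-ins, and the sweep here simply assumes every object was dead.

package main

import "fmt"

// span and central are toy stand-ins for the runtime's mspan and
// mcentral; none of this is the real runtime code.
type span struct {
	allocCount int // objects still live on the span
}

type central struct {
	nonempty []*span // spans this central believes still hold objects
}

// sweep frees the dead objects on s. With preserve=true the sweeper
// leaves s with the caller no matter what, even when it freed every
// object, so the caller keeps ownership unconditionally.
func (s *span) sweep(preserve bool) {
	s.allocCount = 0 // pretend every object on s was dead
	_ = preserve     // preserve=true: never hand s back to the heap
}

func main() {
	c := &central{}
	s := &span{allocCount: 5}

	// Old uncacheSpan ordering: preserving sweep first, then put s
	// back on the central's nonempty list.
	s.sweep(true)
	c.nonempty = append(c.nonempty, s)

	fmt.Printf("central tracks a span with %d live objects\n", s.allocCount)
	// Prints 0: a completely unused span is parked in the central
	// instead of being returned to the heap.
}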

Fix this by first taking back ownership of the span into the mcentral
and moving it to the right mcentral list, and then doing a
non-preserving sweep. The non-preserving sweep will move the span to
the heap if it sweeps all objects.
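A sketch of the fixed ordering in the same toy model (again, span, central, heap, and sweepNonPreserving are invented stand-ins, not the runtime's types): list the span in the central first, then run a non-preserving sweep, which is free to pull an emptied span off the list and hand it to the heap.

package main

import "fmt"

type span struct{ allocCount int }

type central struct{ nonempty []*span }

type heap struct{ free []*span }

var toyHeap heap // stand-in for the runtime's mheap_

// sweepNonPreserving frees the dead objects on s and, because it is
// not preserving, may take s away from c: when nothing is left
// alive, it removes s from c's list and returns it to the heap.
func sweepNonPreserving(c *central, s *span) {
	s.allocCount = 0 // pretend every object on s was dead
	if s.allocCount == 0 {
		for i, t := range c.nonempty {
			if t == s {
				c.nonempty = append(c.nonempty[:i], c.nonempty[i+1:]...)
				break
			}
		}
		toyHeap.free = append(toyHeap.free, s)
	}
}

func main() {
	c := &central{}
	s := &span{allocCount: 5}

	// Fixed ordering: the central takes ownership and lists s first...
	c.nonempty = append(c.nonempty, s)
	// ...and only then sweeps, non-preserving.
	sweepNonPreserving(c, s)

	fmt.Printf("central tracks %d span(s); heap holds %d free span(s)\n",
		len(c.nonempty), len(toyHeap.free))
	// Prints: central tracks 0 span(s); heap holds 1 free span(s)
}

In the diff below, the non-preserving s.sweep(false) at the end of uncacheSpan plays the role of sweepNonPreserving here.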

Change-Id: I244b1893b44b8c00264f0928ac9239449775f617
Reviewed-on: https://go-review.googlesource.com/c/140597
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
parent 7d2f46da
@@ -153,16 +153,6 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 		throw("uncaching span but s.allocCount == 0")
 	}
-	cap := int32((s.npages << _PageShift) / s.elemsize)
-	n := cap - int32(s.allocCount)
-
-	// cacheSpan updated alloc assuming all objects on s were
-	// going to be allocated. Adjust for any that weren't. We must
-	// do this before potentially sweeping the span.
-	if n > 0 {
-		atomic.Xadd64(&c.nmalloc, -int64(n))
-	}
-
 	sg := mheap_.sweepgen
 	stale := s.sweepgen == sg+1
 	if stale {
@@ -170,18 +160,23 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 		// responsibility to sweep it.
 		//
 		// Set sweepgen to indicate it's not cached but needs
-		// sweeping. sweep will set s.sweepgen to indicate s
-		// is swept.
-		s.sweepgen = sg - 1
-		s.sweep(true)
-		// sweep may have freed objects, so recompute n.
-		n = cap - int32(s.allocCount)
+		// sweeping and can't be allocated from. sweep will
+		// set s.sweepgen to indicate s is swept.
+		atomic.Store(&s.sweepgen, sg-1)
 	} else {
 		// Indicate that s is no longer cached.
-		s.sweepgen = sg
+		atomic.Store(&s.sweepgen, sg)
 	}
 
+	cap := int32((s.npages << _PageShift) / s.elemsize)
+	n := cap - int32(s.allocCount)
 	if n > 0 {
+		// cacheSpan updated alloc assuming all objects on s
+		// were going to be allocated. Adjust for any that
+		// weren't. We must do this before potentially
+		// sweeping the span.
 		atomic.Xadd64(&c.nmalloc, -int64(n))
+
 		lock(&c.lock)
 		c.empty.remove(s)
 		c.nonempty.insert(s)
@@ -197,6 +192,12 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 		}
 		unlock(&c.lock)
 	}
+
+	if stale {
+		// Now that s is in the right mcentral list, we can
+		// sweep it.
+		s.sweep(false)
+	}
 }
 
 // freeSpan updates c and s after sweeping s.