Commit 8d321625 authored by Dmitriy Vyukov

runtime: fix spans corruption

The problem was that spans ended up in the wrong lists after a split
(e.g. in h->busy instead of h->central->empty).
Also, the span can be unswept before the split;
I don't know what that can cause, but it's safer to operate on swept spans.
Fixes #7544.

R=golang-codereviews, rsc
CC=golang-codereviews, khr
https://golang.org/cl/76160043
parent f210fd1f
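
The core of the fix is an ordering invariant: a span's list membership must always match its state (its sizeclass), so any mutation has to happen while the span is off every list, with reinsertion keyed to the new state. Below is a minimal standalone C model of that invariant, written for this page; the struct, list, and function names are simplified stand-ins, not the runtime's real API.

#include <assert.h>
#include <stdio.h>

/* Minimal model of the invariant MHeap_SplitSpan must maintain.
 * Names are illustrative stand-ins, not the runtime's real types. */
typedef struct Span Span;
struct Span {
	Span *prev, *next;   /* doubly-linked; a list header is itself a Span */
	int sizeclass;       /* 0 = large span, >0 = small-object span */
	int npages;
};

static void listinit(Span *h) { h->prev = h->next = h; }

static void remove_span(Span *s) {
	s->prev->next = s->next;
	s->next->prev = s->prev;
	s->prev = s->next = NULL;
}

static void insertback(Span *h, Span *s) {
	s->prev = h->prev;
	s->next = h;
	h->prev->next = s;
	h->prev = s;
}

static Span busy, central_empty;  /* stand-ins for h->busy and h->central[x].empty */

/* Correct order, mirroring the fixed code: take the span off whatever
 * list it is on, mutate it, then insert it where its NEW state says. */
static void split(Span *s) {
	remove_span(s);                 /* off all lists before any mutation */
	s->npages /= 2;
	s->sizeclass = 1;               /* the split span now holds small objects... */
	insertback(&central_empty, s);  /* ...so it belongs on the central list */
}

int main(void) {
	Span s = { .sizeclass = 0, .npages = 2 };
	listinit(&busy);
	listinit(&central_empty);
	insertback(&busy, &s);          /* a large span starts on the busy list */
	split(&s);
	/* The bug fixed by this commit: mutating sizeclass while the span
	 * stayed on `busy` left list membership inconsistent with state. */
	assert(central_empty.next == &s && busy.next == &busy);
	printf("span is on the list matching its sizeclass\n");
	return 0;
}

In the runtime, the same discipline appears in the diff below as MSpanList_Remove before the resize and MSpanList_InsertBack afterward.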
@@ -1679,7 +1679,7 @@ runtime·MSpan_EnsureSwept(MSpan *s)
 	// Caller must disable preemption.
 	// Otherwise when this function returns the span can become unswept again
 	// (if GC is triggered on another goroutine).
-	if(m->locks == 0 && m->mallocing == 0)
+	if(m->locks == 0 && m->mallocing == 0 && g != m->g0)
 		runtime·throw("MSpan_EnsureSwept: m is not locked");
 	sg = runtime·mheap.sweepgen;
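
A plausible reading of the relaxed check above: code running on the scheduler stack (g0) cannot be preempted even when m->locks == 0, so it may safely call MSpan_EnsureSwept; the stack-shrinking path changed below runs in that context. This is inferred from the diff rather than stated in the commit message.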
@@ -846,48 +846,86 @@ void
 runtime·MHeap_SplitSpan(MHeap *h, MSpan *s)
 {
 	MSpan *t;
+	MCentral *c;
 	uintptr i;
 	uintptr npages;
 	PageID p;
 
-	if((s->npages & 1) != 0)
-		runtime·throw("MHeap_SplitSpan on an odd size span");
 	if(s->state != MSpanInUse)
 		runtime·throw("MHeap_SplitSpan on a free span");
 	if(s->sizeclass != 0 && s->ref != 1)
 		runtime·throw("MHeap_SplitSpan doesn't have an allocated object");
 	npages = s->npages;
 
-	runtime·lock(h);
-
-	// compute position in h->spans
-	p = s->start;
-	p -= (uintptr)h->arena_start >> PageShift;
-
-	// Allocate a new span for the first half.
-	t = runtime·FixAlloc_Alloc(&h->spanalloc);
-	runtime·MSpan_Init(t, s->start, npages/2);
-	t->limit = (byte*)((t->start + npages/2) << PageShift);
-	t->state = MSpanInUse;
-	t->elemsize = npages << (PageShift - 1);
-	t->sweepgen = s->sweepgen;
-	if(t->elemsize <= MaxSmallSize) {
-		t->sizeclass = runtime·SizeToClass(t->elemsize);
-		t->ref = 1;
-	}
-
-	// the old span holds the second half.
-	s->start += npages/2;
-	s->npages = npages/2;
-	s->elemsize = npages << (PageShift - 1);
-	if(s->elemsize <= MaxSmallSize) {
-		s->sizeclass = runtime·SizeToClass(s->elemsize);
-		s->ref = 1;
-	}
-
-	// update span lookup table
-	for(i = p; i < p + npages/2; i++)
-		h->spans[i] = t;
-
-	runtime·unlock(h);
+	// remove the span from whatever list it is in now
+	if(s->sizeclass > 0) {
+		// must be in h->central[x].empty
+		c = &h->central[s->sizeclass];
+		runtime·lock(c);
+		runtime·MSpanList_Remove(s);
+		runtime·unlock(c);
+		runtime·lock(h);
+	} else {
+		// must be in h->busy/busylarge
+		runtime·lock(h);
+		runtime·MSpanList_Remove(s);
+	}
+	// heap is locked now
+
+	if(npages == 1) {
+		// convert span of 1 PageSize object to a span of 2 PageSize/2 objects.
+		s->ref = 2;
+		s->sizeclass = runtime·SizeToClass(PageSize/2);
+		s->elemsize = PageSize/2;
+	} else {
+		// convert span of n>1 pages into two spans of n/2 pages each.
+		if((s->npages & 1) != 0)
+			runtime·throw("MHeap_SplitSpan on an odd size span");
+
+		// compute position in h->spans
+		p = s->start;
+		p -= (uintptr)h->arena_start >> PageShift;
+
+		// Allocate a new span for the first half.
+		t = runtime·FixAlloc_Alloc(&h->spanalloc);
+		runtime·MSpan_Init(t, s->start, npages/2);
+		t->limit = (byte*)((t->start + npages/2) << PageShift);
+		t->state = MSpanInUse;
+		t->elemsize = npages << (PageShift - 1);
+		t->sweepgen = s->sweepgen;
+		if(t->elemsize <= MaxSmallSize) {
+			t->sizeclass = runtime·SizeToClass(t->elemsize);
+			t->ref = 1;
+		}
+
+		// the old span holds the second half.
+		s->start += npages/2;
+		s->npages = npages/2;
+		s->elemsize = npages << (PageShift - 1);
+		if(s->elemsize <= MaxSmallSize) {
+			s->sizeclass = runtime·SizeToClass(s->elemsize);
+			s->ref = 1;
+		}
+
+		// update span lookup table
+		for(i = p; i < p + npages/2; i++)
+			h->spans[i] = t;
+	}
+
+	// place the span into a new list
+	if(s->sizeclass > 0) {
+		runtime·unlock(h);
+		c = &h->central[s->sizeclass];
+		runtime·lock(c);
+		// swept spans are at the end of the list
+		runtime·MSpanList_InsertBack(&c->empty, s);
+		runtime·unlock(c);
+	} else {
+		// Swept spans are at the end of lists.
+		if(s->npages < nelem(h->free))
+			runtime·MSpanList_InsertBack(&h->busy[s->npages], s);
+		else
+			runtime·MSpanList_InsertBack(&h->busylarge, s);
+		runtime·unlock(h);
+	}
 }
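
One detail worth noting in the new MHeap_SplitSpan is the lock choreography: the central lock is released before the heap lock is taken on removal, and the heap lock is released before the central lock is retaken on reinsertion, so the two locks are never held at once. Here is a sketch of that discipline using POSIX mutexes; the names are illustrative only, and the runtime uses its own lock primitive, not pthreads.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the central (per-sizeclass) and heap locks. */
static pthread_mutex_t central_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the fixed MHeap_SplitSpan for a small-object span: at no point
 * are both locks held, so no lock-order cycle between MCentral and MHeap
 * can arise. */
static void split_small_span(void) {
	pthread_mutex_lock(&central_lock);
	/* ...remove span from the central empty list... */
	pthread_mutex_unlock(&central_lock);   /* released BEFORE the heap lock */

	pthread_mutex_lock(&heap_lock);
	/* ...resize the span, update the h->spans lookup table... */
	pthread_mutex_unlock(&heap_lock);      /* released BEFORE the central lock */

	pthread_mutex_lock(&central_lock);
	/* ...insert span at the back of the central empty list... */
	pthread_mutex_unlock(&central_lock);
}

int main(void) {
	split_small_span();
	puts("lock order: central, heap, central - never nested");
	return 0;
}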
@@ -838,15 +838,7 @@ runtime·shrinkstack(G *gp)
 	// First, we trick malloc into thinking
 	// we allocated the stack as two separate half-size allocs. Then the
 	// free() call does the rest of the work for us.
-	if(oldsize == PageSize) {
-		// convert span of 1 PageSize object to a span of 2
-		// PageSize/2 objects.
-		span->ref = 2;
-		span->sizeclass = runtime·SizeToClass(PageSize/2);
-		span->elemsize = PageSize/2;
-	} else {
-		// convert span of n>1 pages into two spans of n/2 pages each.
-		runtime·MHeap_SplitSpan(&runtime·mheap, span);
-	}
+	runtime·MSpan_EnsureSwept(span);
+	runtime·MHeap_SplitSpan(&runtime·mheap, span);
 	runtime·free(oldstk);
 }
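
Note the accompanying simplification: the one-page special case (turning a span of one PageSize object into two PageSize/2 objects) has moved from shrinkstack into MHeap_SplitSpan itself, and MSpan_EnsureSwept now runs first so the split always operates on a swept span, as the commit message says.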