Commit fb94be55 authored by Russ Cox

runtime: tighten garbage collector

 * specialize sweepspan as sweepspan0 and sweepspan1.
 * in sweepspan1, inline "free" to avoid expensive mlookup.

R=iant
CC=golang-dev
https://golang.org/cl/206060
parent 991a968f
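
Note on the second bullet, where the savings come from: the old sweep freed each dead small object by calling free(p), and free must call mlookup to rediscover, through the heap's page map, which span the pointer belongs to and what size class it has. The sweep already visits the heap one MSpan at a time, so the new sweepspan1 has the span, size class, and object size in hand and can return dead blocks to the cache directly. The toy program below sketches that shape; it is not the runtime's code (Span, NOBJ, and the per-span free list here are invented for illustration).

/*
 * Toy model of pass 1 of the new two-pass sweep.  The point is that
 * the sweep walks span by span, so a dead object can be freed using
 * only information the span already carries, with no per-object
 * lookup of the kind free()/mlookup would have done.
 */
#include <stdio.h>
#include <stdlib.h>

enum { RefFree, RefNone, RefSome, NOBJ = 4 };

typedef struct Span Span;
struct Span {
	int sizeclass;          /* index into a size-class table */
	int size;               /* object size in bytes */
	char *base;             /* start of the span's memory */
	int ref[NOBJ];          /* per-object mark word, like s->gcref */
	char *freelist[NOBJ];   /* toy per-span free list */
	int nfree;
};

/* Pass 1 for one span: free objects still marked RefNone and reset
 * marked objects for the next collection.  The span and object size
 * are already in hand, so no lookup is needed. */
static void
sweepspan1(Span *s)
{
	int i;
	char *p;

	for(i = 0; i < NOBJ; i++) {
		p = s->base + i*s->size;
		switch(s->ref[i]) {
		case RefNone:   /* unreachable: free it in place */
			s->ref[i] = RefFree;
			s->freelist[s->nfree++] = p;
			break;
		case RefSome:   /* reachable: reset for next mark+sweep */
			s->ref[i] = RefNone;
			break;
		}
	}
}

int
main(void)
{
	Span s = { .sizeclass = 1, .size = 16 };
	s.base = malloc(NOBJ * s.size);
	s.ref[0] = RefSome;   /* marked reachable */
	s.ref[1] = RefNone;   /* garbage */
	s.ref[2] = RefNone;   /* garbage */
	s.ref[3] = RefSome;   /* marked reachable */

	sweepspan1(&s);
	printf("freed %d of %d objects\n", s.nfree, NOBJ);
	free(s.base);
	return 0;
}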
@@ -321,6 +321,7 @@ MSpan* MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass);
 void MHeap_Free(MHeap *h, MSpan *s);
 MSpan* MHeap_Lookup(MHeap *h, PageID p);
 MSpan* MHeap_LookupMaybe(MHeap *h, PageID p);
+void MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
 
 void* mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
 int32 mlookup(void *v, byte **base, uintptr *size, uint32 **ref);
...
@@ -157,6 +157,19 @@ MCentral_Free(MCentral *c, void *v)
 	}
 }
 
+void
+MGetSizeClassInfo(int32 sizeclass, int32 *sizep, int32 *npagesp, int32 *nobj)
+{
+	int32 size;
+	int32 npages;
+
+	npages = class_to_allocnpages[sizeclass];
+	size = class_to_size[sizeclass];
+	*npagesp = npages;
+	*sizep = size;
+	*nobj = (npages << PageShift) / (size + RefcountOverhead);
+}
+
 // Fetch a new span from the heap and
 // carve into objects for the free list.
 static bool
@@ -168,7 +181,7 @@ MCentral_Grow(MCentral *c)
 	MSpan *s;
 
 	unlock(c);
-	npages = class_to_allocnpages[c->sizeclass];
+	MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
 	s = MHeap_Alloc(&mheap, npages, c->sizeclass);
 	if(s == nil) {
 		// TODO(rsc): Log out of memory
@@ -179,8 +192,6 @@ MCentral_Grow(MCentral *c)
 	// Carve span into sequence of blocks.
 	tailp = &s->freelist;
 	p = (byte*)(s->start << PageShift);
-	size = class_to_size[c->sizeclass];
-	n = (npages << PageShift) / (size + RefcountOverhead);
 	s->gcref = (uint32*)(p + size*n);
 	for(i=0; i<n; i++) {
 		v = (MLink*)p;
...
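
A note on the helper added above: MGetSizeClassInfo centralizes the numbers that MCentral_Grow and the sweep both need, and its object count nobj = (npages << PageShift) / (size + RefcountOverhead) reflects the span layout, in which every object is also charged for one 32-bit reference word in the s->gcref array at the end of the span. With illustrative numbers (not taken from the real size-class tables): a one-page span and PageShift = 12 give 4096 bytes, so 60-byte objects with a 4-byte reference word each yield nobj = 4096 / (60 + 4) = 64.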
@@ -139,79 +139,115 @@ mark(void)
 	}
 }
 
-static void
-sweepspan(MSpan *s, int32 pass)
-{
-	int32 i, n, npages, size;
-	byte *p;
-
-	if(s->state != MSpanInUse)
-		return;
-
-	p = (byte*)(s->start << PageShift);
-	if(s->sizeclass == 0) {
-		// Large block.
-		sweepblock(p, (uint64)s->npages<<PageShift, &s->gcref0, pass);
-		return;
-	}
-
-	// Chunk full of small blocks.
-	// Must match computation in MCentral_Grow.
-	size = class_to_size[s->sizeclass];
-	npages = class_to_allocnpages[s->sizeclass];
-	n = (npages << PageShift) / (size + RefcountOverhead);
-	for(i=0; i<n; i++)
-		sweepblock(p+i*size, size, &s->gcref[i], pass);
-}
-
-static void
-sweepblock(byte *p, int64 n, uint32 *gcrefp, int32 pass)
-{
-	uint32 gcref;
-
-	gcref = *gcrefp;
-	switch(gcref & ~(RefNoPointers|RefHasFinalizer)) {
-	default:
-		throw("bad 'ref count'");
-	case RefFree:
-	case RefStack:
-		break;
-	case RefNone:
-		if(pass == 0 && (gcref & RefHasFinalizer)) {
-			// Tentatively mark as finalizable.
-			// Make sure anything it points at will not be collected.
-			if(Debug > 0)
-				printf("maybe finalize %p+%D\n", p, n);
-			*gcrefp = RefFinalize | RefHasFinalizer | (gcref&RefNoPointers);
-			scanblock(100, p, n);
-		} else if(pass == 1) {
-			if(Debug > 0)
-				printf("free %p+%D\n", p, n);
-			free(p);
-		}
-		break;
-	case RefFinalize:
-		if(pass != 1)
-			throw("sweepspan pass 0 RefFinalize");
-		if(pfinq < efinq) {
-			if(Debug > 0)
-				printf("finalize %p+%D\n", p, n);
-			pfinq->p = p;
-			pfinq->nret = 0;
-			pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
-			gcref &= ~RefHasFinalizer;
-			if(pfinq->fn == nil)
-				throw("getfinalizer inconsistency");
-			pfinq++;
-		}
-		// Reset for next mark+sweep.
-		*gcrefp = RefNone | (gcref&(RefNoPointers|RefHasFinalizer));
-		break;
-	case RefSome:
-		// Reset for next mark+sweep.
-		if(pass == 1)
-			*gcrefp = RefNone | (gcref&(RefNoPointers|RefHasFinalizer));
-		break;
-	}
-}
+// pass 0: mark RefNone with finalizer as RefFinalize and trace
+static void
+sweepspan0(MSpan *s)
+{
+	byte *p;
+	uint32 ref, *gcrefp, *gcrefep;
+	int32 n, size, npages;
+
+	p = (byte*)(s->start << PageShift);
+	if(s->sizeclass == 0) {
+		// Large block.
+		ref = s->gcref0;
+		if((ref&~RefNoPointers) == (RefNone|RefHasFinalizer)) {
+			// Mark as finalizable.
+			s->gcref0 = RefFinalize | RefHasFinalizer | (ref&RefNoPointers);
+			if(!(ref & RefNoPointers))
+				scanblock(100, p, s->npages<<PageShift);
+		}
+		return;
+	}
+
+	// Chunk full of small blocks.
+	MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
+	gcrefp = s->gcref;
+	gcrefep = s->gcref + n;
+	for(; gcrefp < gcrefep; gcrefp++) {
+		ref = *gcrefp;
+		if((ref&~RefNoPointers) == (RefNone|RefHasFinalizer)) {
+			// Mark as finalizable.
+			*gcrefp = RefFinalize | RefHasFinalizer | (ref&RefNoPointers);
+			if(!(ref & RefNoPointers))
+				scanblock(100, p+(gcrefp-s->gcref)*size, size);
+		}
+	}
+}
+
+// pass 1: free RefNone, queue RefFinalize, reset RefSome
+static void
+sweepspan1(MSpan *s)
+{
+	int32 n, npages, size;
+	byte *p;
+	uint32 ref, *gcrefp, *gcrefep;
+	MCache *c;
+
+	p = (byte*)(s->start << PageShift);
+	if(s->sizeclass == 0) {
+		// Large block.
+		ref = s->gcref0;
+		switch(ref & ~(RefNoPointers|RefHasFinalizer)) {
+		case RefNone:
+			// Free large object.
+			mstats.alloc -= s->npages<<PageShift;
+			runtime_memclr(p, s->npages<<PageShift);
+			s->gcref0 = RefFree;
+			MHeap_Free(&mheap, s);
+			break;
+		case RefFinalize:
+			if(pfinq < efinq) {
+				pfinq->p = p;
+				pfinq->nret = 0;
+				pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
+				ref &= ~RefHasFinalizer;
+				if(pfinq->fn == nil)
+					throw("finalizer inconsistency");
+				pfinq++;
+			}
+			// fall through
+		case RefSome:
+			s->gcref0 = RefNone | (ref&(RefNoPointers|RefHasFinalizer));
+			break;
+		}
+		return;
+	}
+
+	// Chunk full of small blocks.
+	MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
+	gcrefp = s->gcref;
+	gcrefep = s->gcref + n;
+	for(; gcrefp < gcrefep; gcrefp++, p += size) {
+		ref = *gcrefp;
+		if(ref < RefNone)	// RefFree or RefStack
+			continue;
+		switch(ref & ~(RefNoPointers|RefHasFinalizer)) {
+		case RefNone:
+			// Free small object.
+			*gcrefp = RefFree;
+			c = m->mcache;
+			if(size > sizeof(uintptr))
+				((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
+			mstats.alloc -= size;
+			mstats.by_size[s->sizeclass].nfree++;
+			MCache_Free(c, p, s->sizeclass, size);
+			break;
+		case RefFinalize:
+			if(pfinq < efinq) {
+				pfinq->p = p;
+				pfinq->nret = 0;
+				pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
+				ref &= ~RefHasFinalizer;
+				if(pfinq->fn == nil)
+					throw("finalizer inconsistency");
+				pfinq++;
+			}
+			// fall through
+		case RefSome:
+			*gcrefp = RefNone | (ref&(RefNoPointers|RefHasFinalizer));
+			break;
+		}
+	}
+}
 
@@ -222,11 +258,13 @@ sweep(void)
 
 	// Sweep all the spans marking blocks to be finalized.
 	for(s = mheap.allspans; s != nil; s = s->allnext)
-		sweepspan(s, 0);
+		if(s->state == MSpanInUse)
+			sweepspan0(s);
 
 	// Sweep again queueing finalizers and freeing the others.
 	for(s = mheap.allspans; s != nil; s = s->allnext)
-		sweepspan(s, 1);
+		if(s->state == MSpanInUse)
+			sweepspan1(s);
 }
 
 // Semaphore, not Lock, so that the goroutine
...