Commit f8c35087 authored by Dmitriy Vyukov

runtime: fix yet another race in bgsweep

Currently it's possible that bgsweep finishes before all spans
have been swept (we only know that sweeping of all spans has *started*).
In that case bgsweep may fail to wake up the runfinq goroutine when it needs to.
finq may still be nil at this point, but some finalizers may be queued later.
Make bgsweep wait for sweeping to *complete*; then it can decide for sure
whether it needs to wake up runfinq.
Update #7533

LGTM=rsc
R=rsc
CC=golang-codereviews
https://golang.org/cl/75960043
parent 40f5e675
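Note (editor): the fix replaces the bgsweep-driven wakeup with a flag handshake protected by the new finlock: runtime·queuefinalizer sets runtime·fingwake when it queues work, runfinq sets runtime·fingwait before parking, and the scheduler (findrunnable in proc.c) calls runtime·wakefing to wake the finalizer goroutine only when both flags are set. The stand-alone pthread program below is a rough model of that handshake, not runtime code: the mutex plays the role of finlock, a condition variable stands in for goroutine park/ready, and an integer counter stands in for the FinBlock queue; the unlocked fast-path flag check that findrunnable does before taking the lock is omitted.

/* sketch.c: illustrative model of the fingwait/fingwake handshake -- NOT the Go runtime code.
 * Build: cc -pthread sketch.c -o sketch
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t finlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  fincond = PTHREAD_COND_INITIALIZER; /* stands in for goroutine park/ready */
static bool fingwait;  /* finalizer thread is parked, waiting for work */
static bool fingwake;  /* finalizers were queued since the last wakeup */
static int  finq;      /* pending finalizer count (stand-in for the FinBlock list) */

/* Producer side, e.g. a sweeper that found an object with a finalizer.
 * Mirrors runtime·queuefinalizer: queue under finlock and set fingwake. */
static void queuefinalizer(void)
{
	pthread_mutex_lock(&finlock);
	finq++;
	fingwake = true;
	pthread_mutex_unlock(&finlock);
}

/* Finalizer thread, modelled on runfinq: run whatever is queued,
 * otherwise mark itself as waiting and park. */
static void *runfinq(void *arg)
{
	(void)arg;
	for(;;) {
		pthread_mutex_lock(&finlock);
		while(finq == 0) {
			fingwait = true;
			pthread_cond_wait(&fincond, &finlock);
		}
		printf("running %d queued finalizer(s)\n", finq);
		finq = 0;
		pthread_mutex_unlock(&finlock);
	}
	return NULL;
}

/* Scheduler side, modelled on runtime·wakefing plus the new check in proc.c:
 * wake the finalizer thread only if it is parked AND work has been queued. */
static void wakefing(void)
{
	pthread_mutex_lock(&finlock);
	if(fingwait && fingwake) {
		fingwait = false;
		fingwake = false;
		pthread_cond_signal(&fincond); /* the runtime·ready(fing) analogue */
	}
	pthread_mutex_unlock(&finlock);
}

int main(void)
{
	pthread_t fing;

	/* runtime·createfing analogue: start the finalizer thread once. */
	pthread_create(&fing, NULL, runfinq, NULL);
	for(int i = 0; i < 3; i++) {
		queuefinalizer();
		wakefing();          /* in the runtime the scheduler polls this */
		usleep(100*1000);    /* give the finalizer thread time to run */
	}
	return 0;
}

Because the wakeup condition is re-checked under the lock, a finalizer queued at any time, even after sweeping has completed, is eventually noticed by the scheduler; that is the guarantee the old bgsweep-based wakeup could not provide.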
@@ -893,6 +893,7 @@ func SetFinalizer(obj Eface, finalizer Eface) {
 		}
 	}
 	if(finalizer.type != nil) {
+		runtime·createfing();
 		if(finalizer.type->kind != KindFunc)
 			goto badfunc;
 		ft = (FuncType*)finalizer.type;
......
@@ -571,6 +571,10 @@ void runtime·MProf_TraceGC(void);
 int32	runtime·gcprocs(void);
 void	runtime·helpgc(int32 nproc);
 void	runtime·gchelper(void);
+void	runtime·createfing(void);
+G*	runtime·wakefing(void);
+extern bool	runtime·fingwait;
+extern bool	runtime·fingwake;
 void	runtime·setprofilebucket(void *p, Bucket *b);
......
@@ -202,15 +202,17 @@ extern byte ebss[];
 extern byte gcdata[];
 extern byte gcbss[];
-static G	*fing;
+static Lock	finlock;	// protects the following variables
 static FinBlock	*finq;	// list of finalizers that are to be executed
 static FinBlock	*finc;	// cache of free blocks
 static FinBlock	*allfin;	// list of all blocks
-static int32	fingwait;
+bool	runtime·fingwait;
+bool	runtime·fingwake;
 static Lock	gclock;
+static G*	fing;
 static void	runfinq(void);
-static void	wakefing(void);
 static void	bgsweep(void);
 static Workbuf*	getempty(Workbuf*);
 static Workbuf*	getfull(Workbuf*);
@@ -1652,7 +1654,7 @@ runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType
 	FinBlock *block;
 	Finalizer *f;
-	runtime·lock(&gclock);
+	runtime·lock(&finlock);
 	if(finq == nil || finq->cnt == finq->cap) {
 		if(finc == nil) {
 			finc = runtime·persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
@@ -1672,7 +1674,8 @@ runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType
 	f->fint = fint;
 	f->ot = ot;
 	f->arg = p;
-	runtime·unlock(&gclock);
+	runtime·fingwake = true;
+	runtime·unlock(&finlock);
 }
 void
@@ -1899,7 +1902,6 @@ static struct
 {
 	G*	g;
 	bool	parked;
-	uint32	lastsweepgen;
 	MSpan**	spans;
 	uint32	nspan;
@@ -1914,17 +1916,8 @@ bgsweep(void)
 	for(;;) {
 		while(runtime·sweepone() != -1) {
 			gcstats.nbgsweep++;
-			if(sweep.lastsweepgen != runtime·mheap.sweepgen) {
-				// If bgsweep does not catch up for any reason
-				// (does not finish before next GC),
-				// we still need to kick off runfinq at least once per GC.
-				sweep.lastsweepgen = runtime·mheap.sweepgen;
-				wakefing();
-			}
 			runtime·gosched();
 		}
-		// kick off goroutine to run queued finalizers
-		wakefing();
 		runtime·lock(&gclock);
 		if(!runtime·mheap.sweepdone) {
 			// It's possible if GC has happened between sweepone has
@@ -2277,8 +2270,6 @@ runtime·gc(int32 force)
 	// now that gc is done, kick off finalizer thread if needed
 	if(!ConcurrentSweep) {
-		// kick off goroutine to run queued finalizers
-		wakefing();
 		// give the queued finalizers, if any, a chance to run
 		runtime·gosched();
 	}
@@ -2565,15 +2556,15 @@ runfinq(void)
 	USED(&ef1);
 	for(;;) {
-		runtime·lock(&gclock);
+		runtime·lock(&finlock);
 		fb = finq;
 		finq = nil;
 		if(fb == nil) {
-			fingwait = 1;
-			runtime·parkunlock(&gclock, "finalizer wait");
+			runtime·fingwait = true;
+			runtime·parkunlock(&finlock, "finalizer wait");
 			continue;
 		}
-		runtime·unlock(&gclock);
+		runtime·unlock(&finlock);
 		if(raceenabled)
 			runtime·racefingo();
 		for(; fb; fb=next) {
@@ -2613,10 +2604,10 @@ runfinq(void)
 				f->ot = nil;
 			}
 			fb->cnt = 0;
-			runtime·lock(&gclock);
+			runtime·lock(&finlock);
 			fb->next = finc;
 			finc = fb;
-			runtime·unlock(&gclock);
+			runtime·unlock(&finlock);
 		}
 		// Zero everything that's dead, to avoid memory leaks.
@@ -2632,22 +2623,36 @@ runfinq(void)
 	}
 }
-static void
-wakefing(void)
+void
+runtime·createfing(void)
 {
-	if(finq == nil)
+	if(fing != nil)
 		return;
+	// Here we use gclock instead of finlock,
+	// because newproc1 can allocate, which can cause on-demand span sweep,
+	// which can queue finalizers, which would deadlock.
 	runtime·lock(&gclock);
-	// kick off or wake up goroutine to run queued finalizers
 	if(fing == nil)
 		fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc);
-	else if(fingwait) {
-		fingwait = 0;
-		runtime·ready(fing);
-	}
 	runtime·unlock(&gclock);
 }
+G*
+runtime·wakefing(void)
+{
+	G *res;
+	res = nil;
+	runtime·lock(&finlock);
+	if(runtime·fingwait && runtime·fingwake) {
+		runtime·fingwait = false;
+		runtime·fingwake = false;
+		res = fing;
+	}
+	runtime·unlock(&finlock);
+	return res;
+}
 void
 runtime·marknogc(void *v)
 {
......
@@ -1144,6 +1144,8 @@ top:
 		gcstopm();
 		goto top;
 	}
+	if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil)
+		runtime·ready(gp);
 	// local runq
 	gp = runqget(m->p);
 	if(gp)
......