Commit 9cbd2fb1 authored by Dmitriy Vyukov

runtime: remove locks from netpoll hotpaths

Introduces a two-phase goroutine parking mechanism: prepare to park, then commit the park.
This mechanism does not require a backing mutex to protect the wait predicate.
Use it in netpoll. See the comment in netpoll.goc for details.
This slightly reduces contention between the reader, the writer and read/write io notifications,
and eliminates a number of mutex operations from the hotpaths, making them faster.

benchmark                             old ns/op    new ns/op    delta
BenchmarkTCP4ConcurrentReadWrite           2109         1945   -7.78%
BenchmarkTCP4ConcurrentReadWrite-2         1162         1113   -4.22%
BenchmarkTCP4ConcurrentReadWrite-4          798          755   -5.39%
BenchmarkTCP4ConcurrentReadWrite-8          803          748   -6.85%
BenchmarkTCP4Persistent                    9411         9240   -1.82%
BenchmarkTCP4Persistent-2                  5888         5813   -1.27%
BenchmarkTCP4Persistent-4                  4016         3968   -1.20%
BenchmarkTCP4Persistent-8                  3943         3857   -2.18%

R=golang-codereviews, mikioh.mikioh, gobot, iant, rsc
CC=golang-codereviews, khr
https://golang.org/cl/45700043
parent cb86d867
@@ -483,6 +483,12 @@ TEXT runtime·xchg(SB), NOSPLIT, $0-8
 	XCHGL	AX, 0(BX)
 	RET
 
+TEXT runtime·xchgp(SB), NOSPLIT, $0-8
+	MOVL	4(SP), BX
+	MOVL	8(SP), AX
+	XCHGL	AX, 0(BX)
+	RET
+
 TEXT runtime·procyield(SB),NOSPLIT,$0-0
 	MOVL	4(SP), AX
 again:
......
@@ -549,6 +549,12 @@ TEXT runtime·xchg64(SB), NOSPLIT, $0-16
 	XCHGQ	AX, 0(BX)
 	RET
 
+TEXT runtime·xchgp(SB), NOSPLIT, $0-16
+	MOVQ	8(SP), BX
+	MOVQ	16(SP), AX
+	XCHGQ	AX, 0(BX)
+	RET
+
 TEXT runtime·procyield(SB),NOSPLIT,$0-0
 	MOVL	8(SP), AX
 again:
......
@@ -41,6 +41,19 @@ runtime·xchg(uint32 volatile* addr, uint32 v)
 	}
 }
 
+#pragma textflag NOSPLIT
+void*
+runtime·xchgp(void* volatile* addr, void* v)
+{
+	void *old;
+
+	for(;;) {
+		old = *addr;
+		if(runtime·casp((void**)addr, old, v))
+			return old;
+	}
+}
+
 #pragma textflag NOSPLIT
 void
 runtime·procyield(uint32 cnt)
......
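The new runtime·xchgp above is an atomic pointer exchange: a single XCHG instruction on 386/amd64, and a CAS retry loop on targets without a native swap. For illustration only, here is a rough user-level Go analogue of the CAS-loop variant; it is a sketch, not the runtime's implementation (Go's own sync/atomic has provided SwapPointer directly since Go 1.2):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// xchgp atomically swaps in a new pointer and returns the old one,
// using the same CAS retry loop as the generic runtime version above.
func xchgp(addr *unsafe.Pointer, v unsafe.Pointer) unsafe.Pointer {
	for {
		old := atomic.LoadPointer(addr)
		if atomic.CompareAndSwapPointer(addr, old, v) {
			return old
		}
	}
}

func main() {
	a, b := 1, 2
	p := unsafe.Pointer(&a)
	old := xchgp(&p, unsafe.Pointer(&b))
	fmt.Println(*(*int)(old), *(*int)(p)) // prints: 1 2
}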
@@ -224,7 +224,7 @@ runtime·chansend(ChanType *t, Hchan *c, byte *ep, bool *pres, void *pc)
 	mysg.selgen = NOSELGEN;
 	g->param = nil;
 	enqueue(&c->sendq, &mysg);
-	runtime·park(runtime·unlock, c, "chan send");
+	runtime·parkunlock(c, "chan send");
 
 	if(g->param == nil) {
 		runtime·lock(c);
@@ -252,7 +252,7 @@ asynch:
 		mysg.elem = nil;
 		mysg.selgen = NOSELGEN;
 		enqueue(&c->sendq, &mysg);
-		runtime·park(runtime·unlock, c, "chan send");
+		runtime·parkunlock(c, "chan send");
 
 		runtime·lock(c);
 		goto asynch;
@@ -356,7 +356,7 @@ runtime·chanrecv(ChanType *t, Hchan* c, byte *ep, bool *selected, bool *received)
 	mysg.selgen = NOSELGEN;
 	g->param = nil;
 	enqueue(&c->recvq, &mysg);
-	runtime·park(runtime·unlock, c, "chan receive");
+	runtime·parkunlock(c, "chan receive");
 
 	if(g->param == nil) {
 		runtime·lock(c);
@@ -387,7 +387,7 @@ asynch:
 		mysg.elem = nil;
 		mysg.selgen = NOSELGEN;
 		enqueue(&c->recvq, &mysg);
-		runtime·park(runtime·unlock, c, "chan receive");
+		runtime·parkunlock(c, "chan receive");
 
 		runtime·lock(c);
 		goto asynch;
@@ -799,6 +799,14 @@ selunlock(Select *sel)
 	}
 }
 
+static bool
+selparkcommit(G *gp, void *sel)
+{
+	USED(gp);
+	selunlock(sel);
+	return true;
+}
+
 void
 runtime·block(void)
 {
@@ -971,7 +979,7 @@ loop:
 	}
 
 	g->param = nil;
-	runtime·park((void(*)(Lock*))selunlock, (Lock*)sel, "select");
+	runtime·park(selparkcommit, sel, "select");
 
 	sellock(sel);
 	sg = g->param;
......
@@ -2307,7 +2307,7 @@ runfinq(void)
 		finq = nil;
 		if(fb == nil) {
 			fingwait = 1;
-			runtime·park(runtime·unlock, &finlock, "finalizer wait");
+			runtime·parkunlock(&finlock, "finalizer wait");
 			continue;
 		}
 		runtime·unlock(&finlock);
......
@@ -19,21 +19,40 @@ package net
 // An implementation must call the following function to denote that the pd is ready.
 // void runtime·netpollready(G **gpp, PollDesc *pd, int32 mode);
 
+// PollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
+// goroutines respectively. The semaphore can be in the following states:
+// READY - io readiness notification is pending;
+//         a goroutine consumes the notification by changing the state to nil.
+// WAIT - a goroutine prepares to park on the semaphore, but is not yet parked;
+//        the goroutine commits to park by changing the state to G pointer,
+//        or, alternatively, concurrent io notification changes the state to READY,
+//        or, alternatively, concurrent timeout/close changes the state to nil.
+// G pointer - the goroutine is blocked on the semaphore;
+//             io notification or timeout/close changes the state to READY or nil respectively
+//             and unparks the goroutine.
+// nil - nothing of the above.
+
 #define READY ((G*)1)
+#define WAIT ((G*)2)
 
 struct PollDesc
 {
 	PollDesc* link;	// in pollcache, protected by pollcache.Lock
+	// The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
+	// This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
+	// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
+	// proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
+	// in a lock-free way by all operations.
 	Lock;		// protects the following fields
 	uintptr	fd;
 	bool	closing;
 	uintptr	seq;	// protects from stale timers and ready notifications
-	G*	rg;	// G waiting for read or READY (binary semaphore)
+	G*	rg;	// READY, WAIT, G waiting for read or nil
 	Timer	rt;	// read deadline timer (set if rt.fv != nil)
 	int64	rd;	// read deadline
-	G*	wg;	// the same for writes
-	Timer	wt;
-	int64	wd;
+	G*	wg;	// READY, WAIT, G waiting for write or nil
+	Timer	wt;	// write deadline timer
+	int64	wd;	// write deadline
 };
 
 static struct
@@ -47,7 +66,7 @@ static struct
 	// seq is incremented when deadlines are changed or descriptor is reused.
 } pollcache;
 
-static bool	netpollblock(PollDesc*, int32);
+static bool	netpollblock(PollDesc*, int32, bool);
 static G*	netpollunblock(PollDesc*, int32, bool);
 static void	deadline(int64, Eface);
 static void	readDeadline(int64, Eface);
@@ -97,7 +116,6 @@ func runtime_pollClose(pd *PollDesc) {
 }
 
 func runtime_pollReset(pd *PollDesc, mode int) (err int) {
-	runtime·lock(pd);
 	err = checkerr(pd, mode);
 	if(err)
 		goto ret;
@@ -106,11 +124,9 @@ func runtime_pollReset(pd *PollDesc, mode int) (err int) {
 	else if(mode == 'w')
 		pd->wg = nil;
 ret:
-	runtime·unlock(pd);
 }
 
 func runtime_pollWait(pd *PollDesc, mode int) (err int) {
-	runtime·lock(pd);
 	err = checkerr(pd, mode);
 	if(err == 0) {
 #ifdef GOOS_solaris
@@ -119,7 +135,7 @@ func runtime_pollWait(pd *PollDesc, mode int) (err int) {
 		else if(mode == 'w')
 			runtime·netpollarmwrite(pd->fd);
 #endif
-		while(!netpollblock(pd, mode)) {
+		while(!netpollblock(pd, mode, false)) {
 			err = checkerr(pd, mode);
 			if(err != 0)
 				break;
@@ -128,11 +144,9 @@ func runtime_pollWait(pd *PollDesc, mode int) (err int) {
 			// Pretend it has not happened and retry.
 		}
 	}
-	runtime·unlock(pd);
 }
 
 func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
-	runtime·lock(pd);
 #ifdef GOOS_solaris
 	if(mode == 'r')
 		runtime·netpollarmread(pd->fd);
@@ -140,9 +154,8 @@ func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
 		runtime·netpollarmwrite(pd->fd);
 #endif
 	// wait for ioready, ignore closing or timeouts.
-	while(!netpollblock(pd, mode))
+	while(!netpollblock(pd, mode, true))
 		;
-	runtime·unlock(pd);
 }
 
 func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
@@ -197,7 +210,7 @@ func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
 	}
 	// If we set the new deadline in the past, unblock currently pending IO if any.
 	rg = nil;
-	wg = nil;
+	runtime·atomicstorep(&wg, nil);  // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
 	if(pd->rd < 0)
 		rg = netpollunblock(pd, 'r', false);
 	if(pd->wd < 0)
@@ -217,6 +230,7 @@ func runtime_pollUnblock(pd *PollDesc) {
 		runtime·throw("runtime_pollUnblock: already closing");
 	pd->closing = true;
 	pd->seq++;
+	runtime·atomicstorep(&rg, nil);  // full memory barrier between store to closing and read of rg/wg in netpollunblock
 	rg = netpollunblock(pd, 'r', false);
 	wg = netpollunblock(pd, 'w', false);
 	if(pd->rt.fv) {
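The atomicstorep calls just above exist only for their ordering side effect: each side of the race performs store, full barrier, load (closing/rd/wd against rg/wg), so at least one side must observe the other's store. The property being relied on can be illustrated in ordinary Go, whose sync/atomic operations are sequentially consistent; the demo below is illustrative, not runtime code, and shows that two such store-then-load pairs can never both read zero:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	for i := 0; i < 100000; i++ {
		var x, y int32 // the two flags, e.g. "closing" and "rg/wg"
		var sawX, sawY int32
		var wg sync.WaitGroup
		wg.Add(2)
		go func() { // one side: store x, full barrier, load y
			defer wg.Done()
			atomic.StoreInt32(&x, 1)
			sawY = atomic.LoadInt32(&y)
		}()
		go func() { // other side: store y, full barrier, load x
			defer wg.Done()
			atomic.StoreInt32(&y, 1)
			sawX = atomic.LoadInt32(&x)
		}()
		wg.Wait()
		if sawX == 0 && sawY == 0 {
			fmt.Println("impossible with full barriers: both sides missed the store")
			return
		}
	}
	fmt.Println("ok: in every run at least one side observed the other's store")
}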
@@ -247,12 +261,10 @@ runtime·netpollready(G **gpp, PollDesc *pd, int32 mode)
 	G *rg, *wg;
 
 	rg = wg = nil;
-	runtime·lock(pd);
 	if(mode == 'r' || mode == 'r'+'w')
 		rg = netpollunblock(pd, 'r', true);
 	if(mode == 'w' || mode == 'r'+'w')
 		wg = netpollunblock(pd, 'w', true);
-	runtime·unlock(pd);
 	if(rg) {
 		rg->schedlink = *gpp;
 		*gpp = rg;
@@ -273,51 +285,75 @@ checkerr(PollDesc *pd, int32 mode)
 	return 0;
 }
 
+static bool
+blockcommit(G *gp, G **gpp)
+{
+	return runtime·casp((void**)gpp, WAIT, gp);
+}
+
 // returns true if IO is ready, or false if timedout or closed
+// waitio - wait only for completed IO, ignore errors
 static bool
-netpollblock(PollDesc *pd, int32 mode)
+netpollblock(PollDesc *pd, int32 mode, bool waitio)
 {
-	G **gpp;
+	G **gpp, *old;
 
 	gpp = &pd->rg;
 	if(mode == 'w')
 		gpp = &pd->wg;
-	if(*gpp == READY) {
-		*gpp = nil;
-		return true;
-	}
-	if(*gpp != nil)
-		runtime·throw("netpollblock: double wait");
-	*gpp = g;
-	runtime·park(runtime·unlock, &pd->Lock, "IO wait");
-	runtime·lock(pd);
-	if(g->param)
-		return true;
-	return false;
+
+	// set the gpp semaphore to WAIT
+	for(;;) {
+		old = *gpp;
+		if(old == READY) {
+			*gpp = nil;
+			return true;
+		}
+		if(old != nil)
+			runtime·throw("netpollblock: double wait");
+		if(runtime·casp((void**)gpp, nil, WAIT))
+			break;
+	}
+
+	// need to recheck error states after setting gpp to WAIT
+	// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
+	// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
+	if(waitio || checkerr(pd, mode) == 0)
+		runtime·park((bool(*)(G*, void*))blockcommit, gpp, "IO wait");
+	// be careful to not lose concurrent READY notification
+	old = runtime·xchgp(gpp, nil);
+	if(old > WAIT)
+		runtime·throw("netpollblock: corrupted state");
+	return old == READY;
 }
 
 static G*
 netpollunblock(PollDesc *pd, int32 mode, bool ioready)
 {
-	G **gpp, *old;
+	G **gpp, *old, *new;
 
 	gpp = &pd->rg;
 	if(mode == 'w')
 		gpp = &pd->wg;
-	if(*gpp == READY)
-		return nil;
-	if(*gpp == nil) {
-		// Only set READY for ioready. runtime_pollWait
-		// will check for timeout/cancel before waiting.
-		if(ioready)
-			*gpp = READY;
-		return nil;
-	}
-	old = *gpp;
-	// pass unblock reason onto blocked g
-	old->param = (void*)ioready;
-	*gpp = nil;
-	return old;
+
+	for(;;) {
+		old = *gpp;
+		if(old == READY)
+			return nil;
+		if(old == nil && !ioready) {
+			// Only set READY for ioready. runtime_pollWait
+			// will check for timeout/cancel before waiting.
+			return nil;
+		}
+		new = nil;
+		if(ioready)
+			new = READY;
+		if(runtime·casp((void**)gpp, old, new))
+			break;
+	}
+	if(old > WAIT)
+		return old;  // must be G*
+	return nil;
 }
 
 static void
static void static void
...@@ -343,14 +379,14 @@ deadlineimpl(int64 now, Eface arg, bool read, bool write) ...@@ -343,14 +379,14 @@ deadlineimpl(int64 now, Eface arg, bool read, bool write)
if(pd->rd <= 0 || pd->rt.fv == nil) if(pd->rd <= 0 || pd->rt.fv == nil)
runtime·throw("deadlineimpl: inconsistent read deadline"); runtime·throw("deadlineimpl: inconsistent read deadline");
pd->rd = -1; pd->rd = -1;
pd->rt.fv = nil; runtime·atomicstorep(&pd->rt.fv, nil); // full memory barrier between store to rd and load of rg in netpollunblock
rg = netpollunblock(pd, 'r', false); rg = netpollunblock(pd, 'r', false);
} }
if(write) { if(write) {
if(pd->wd <= 0 || (pd->wt.fv == nil && !read)) if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
runtime·throw("deadlineimpl: inconsistent write deadline"); runtime·throw("deadlineimpl: inconsistent write deadline");
pd->wd = -1; pd->wd = -1;
pd->wt.fv = nil; runtime·atomicstorep(&pd->wt.fv, nil); // full memory barrier between store to wd and load of wg in netpollunblock
wg = netpollunblock(pd, 'w', false); wg = netpollunblock(pd, 'w', false);
} }
runtime·unlock(pd); runtime·unlock(pd);
......
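Taken together, netpollblock and netpollunblock implement the four-state semaphore (nil, READY, WAIT, G pointer) described in the comment at the top of netpoll.goc. For readers who want to experiment with the protocol outside the runtime, here is a transliteration into ordinary Go using sync/atomic; all names are illustrative, and the channel-based wakeup stands in for the real G pointer and scheduler park/unpark:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// Sentinels standing in for the runtime's READY and WAIT values.
var (
	ready = unsafe.Pointer(new(byte))
	wait  = unsafe.Pointer(new(byte))
)

// waiter stands in for a parked G; the channel replaces park/unpark.
type waiter struct{ wake chan struct{} }

// block mirrors netpollblock: consume READY, or go nil -> WAIT -> waiter.
func block(gpp *unsafe.Pointer) bool {
	// Phase 1: prepare to park by setting the semaphore to WAIT.
	for {
		old := atomic.LoadPointer(gpp)
		if old == ready {
			atomic.StorePointer(gpp, nil)
			return true // a notification was already pending
		}
		if old != nil {
			panic("double wait")
		}
		if atomic.CompareAndSwapPointer(gpp, nil, wait) {
			break
		}
	}
	// Phase 2: commit the park by publishing the waiter. If a concurrent
	// notification already replaced WAIT, the CAS fails and we never sleep.
	w := &waiter{wake: make(chan struct{})}
	if atomic.CompareAndSwapPointer(gpp, wait, unsafe.Pointer(w)) {
		<-w.wake
	}
	// Consume the final state to learn the unblock reason (cf. xchgp above).
	return atomic.SwapPointer(gpp, nil) == ready
}

// unblock mirrors netpollunblock: leave READY (io) or nil (timeout/close)
// behind and wake a committed waiter if there was one.
func unblock(gpp *unsafe.Pointer, ioready bool) {
	for {
		old := atomic.LoadPointer(gpp)
		if old == ready {
			return
		}
		if old == nil && !ioready {
			return // only io readiness may leave a READY token
		}
		var next unsafe.Pointer
		if ioready {
			next = ready
		}
		if atomic.CompareAndSwapPointer(gpp, old, next) {
			if old != nil && old != wait {
				close((*waiter)(old).wake)
			}
			return
		}
	}
}

func main() {
	var rg unsafe.Pointer // the read semaphore, like PollDesc.rg
	go unblock(&rg, true) // io readiness notification
	fmt.Println("io ready:", block(&rg)) // always prints: io ready: true
}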
@@ -1353,10 +1353,10 @@ top:
 		execute(gp);
 	}
 
-// Puts the current goroutine into a waiting state and unlocks the lock.
-// The goroutine can be made runnable again by calling runtime·ready(gp).
+// Puts the current goroutine into a waiting state and calls unlockf.
+// If unlockf returns false, the goroutine is resumed.
 void
-runtime·park(void(*unlockf)(Lock*), Lock *lock, int8 *reason)
+runtime·park(bool(*unlockf)(G*, void*), void *lock, int8 *reason)
 {
 	m->waitlock = lock;
 	m->waitunlockf = unlockf;
@@ -1364,17 +1364,39 @@ runtime·park(void(*unlockf)(Lock*), Lock *lock, int8 *reason)
 	runtime·mcall(park0);
 }
 
+static bool
+parkunlock(G *gp, void *lock)
+{
+	USED(gp);
+	runtime·unlock(lock);
+	return true;
+}
+
+// Puts the current goroutine into a waiting state and unlocks the lock.
+// The goroutine can be made runnable again by calling runtime·ready(gp).
+void
+runtime·parkunlock(Lock *lock, int8 *reason)
+{
+	runtime·park(parkunlock, lock, reason);
+}
+
 // runtime·park continuation on g0.
 static void
 park0(G *gp)
 {
+	bool ok;
+
 	gp->status = Gwaiting;
 	gp->m = nil;
 	m->curg = nil;
 	if(m->waitunlockf) {
-		m->waitunlockf(m->waitlock);
+		ok = m->waitunlockf(gp, m->waitlock);
 		m->waitunlockf = nil;
 		m->waitlock = nil;
+		if(!ok) {
+			gp->status = Grunnable;
+			execute(gp);  // Schedule it back, never returns.
+		}
 	}
 	if(m->lockedg) {
 		stoplockedm();
......
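The key behavioral change in proc.c is that the unlock callback now runs on g0, after the goroutine has logically stopped, and may veto the park: returning false sends the goroutine straight back to execution. A toy model of that contract in plain Go follows; the names and the channel standing in for the scheduler are hypothetical, not runtime API:

package main

import (
	"fmt"
	"sync/atomic"
)

// park emulates runtime·park + park0: commit runs once the goroutine is
// logically stopped; false means a racing wakeup won, so the goroutine
// resumes instead of sleeping (park0's `if(!ok) execute(gp)` path).
func park(commit func() bool, sleep chan struct{}) {
	if commit() {
		<-sleep // commit succeeded: really block
	}
}

func main() {
	const (
		stateWait   int32 = 1 // prepared to park
		stateParked int32 = 2 // committed
	)
	state := stateWait

	// A notifier races ahead and consumes the WAIT state...
	atomic.StoreInt32(&state, 0)

	// ...so the commit CAS fails and park returns immediately.
	park(func() bool {
		return atomic.CompareAndSwapInt32(&state, stateWait, stateParked)
	}, make(chan struct{}))
	fmt.Println("commit vetoed; goroutine resumed without sleeping")
}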
@@ -339,7 +339,7 @@ struct M
 	GCStats	gcstats;
 	bool	racecall;
 	bool	needextram;
-	void	(*waitunlockf)(Lock*);
+	bool	(*waitunlockf)(G*, void*);
 	void*	waitlock;
 	uintptr	settype_buf[1024];
@@ -790,21 +790,6 @@ int32	runtime·read(int32, void*, int32);
 int32	runtime·write(int32, void*, int32);
 int32	runtime·close(int32);
 int32	runtime·mincore(void*, uintptr, byte*);
-bool	runtime·cas(uint32*, uint32, uint32);
-bool	runtime·cas64(uint64*, uint64, uint64);
-bool	runtime·casp(void**, void*, void*);
-// Don't confuse with XADD x86 instruction,
-// this one is actually 'addx', that is, add-and-fetch.
-uint32	runtime·xadd(uint32 volatile*, int32);
-uint64	runtime·xadd64(uint64 volatile*, int64);
-uint32	runtime·xchg(uint32 volatile*, uint32);
-uint64	runtime·xchg64(uint64 volatile*, uint64);
-uint32	runtime·atomicload(uint32 volatile*);
-void	runtime·atomicstore(uint32 volatile*, uint32);
-void	runtime·atomicstore64(uint64 volatile*, uint64);
-uint64	runtime·atomicload64(uint64 volatile*);
-void*	runtime·atomicloadp(void* volatile*);
-void	runtime·atomicstorep(void* volatile*, void*);
 void	runtime·jmpdefer(FuncVal*, void*);
 void	runtime·exit1(int32);
 void	runtime·ready(G*);
@@ -845,14 +830,33 @@ uint32	runtime·fastrand1(void);
 void	runtime·rewindmorestack(Gobuf*);
 int32	runtime·timediv(int64, int32, int32*);
+
+// atomic operations
+bool	runtime·cas(uint32*, uint32, uint32);
+bool	runtime·cas64(uint64*, uint64, uint64);
+bool	runtime·casp(void**, void*, void*);
+// Don't confuse with XADD x86 instruction,
+// this one is actually 'addx', that is, add-and-fetch.
+uint32	runtime·xadd(uint32 volatile*, int32);
+uint64	runtime·xadd64(uint64 volatile*, int64);
+uint32	runtime·xchg(uint32 volatile*, uint32);
+uint64	runtime·xchg64(uint64 volatile*, uint64);
+void*	runtime·xchgp(void* volatile*, void*);
+uint32	runtime·atomicload(uint32 volatile*);
+void	runtime·atomicstore(uint32 volatile*, uint32);
+void	runtime·atomicstore64(uint64 volatile*, uint64);
+uint64	runtime·atomicload64(uint64 volatile*);
+void*	runtime·atomicloadp(void* volatile*);
+void	runtime·atomicstorep(void* volatile*, void*);
+
 void	runtime·setmg(M*, G*);
 void	runtime·newextram(void);
 void	runtime·exit(int32);
 void	runtime·breakpoint(void);
 void	runtime·gosched(void);
 void	runtime·gosched0(G*);
 void	runtime·schedtrace(bool);
-void	runtime·park(void(*)(Lock*), Lock*, int8*);
+void	runtime·park(bool(*)(G*, void*), void*, int8*);
+void	runtime·parkunlock(Lock*, int8*);
 void	runtime·tsleep(int64, int8*);
 M*	runtime·newm(void);
 void	runtime·goexit(void);
......
@@ -137,7 +137,7 @@ runtime·semacquire(uint32 volatile *addr, bool profile)
 		// Any semrelease after the cansemacquire knows we're waiting
 		// (we set nwait above), so go to sleep.
 		semqueue(root, addr, &s);
-		runtime·park(runtime·unlock, root, "semacquire");
+		runtime·parkunlock(root, "semacquire");
 		if(cansemacquire(addr)) {
 			if(t0)
 				runtime·blockevent(s.releasetime - t0, 3);
@@ -254,7 +254,7 @@ func runtime_Syncsemacquire(s *SyncSema) {
 		else
 			s->tail->next = &w;
 		s->tail = &w;
-		runtime·park(runtime·unlock, s, "semacquire");
+		runtime·parkunlock(s, "semacquire");
 		if(t0)
 			runtime·blockevent(w.releasetime - t0, 2);
 	}
@@ -288,7 +288,7 @@ func runtime_Syncsemrelease(s *SyncSema, n uint32) {
 		else
 			s->tail->next = &w;
 		s->tail = &w;
-		runtime·park(runtime·unlock, s, "semarelease");
+		runtime·parkunlock(s, "semarelease");
 	} else
 		runtime·unlock(s);
 }
......
@@ -76,7 +76,7 @@ runtime·tsleep(int64 ns, int8 *reason)
 	t.arg.data = g;
 	runtime·lock(&timers);
 	addtimer(&t);
-	runtime·park(runtime·unlock, &timers, reason);
+	runtime·parkunlock(&timers, reason);
 }
 
 static FuncVal timerprocv = {timerproc};
@@ -222,7 +222,7 @@ timerproc(void)
 		if(delta < 0) {
 			// No timers left - put goroutine to sleep.
 			timers.rescheduling = true;
-			runtime·park(runtime·unlock, &timers, "timer goroutine (idle)");
+			runtime·parkunlock(&timers, "timer goroutine (idle)");
 			continue;
 		}
 		// At least one timer pending. Sleep until then.
......