Commit e25f19a6 authored by Dmitriy Vyukov's avatar Dmitriy Vyukov

runtime: introduce entersyscallblock()

In preparation for the new scheduler.

R=golang-dev, rsc
CC=golang-dev
https://golang.org/cl/7386044
parent e5b0bceb
......@@ -360,7 +360,7 @@ getprofile(Profile *p)
return ret;
// Wait for new log.
runtime·entersyscall();
runtime·entersyscallblock();
runtime·notesleep(&p->wait);
runtime·exitsyscall();
runtime·noteclear(&p->wait);
......
......@@ -424,7 +424,7 @@ runtime·MHeap_Scavenger(void)
h = runtime·mheap;
for(k=0;; k++) {
runtime·noteclear(&note);
runtime·entersyscall();
runtime·entersyscallblock();
runtime·notetsleep(&note, tick);
runtime·exitsyscall();
......@@ -438,7 +438,7 @@ runtime·MHeap_Scavenger(void)
runtime·noteclear(&note);
notep = &note;
runtime·newproc1((byte*)forcegchelper, (byte*)&notep, sizeof(notep), 0, runtime·MHeap_Scavenger);
runtime·entersyscall();
runtime·entersyscallblock();
runtime·notesleep(&note);
runtime·exitsyscall();
if(trace)
......
......@@ -1049,6 +1049,59 @@ runtime·entersyscall(void)
schedunlock();
}
// The same as runtime·entersyscall(), but with a hint that the syscall is blocking.
// The hint is ignored at the moment, and it's just a copy of runtime·entersyscall().
// Callers pair this with runtime·exitsyscall() around a blocking note sleep
// so the scheduler can reuse this cpu while the goroutine is blocked.
#pragma textflag 7
void
runtime·entersyscallblock(void)
{
	uint32 v;

	// Turn profiling off for the duration of the syscall
	// (presumably so profiling ticks do not hit a blocked thread — confirm).
	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	// Mark the goroutine as executing a system call.
	g->status = Gsyscall;
	// Sanity check: the saved SP must lie within the goroutine's stack
	// bounds, otherwise the saved state is unusable for gc/traceback.
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// Fast path.
	// The slow path inside the schedlock/schedunlock will get
	// through without stopping if it does:
	//	mcpu--
	//	gwait not true
	//	waitstop && mcpu <= mcpumax not true
	// If we can do the same with a single atomic add,
	// then we can skip the locks.
	v = runtime·xadd(&runtime·sched.atomic, -1<<mcpuShift);
	if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
		return;

	// Slow path: take the scheduler lock and re-examine the atomic state word.
	schedlock();
	v = runtime·atomicload(&runtime·sched.atomic);
	if(atomic_gwaiting(v)) {
		// Goroutines are waiting to run; let the scheduler match them
		// to m's, then reload the state word, which matchmg may change.
		matchmg();
		v = runtime·atomicload(&runtime·sched.atomic);
	}
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// Apparently someone is waiting for running cpus to drop to the
		// limit (e.g. a stop-the-world); clear the flag and wake the waiter.
		runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
		runtime·notewakeup(&runtime·sched.stopped);
	}

	// Re-save sched in case one of the calls
	// (notewakeup, matchmg) triggered something using it.
	runtime·gosave(&g->sched);

	schedunlock();
}
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
......
......@@ -692,6 +692,7 @@ M* runtime·newm(void);
void runtime·goexit(void);
void runtime·asmcgocall(void (*fn)(void*), void*);
void runtime·entersyscall(void);
void runtime·entersyscallblock(void);
void runtime·exitsyscall(void);
G* runtime·newproc1(byte*, byte*, int32, int32, void*);
bool runtime·sigsend(int32 sig);
......
......@@ -105,7 +105,7 @@ func signal_recv() (m uint32) {
new = HASWAITER;
if(runtime·cas(&sig.state, old, new)) {
if (new == HASWAITER) {
runtime·entersyscall();
runtime·entersyscallblock();
runtime·notesleep(&sig);
runtime·exitsyscall();
runtime·noteclear(&sig);
......
......@@ -200,7 +200,7 @@ timerproc(void)
timers.sleeping = true;
runtime·noteclear(&timers.waitnote);
runtime·unlock(&timers);
runtime·entersyscall();
runtime·entersyscallblock();
runtime·notetsleep(&timers.waitnote, delta);
runtime·exitsyscall();
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment