Commit eff2b262 authored by Austin Clements

runtime: make it possible to exit Go-created threads

Currently, threads created by the runtime exist until the whole
program exits. For #14592 and #20395, we want to be able to exit and
clean up threads created by the runtime. This commit implements that
mechanism.

The main difficulty is how to clean up the g0 stack. In cgo mode and
on Solaris and Windows where the OS manages thread stacks, we simply
arrange to return from mstart and let the system clean up the thread.
If the runtime allocated the g0 stack, then we use a new exitThread
syscall wrapper that arranges to clear a flag in the M once the stack
can safely be reaped and call the thread termination syscall.

exitThread is based on the existing exit1 wrapper, which was always
meant to terminate the calling thread. However, exit1 has never been
used since it was introduced 9 years ago, so it was broken on several
platforms. exitThread also has the additional complication of having
to flag that the stack is unused, which requires some tricks on
platforms that use the stack for syscalls.

This still leaves the problem of how to reap the unused g0 stacks. For
this, we move the M from allm to a new freem list as part of the M
exiting. Later, allocm scans the freem list, finds Ms that are marked
as done with their stack, removes these from the list and frees their
g0 stacks. This also allows these Ms to be garbage collected.

This CL does not yet use any of this functionality. Follow-up CLs
will. Likewise, there are no new tests in this CL because we'll need
follow-up functionality to test it.

Change-Id: Ic851ee74227b6d39c6fc1219fc71b45d3004bc63
Reviewed-on: https://go-review.googlesource.com/46037
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
parent a9c3d09d
......@@ -98,6 +98,10 @@ _cgo_try_pthread_create(pthread_t* thread, const pthread_attr_t* attr, void* (*p
for (tries = 0; tries < 20; tries++) {
err = pthread_create(thread, attr, pfn, arg);
if (err == 0) {
pthread_detach(*thread);
return 0;
}
if (err != EAGAIN) {
return err;
}
......
......@@ -181,6 +181,12 @@ func newosproc(mp *m, _ unsafe.Pointer) {
}
}
// exitThread is a stub on Solaris: thread teardown is delegated to
// libc, so this must be unreachable. wait is unused.
func exitThread(wait *uint32) {
	// We should never reach exitThread on Solaris because we let
	// libc clean up threads.
	throw("exitThread")
}
var urandom_dev = []byte("/dev/urandom\x00")
//go:nosplit
......
......@@ -168,6 +168,9 @@ func newosproc(mp *m, stk unsafe.Pointer) {
}
}
//go:noescape
func exitThread(wait *uint32)
//go:nosplit
func semacreate(mp *m) {
if mp.waitsema != 0 {
......
......@@ -21,6 +21,9 @@ const (
_UC_SIGMASK = 0x01
_UC_CPU = 0x04
// From <sys/lwp.h>
_LWP_DETACHED = 0x00000040
_EAGAIN = 35
)
......@@ -182,7 +185,7 @@ func newosproc(mp *m, stk unsafe.Pointer) {
lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, funcPC(netbsdMstart))
ret := lwp_create(unsafe.Pointer(&uc), 0, unsafe.Pointer(&mp.procid))
ret := lwp_create(unsafe.Pointer(&uc), _LWP_DETACHED, unsafe.Pointer(&mp.procid))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret < 0 {
print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", -ret, ")\n")
......
......@@ -421,6 +421,12 @@ func newosproc(mp *m, stk unsafe.Pointer) {
}
}
// exitThread is a stub on Plan 9: the OS reclaims threads itself, so
// this must be unreachable. wait is unused.
func exitThread(wait *uint32) {
	// We should never reach exitThread on Plan 9 because we let
	// the OS clean up threads.
	throw("exitThread")
}
//go:nosplit
func semacreate(mp *m) {
}
......
......@@ -640,6 +640,9 @@ func newosproc(mp *m, stk unsafe.Pointer) {
print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", getlasterror(), ")\n")
throw("runtime.newosproc")
}
// Close thandle to avoid leaking the thread object if it exits.
stdcall1(_CloseHandle, thandle)
}
// Used by the C library build mode. On Linux this function would allocate a
......@@ -651,6 +654,12 @@ func newosproc0(mp *m, stk unsafe.Pointer) {
newosproc(mp, stk)
}
// exitThread is a stub on Windows: the OS reclaims threads itself, so
// this must be unreachable. wait is unused.
func exitThread(wait *uint32) {
	// We should never reach exitThread on Windows because we let
	// the OS clean up threads.
	throw("exitThread")
}
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
......
......@@ -1152,7 +1152,8 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 {
func mstart() {
_g_ := getg()
if _g_.stack.lo == 0 {
osStack := _g_.stack.lo == 0
if osStack {
// Initialize stack bounds from system stack.
// Cgo may have left stack size in stack.hi.
size := _g_.stack.hi
......@@ -1166,21 +1167,30 @@ func mstart() {
// both Go and C functions with stack growth prologues.
_g_.stackguard0 = _g_.stack.lo + _StackGuard
_g_.stackguard1 = _g_.stackguard0
mstart1()
mstart1(0)
// Exit this thread.
if GOOS == "windows" || GOOS == "solaris" {
// Windows and Solaris always system-allocate the
// stack, but put it in _g_.stack before mstart, so
// the logic above hasn't set osStack yet.
osStack = true
}
mexit(osStack)
}
func mstart1() {
func mstart1(dummy int32) {
_g_ := getg()
if _g_ != _g_.m.g0 {
throw("bad runtime·mstart")
}
// Record top of stack for use by mcall.
// Once we call schedule we're never coming back,
// so other calls can reuse this stack space.
gosave(&_g_.m.g0.sched)
_g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
// Record the caller for use as the top of stack in mcall and
// for terminating the thread.
// We're never coming back to mstart1 after we call schedule,
// so other calls can reuse the current frame.
save(getcallerpc(), getcallersp(unsafe.Pointer(&dummy)))
asminit()
minit()
......@@ -1219,6 +1229,99 @@ func mstartm0() {
initsig(false)
}
// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
// the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
// will release the P before exiting.
//
//go:yeswritebarrierrec
// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
// the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
// will release the P before exiting.
//
// osStack reports whether the g0 stack is system-allocated (in which
// case the OS or thread library frees it) rather than runtime-allocated
// (in which case it is reaped later via sched.freem).
//
//go:yeswritebarrierrec
func mexit(osStack bool) {
	g := getg()
	m := g.m

	if m == &m0 {
		// This is the main thread. Just wedge it.
		//
		// On Linux, exiting the main thread puts the process
		// into a non-waitable zombie state. On Plan 9,
		// exiting the main thread unblocks wait even though
		// other threads are still running. On Solaris we can
		// neither exitThread nor return from mstart. Other
		// bad things probably happen on other platforms.
		//
		// We could try to clean up this M more before wedging
		// it, but that complicates signal handling.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		notesleep(&m.park)
		throw("locked m0 woke up")
	}

	// Stop taking signals on this thread before dismantling its state.
	sigblock()
	unminit()

	// Free the gsignal stack.
	if m.gsignal != nil {
		stackfree(m.gsignal.stack)
	}

	// Remove m from allm (singly-linked via alllink).
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == m {
			*pprev = m.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	if !osStack {
		// Delay reaping m until it's done with the stack.
		//
		// If this is using an OS stack, the OS will free it
		// so there's no need for reaping.
		atomic.Store(&m.freeWait, 1)
		// Put m on the free list, though it will not be reaped until
		// freeWait is 0. Note that the free list must not be linked
		// through alllink because some functions walk allm without
		// locking, so may be using alllink.
		m.freelink = sched.freem
		sched.freem = m
	}
	unlock(&sched.lock)

	// Release the P.
	handoffp(releasep())
	// After this point we must not have write barriers.

	// Invoke the deadlock detector. This must happen after
	// handoffp because it may have started a new M to take our
	// P's work.
	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if osStack {
		// Return from mstart and let the system thread
		// library free the g0 stack and terminate the thread.
		return
	}

	// mstart is the thread's entry point, so there's nothing to
	// return to. Exit the thread directly. exitThread will clear
	// m.freeWait when it's done with the stack and the m can be
	// reaped.
	exitThread(&m.freeWait)
}
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
......@@ -1364,6 +1467,27 @@ func allocm(_p_ *p, fn func()) *m {
if _g_.m.p == 0 {
acquirep(_p_) // temporarily borrow p for mallocs in this function
}
// Release the free M list. We need to do this somewhere and
// this may free up a stack we can use.
if sched.freem != nil {
lock(&sched.lock)
var newList *m
for freem := sched.freem; freem != nil; {
if freem.freeWait != 0 {
next := freem.freelink
freem.freelink = newList
newList = freem
freem = next
continue
}
stackfree(freem.g0.stack)
freem = freem.freelink
}
sched.freem = newList
unlock(&sched.lock)
}
mp := new(m)
mp.mstartfn = fn
mcommoninit(mp)
......@@ -3377,7 +3501,7 @@ func gcount() int32 {
}
func mcount() int32 {
return int32(sched.mnext)
return int32(sched.mnext - sched.nmfreed)
}
var prof struct {
......@@ -3902,6 +4026,7 @@ func incidlelocked(v int32) {
// Check for deadlock situation.
// The check is based on number of running M's, if 0 -> deadlock.
// sched.lock must be held.
func checkdead() {
// For -buildmode=c-shared or -buildmode=c-archive it's OK if
// there are no running goroutines. The calling program is
......
......@@ -169,9 +169,13 @@ func efaceOf(ep *interface{}) *eface {
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs, Ms, and Ps are always reachable via true pointers in the
// allgs, allm, and allp lists or (during allocation before they reach those lists)
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.
// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
......@@ -221,6 +225,15 @@ func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }
//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constrains on
// muintptrs:
//
// 1. Never hold an muintptr locally across a safe point.
//
// 2. Any muintptr in the heap must be owned by the M itself so it can
// ensure it is not in use when the last true *m is released.
type muintptr uintptr
//go:nosplit
......@@ -413,7 +426,8 @@ type m struct {
inwb bool // m is executing a write barrier
newSigstack bool // minit on C thread called sigaltstack
printlock int8
incgo bool // m is executing a cgo call
incgo bool // m is executing a cgo call
freeWait uint32 // if == 0, safe to free g0 and delete m (atomic)
fastrand [2]uint32
needextram bool
traceback uint8
......@@ -440,6 +454,7 @@ type m struct {
startingtrace bool
syscalltick uint32
thread uintptr // thread handle
freelink *m // on sched.freem
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
......@@ -528,12 +543,16 @@ type schedt struct {
lock mutex
// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
// sure to call checkdead().
midle muintptr // idle m's waiting for work
nmidle int32 // number of idle m's waiting for work
nmidlelocked int32 // number of locked m's waiting for work
mnext int64 // number of m's that have been created and next M ID
maxmcount int32 // maximum number of m's allowed (or die)
nmsys int32 // number of system m's not counted for deadlock
nmfreed int64 // cumulative number of freed m's
ngsys uint32 // number of system goroutines; updated atomically
......@@ -560,6 +579,10 @@ type schedt struct {
deferlock mutex
deferpool [5]*_defer
// freem is the list of m's waiting to be freed when their
// m.exited is set. Linked through m.freelink.
freem *m
gcwaiting uint32 // gc is waiting to run
stopwait int32
stopnote note
......
......@@ -136,7 +136,6 @@ func gosave(buf *gobuf)
//go:noescape
func jmpdefer(fv *funcval, argp uintptr)
func exit1(code int32)
func asminit()
func setg(gg *g)
func breakpoint()
......
......@@ -25,3 +25,9 @@ func write(fd uintptr, p unsafe.Pointer, n int32) int32
func open(name *byte, mode, perm int32) int32
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
// exitThread terminates the current thread, writing *wait = 0 when
// the stack is safe to reclaim.
//
//go:noescape
func exitThread(wait *uint32)
......@@ -19,7 +19,7 @@ TEXT runtime·exit(SB),NOSPLIT,$0
// Exit this OS thread (like pthread_exit, which eventually
// calls __bsdthread_terminate).
TEXT runtime·exit1(SB),NOSPLIT,$16-0
TEXT exit1<>(SB),NOSPLIT,$16-0
// __bsdthread_terminate takes 4 word-size arguments.
// Set them all to 0. (None are an exit status.)
MOVL $0, 0(SP)
......@@ -32,6 +32,26 @@ TEXT runtime·exit1(SB),NOSPLIT,$16-0
MOVL $0xf1, 0xf1 // crash
RET
// exitStack is four zero words in read-only data, used below as a fake
// stack holding __bsdthread_terminate's four (all-zero) arguments.
GLOBL exitStack<>(SB),RODATA,$(4*4)
DATA exitStack<>+0x00(SB)/4, $0
DATA exitStack<>+0x04(SB)/4, $0
DATA exitStack<>+0x08(SB)/4, $0
DATA exitStack<>+0x0c(SB)/4, $0

// func exitThread(wait *uint32)
// Clears *wait to signal that the g0 stack may be reaped, then
// terminates just this thread.
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
	MOVL	wait+0(FP), AX
	// We're done using the stack.
	MOVL	$0, (AX)
	// __bsdthread_terminate takes 4 arguments, which it expects
	// on the stack. They should all be 0, so switch over to a
	// fake stack of 0s. It won't write to the stack.
	MOVL	$exitStack<>(SB), SP
	MOVL	$361, AX	// __bsdthread_terminate
	INT	$0x80
	MOVL	$0xf1, 0xf1	// crash if the syscall somehow returns
	JMP	0(PC)
TEXT runtime·open(SB),NOSPLIT,$0
MOVL $5, AX
INT $0x80
......@@ -400,7 +420,7 @@ TEXT runtime·bsdthread_start(SB),NOSPLIT,$0
MOVL BX, m_procid(DX) // m->procid = thread port (for debuggers)
CALL runtime·stackcheck(SB) // smashes AX
CALL CX // fn()
CALL runtime·exit1(SB)
CALL exit1<>(SB)
RET
// func bsdthread_register() int32
......
......@@ -25,7 +25,8 @@ TEXT runtime·exit(SB),NOSPLIT,$0
// Exit this OS thread (like pthread_exit, which eventually
// calls __bsdthread_terminate).
TEXT runtime·exit1(SB),NOSPLIT,$0
TEXT exit1<>(SB),NOSPLIT,$0
// Because of exitThread below, this must not use the stack.
// __bsdthread_terminate takes 4 word-size arguments.
// Set them all to 0. (None are an exit status.)
MOVL $0, DI
......@@ -37,7 +38,12 @@ TEXT runtime·exit1(SB),NOSPLIT,$0
MOVL $0xf1, 0xf1 // crash
RET
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-8
MOVQ wait+0(FP), AX
// We're done using the stack.
MOVL $0, (AX)
JMP exit1<>(SB)
TEXT runtime·open(SB),NOSPLIT,$0
MOVQ name+0(FP), DI // arg 1 pathname
......@@ -460,7 +466,7 @@ TEXT runtime·bsdthread_start(SB),NOSPLIT,$0
MOVQ CX, g_m(AX)
CALL runtime·stackcheck(SB) // smashes AX, CX
CALL DX // fn
CALL runtime·exit1(SB)
CALL exit1<>(SB)
RET
// func bsdthread_register() int32
......
......@@ -89,7 +89,8 @@ TEXT runtime·exit(SB),NOSPLIT,$-4
// Exit this OS thread (like pthread_exit, which eventually
// calls __bsdthread_terminate).
TEXT runtime·exit1(SB),NOSPLIT,$0
TEXT exit1<>(SB),NOSPLIT,$0
// Because of exitThread below, this must not use the stack.
// __bsdthread_terminate takes 4 word-size arguments.
// Set them all to 0. (None are an exit status.)
MOVW $0, R0
......@@ -102,6 +103,18 @@ TEXT runtime·exit1(SB),NOSPLIT,$0
MOVW $1003, R1
MOVW R0, (R1) // fail hard
// func exitThread(wait *uint32)
// Atomically clears *wait (signalling that the g0 stack may be reaped),
// then terminates just this thread via exit1<>.
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
	MOVW	wait+0(FP), R0
	// We're done using the stack.
	MOVW	$0, R1
storeloop:
	LDREX	(R0), R4	// dummy load of *wait to claim the exclusive monitor
	STREX	R1, (R0), R1	// stores R1 (0) to *wait; R1 gets the store status
	CMP	$0, R1
	BNE	storeloop	// retry if the exclusive store failed
	JMP	exit1<>(SB)
TEXT runtime·raise(SB),NOSPLIT,$0
// Ideally we'd send the signal to the current thread,
// not the whole process, but that's too hard on OS X.
......@@ -376,7 +389,7 @@ TEXT runtime·bsdthread_start(SB),NOSPLIT,$0
EOR R12, R12
WORD $0xeee1ca10 // fmxr fpscr, ip
BL (R2) // fn
BL runtime·exit1(SB)
BL exit1<>(SB)
RET
// int32 bsdthread_register(void)
......
......@@ -89,7 +89,8 @@ TEXT runtime·exit(SB),NOSPLIT,$-8
// Exit this OS thread (like pthread_exit, which eventually
// calls __bsdthread_terminate).
TEXT runtime·exit1(SB),NOSPLIT,$0
TEXT exit1<>(SB),NOSPLIT,$0
// Because of exitThread below, this must not use the stack.
// __bsdthread_terminate takes 4 word-size arguments.
// Set them all to 0. (None are an exit status.)
MOVW $0, R0
......@@ -102,6 +103,14 @@ TEXT runtime·exit1(SB),NOSPLIT,$0
MOVD $1003, R1
MOVD R0, (R1) // fail hard
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-8
MOVD wait+0(FP), R0
// We're done using the stack.
MOVW $0, R1
STLRW R1, (R0)
JMP exit1<>(SB)
TEXT runtime·raise(SB),NOSPLIT,$0
// Ideally we'd send the signal to the current thread,
// not the whole process, but that's too hard on OS X.
......
......@@ -64,12 +64,18 @@ TEXT runtime·exit(SB),NOSPLIT,$-8
MOVL $0xf1, 0xf1 // crash
RET
TEXT runtime·exit1(SB),NOSPLIT,$-8
MOVL code+0(FP), DI // arg 1 exit status
MOVL $431, AX
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-8
MOVQ wait+0(FP), AX
// We're done using the stack.
MOVL $0, (AX)
MOVL $0x10000, DI // arg 1 how - EXTEXIT_LWP
MOVL $0, SI // arg 2 status
MOVL $0, DX // arg 3 addr
MOVL $494, AX // extexit
SYSCALL
MOVL $0xf1, 0xf1 // crash
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8
MOVQ name+0(FP), DI // arg 1 pathname
......
......@@ -52,12 +52,23 @@ TEXT runtime·exit(SB),NOSPLIT,$-4
MOVL $0xf1, 0xf1 // crash
RET
TEXT runtime·exit1(SB),NOSPLIT,$-4
MOVL $431, AX
GLOBL exitStack<>(SB),RODATA,$8
DATA exitStack<>+0x00(SB)/4, $0
DATA exitStack<>+0x04(SB)/4, $0
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
MOVL wait+0(FP), AX
// We're done using the stack.
MOVL $0, (AX)
// thr_exit takes a single pointer argument, which it expects
// on the stack. We want to pass 0, so switch over to a fake
// stack of 0s. It won't write to the stack.
MOVL $exitStack<>(SB), SP
MOVL $431, AX // thr_exit
INT $0x80
JAE 2(PC)
MOVL $0xf1, 0xf1 // crash
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-4
MOVL $5, AX
......
......@@ -54,12 +54,16 @@ TEXT runtime·exit(SB),NOSPLIT,$-8
MOVL $0xf1, 0xf1 // crash
RET
TEXT runtime·exit1(SB),NOSPLIT,$-8
MOVL code+0(FP), DI // arg 1 exit status
MOVL $431, AX
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-8
MOVQ wait+0(FP), AX
// We're done using the stack.
MOVL $0, (AX)
MOVL $0, DI // arg 1 long *state
MOVL $431, AX // thr_exit
SYSCALL
MOVL $0xf1, 0xf1 // crash
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8
MOVQ name+0(FP), DI // arg 1 pathname
......
......@@ -82,13 +82,22 @@ TEXT runtime·exit(SB),NOSPLIT,$-8
MOVW.CS R8, (R8)
RET
TEXT runtime·exit1(SB),NOSPLIT,$-8
MOVW code+0(FP), R0 // arg 1 exit status
MOVW $SYS_thr_exit, R7
SWI $0
MOVW.CS $0, R8 // crash on syscall failure
MOVW.CS R8, (R8)
RET
// func exitThread(wait *uint32)
// Atomically clears *wait (signalling that the g0 stack may be reaped),
// then calls thr_exit to terminate just this thread.
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
	MOVW	wait+0(FP), R0
	// We're done using the stack.
	MOVW	$0, R1
storeloop:
	LDREX	(R0), R4	// dummy load of *wait to claim the exclusive monitor
	STREX	R1, (R0), R1	// stores R1 (0) to *wait; R1 gets the store status
	CMP	$0, R1
	BNE	storeloop	// retry if the exclusive store failed
	MOVW	$0, R0	// arg 1 long *state
	MOVW	$SYS_thr_exit, R7
	SWI	$0
	MOVW.CS	$0, R8	// crash on syscall failure
	MOVW.CS	R8, (R8)
	JMP	0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8
MOVW name+0(FP), R0 // arg 1 name
......
......@@ -67,13 +67,25 @@ TEXT runtime·exit(SB),NOSPLIT,$0
INT $3 // not reached
RET
TEXT runtime·exit1(SB),NOSPLIT,$0
TEXT exit1<>(SB),NOSPLIT,$0
MOVL $SYS_exit, AX
MOVL code+0(FP), BX
INVOKE_SYSCALL
INT $3 // not reached
RET
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
MOVL wait+0(FP), AX
// We're done using the stack.
MOVL $0, (AX)
MOVL $1, AX // exit (just this thread)
MOVL $0, BX // exit code
INT $0x80 // no stack; must not use CALL
// We may not even have a stack any more.
INT $3
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$0
MOVL $SYS_open, AX
MOVL name+0(FP), BX
......@@ -432,7 +444,7 @@ TEXT runtime·clone(SB),NOSPLIT,$0
nog:
CALL SI // fn()
CALL runtime·exit1(SB)
CALL exit1<>(SB)
MOVL $0x1234, 0x1005
TEXT runtime·sigaltstack(SB),NOSPLIT,$-8
......
......@@ -52,11 +52,17 @@ TEXT runtime·exit(SB),NOSPLIT,$0-4
SYSCALL
RET
TEXT runtime·exit1(SB),NOSPLIT,$0-4
MOVL code+0(FP), DI
// func exitThread(wait *uint32)
// Clears *wait to signal that the g0 stack may be reaped, then calls
// exit (the single-thread exit syscall, not exit_group) to terminate
// just this thread.
TEXT runtime·exitThread(SB),NOSPLIT,$0-8
	MOVQ	wait+0(FP), AX
	// We're done using the stack.
	MOVL	$0, (AX)
	MOVL	$0, DI	// exit code
	MOVL	$SYS_exit, AX
	SYSCALL
	// We may not even have a stack any more.
	INT	$3
	JMP	0(PC)
TEXT runtime·open(SB),NOSPLIT,$0-20
MOVQ name+0(FP), DI
......
......@@ -114,7 +114,7 @@ TEXT runtime·exit(SB),NOSPLIT,$-4
MOVW $1002, R1
MOVW R0, (R1) // fail hard
TEXT runtime·exit1(SB),NOSPLIT,$-4
TEXT exit1<>(SB),NOSPLIT,$-4
MOVW code+0(FP), R0
MOVW $SYS_exit, R7
SWI $0
......@@ -122,6 +122,22 @@ TEXT runtime·exit1(SB),NOSPLIT,$-4
MOVW $1003, R1
MOVW R0, (R1) // fail hard
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$-4-4
MOVW wait+0(FP), R0
// We're done using the stack.
// Alas, there's no reliable way to make this write atomic
// without potentially using the stack. So it goes.
MOVW $0, R1
MOVW R1, (R0)
MOVW $0, R0 // exit code
MOVW $SYS_exit, R7
SWI $0
MOVW $1234, R0
MOVW $1004, R1
MOVW R0, (R1) // fail hard
JMP 0(PC)
TEXT runtime·gettid(SB),NOSPLIT,$0-4
MOVW $SYS_gettid, R7
SWI $0
......@@ -317,7 +333,7 @@ nog:
SUB $16, R13 // restore the stack pointer to avoid memory corruption
MOVW $0, R0
MOVW R0, 4(R13)
BL runtime·exit1(SB)
BL exit1<>(SB)
MOVW $1234, R0
MOVW $1005, R1
......
......@@ -54,11 +54,16 @@ TEXT runtime·exit(SB),NOSPLIT,$-8-4
SVC
RET
TEXT runtime·exit1(SB),NOSPLIT,$-8-4
MOVW code+0(FP), R0
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$-8-8
MOVD wait+0(FP), R0
// We're done using the stack.
MOVW $0, R1
STLRW R1, (R0)
MOVW $0, R0 // exit code
MOVD $SYS_exit, R8
SVC
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8-20
MOVD $AT_FDCWD, R0
......
......@@ -53,11 +53,18 @@ TEXT runtime·exit(SB),NOSPLIT,$-8-4
SYSCALL
RET
TEXT runtime·exit1(SB),NOSPLIT,$-8-4
MOVW code+0(FP), R4
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$-8-8
MOVV wait+0(FP), R1
// We're done using the stack.
MOVW $0, R2
SYNC
MOVW R2, (R1)
SYNC
MOVW $0, R4 // exit code
MOVV $SYS_exit, R2
SYSCALL
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8-20
MOVV name+0(FP), R4
......
......@@ -54,12 +54,19 @@ TEXT runtime·exit(SB),NOSPLIT,$0-4
UNDEF
RET
TEXT runtime·exit1(SB),NOSPLIT,$0-4
MOVW code+0(FP), R4
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
MOVW wait+0(FP), R1
// We're done using the stack.
MOVW $0, R2
SYNC
MOVW R2, (R1)
SYNC
MOVW $0, R4 // exit code
MOVW $SYS_exit, R2
SYSCALL
UNDEF
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$0-16
MOVW name+0(FP), R4
......
......@@ -54,10 +54,16 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
SYSCALL $SYS_exit_group
RET
TEXT runtime·exit1(SB),NOSPLIT|NOFRAME,$0-4
MOVW code+0(FP), R3
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
MOVD wait+0(FP), R1
// We're done using the stack.
MOVW $0, R2
SYNC
MOVW R2, (R1)
MOVW $0, R3 // exit code
SYSCALL $SYS_exit
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20
MOVD name+0(FP), R3
......
......@@ -49,11 +49,16 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
SYSCALL
RET
TEXT runtime·exit1(SB),NOSPLIT|NOFRAME,$0-4
MOVW code+0(FP), R2
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
MOVD wait+0(FP), R1
// We're done using the stack.
MOVW $0, R2
MOVW R2, (R1)
MOVW $0, R2 // exit code
MOVW $SYS_exit, R1
SYSCALL
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20
MOVD name+0(FP), R2
......
......@@ -16,11 +16,13 @@ TEXT runtime·exit(SB),NOSPLIT,$4
NACL_SYSCALL(SYS_exit)
JMP 0(PC)
TEXT runtime·exit1(SB),NOSPLIT,$4
MOVL code+0(FP), AX
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$4-4
MOVL wait+0(FP), AX
// SYS_thread_exit will clear *wait when the stack is free.
MOVL AX, 0(SP)
NACL_SYSCALL(SYS_thread_exit)
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$12
MOVL name+0(FP), AX
......
......@@ -19,10 +19,12 @@ TEXT runtime·exit(SB),NOSPLIT,$0
NACL_SYSCALL(SYS_exit)
RET
TEXT runtime·exit1(SB),NOSPLIT,$0
MOVL code+0(FP), DI
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
MOVL wait+0(FP), DI
// SYS_thread_exit will clear *wait when the stack is free.
NACL_SYSCALL(SYS_thread_exit)
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$0
MOVL name+0(FP), DI
......
......@@ -15,10 +15,12 @@ TEXT runtime·exit(SB),NOSPLIT,$0
NACL_SYSCALL(SYS_exit)
RET
TEXT runtime·exit1(SB),NOSPLIT,$0
MOVW code+0(FP), R0
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$4-4
MOVW wait+0(FP), R0
// SYS_thread_exit will clear *wait when the stack is free.
NACL_SYSCALL(SYS_thread_exit)
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$0
MOVW name+0(FP), R0
......
......@@ -17,12 +17,15 @@ TEXT runtime·exit(SB),NOSPLIT,$-4
MOVL $0xf1, 0xf1 // crash
RET
TEXT runtime·exit1(SB),NOSPLIT,$-4
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
MOVL wait+0(FP), AX
// We're done using the stack.
MOVL $0, (AX)
MOVL $310, AX // sys__lwp_exit
INT $0x80
JAE 2(PC)
MOVL $0xf1, 0xf1 // crash
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-4
MOVL $5, AX
......@@ -298,7 +301,7 @@ TEXT runtime·lwp_tramp(SB),NOSPLIT,$0
// Call fn
CALL SI
CALL runtime·exit1(SB)
// fn should never return
MOVL $0x1234, 0x1005
RET
......
......@@ -79,11 +79,15 @@ TEXT runtime·exit(SB),NOSPLIT,$-8
MOVL $0xf1, 0xf1 // crash
RET
TEXT runtime·exit1(SB),NOSPLIT,$-8
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-8
MOVQ wait+0(FP), AX
// We're done using the stack.
MOVL $0, (AX)
MOVL $310, AX // sys__lwp_exit
SYSCALL
MOVL $0xf1, 0xf1 // crash
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8
MOVQ name+0(FP), DI // arg 1 pathname
......
......@@ -18,12 +18,21 @@ TEXT runtime·exit(SB),NOSPLIT,$-4
MOVW.CS R8, (R8)
RET
TEXT runtime·exit1(SB),NOSPLIT,$-4
// func exitThread(wait *uint32)
// Atomically clears *wait (signalling that the g0 stack may be reaped),
// then calls _lwp_exit to terminate just this thread.
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
	MOVW	wait+0(FP), R0
	// We're done using the stack.
	MOVW	$0, R1
storeloop:
	LDREX	(R0), R4	// dummy load of *wait to claim the exclusive monitor
	STREX	R1, (R0), R1	// stores R1 (0) to *wait; R1 gets the store status
	CMP	$0, R1
	BNE	storeloop	// retry if the exclusive store failed
	SWI	$0xa00136	// sys__lwp_exit
	MOVW	$1, R8	// crash if the syscall somehow returns
	MOVW	R8, (R8)
	JMP	0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8
MOVW name+0(FP), R0
MOVW mode+4(FP), R1
......
......@@ -19,14 +19,21 @@ TEXT runtime·exit(SB),NOSPLIT,$-4
MOVL $0xf1, 0xf1 // crash
RET
TEXT runtime·exit1(SB),NOSPLIT,$8
MOVL $0, 0(SP)
MOVL $0, 4(SP) // arg 1 - notdead
GLOBL exitStack<>(SB),RODATA,$8
DATA exitStack<>+0x00(SB)/4, $0
DATA exitStack<>+0x04(SB)/4, $0
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
MOVL wait+0(FP), AX
// We're done using the stack.
MOVL $0, (AX)
// sys__lwp_exit takes 1 argument, which it expects on the stack.
MOVL $exitStack<>(SB), SP
MOVL $302, AX // sys___threxit
INT $0x80
JAE 2(PC)
MOVL $0xf1, 0xf1 // crash
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-4
MOVL $5, AX
......@@ -308,7 +315,7 @@ TEXT runtime·tfork(SB),NOSPLIT,$12
// Call fn.
CALL SI
CALL runtime·exit1(SB)
// fn should never return.
MOVL $0x1234, 0x1005
RET
......
......@@ -88,12 +88,16 @@ TEXT runtime·exit(SB),NOSPLIT,$-8
MOVL $0xf1, 0xf1 // crash
RET
TEXT runtime·exit1(SB),NOSPLIT,$-8
// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT,$0-8
MOVQ wait+0(FP), AX
// We're done using the stack.
MOVL $0, (AX)
MOVQ $0, DI // arg 1 - notdead
MOVL $302, AX // sys___threxit
SYSCALL
MOVL $0xf1, 0xf1 // crash
RET
JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8
MOVQ name+0(FP), DI // arg 1 pathname
......
......@@ -22,13 +22,22 @@ TEXT runtime·exit(SB),NOSPLIT,$-4
MOVW.CS R8, (R8)
RET
TEXT runtime·exit1(SB),NOSPLIT,$-4
// func exitThread(wait *uint32)
// Atomically clears *wait (signalling that the g0 stack may be reaped),
// then calls __threxit to terminate just this thread.
TEXT runtime·exitThread(SB),NOSPLIT,$0-4
	MOVW	wait+0(FP), R0
	// We're done using the stack.
	MOVW	$0, R1
storeloop:
	LDREX	(R0), R4	// dummy load of *wait to claim the exclusive monitor
	STREX	R1, (R0), R1	// stores R1 (0) to *wait; R1 gets the store status
	CMP	$0, R1
	BNE	storeloop	// retry if the exclusive store failed
	MOVW	$0, R0	// arg 1 - notdead
	MOVW	$302, R12	// sys___threxit
	SWI	$0
	MOVW.CS	$1, R8	// crash on syscall failure
	MOVW.CS	R8, (R8)
	JMP	0(PC)
TEXT runtime·open(SB),NOSPLIT,$-4
MOVW name+0(FP), R0 // arg 1 - path
......@@ -269,7 +278,7 @@ TEXT runtime·tfork(SB),NOSPLIT,$0
// Call fn.
BL (R6)
BL runtime·exit1(SB)
// fn should never return.
MOVW $2, R8 // crash if reached
MOVW R8, (R8)
RET
......
......@@ -139,7 +139,7 @@ TEXT runtime·rfork(SB),NOSPLIT,$0
MOVL AX, ret+4(FP)
RET
TEXT runtime·tstart_plan9(SB),NOSPLIT,$0
TEXT runtime·tstart_plan9(SB),NOSPLIT,$4
MOVL newm+0(FP), CX
MOVL m_g0(CX), DX
......@@ -163,8 +163,10 @@ TEXT runtime·tstart_plan9(SB),NOSPLIT,$0
CALL runtime·stackcheck(SB) // smashes AX, CX
CALL runtime·mstart(SB)
MOVL $0x1234, 0x1234 // not reached
RET
// Exit the thread.
MOVL $0, 0(SP)
CALL runtime·exits(SB)
JMP 0(PC)
// void sigtramp(void *ureg, int8 *note)
TEXT runtime·sigtramp(SB),NOSPLIT,$0
......
......@@ -136,7 +136,7 @@ TEXT runtime·rfork(SB),NOSPLIT,$0
MOVL AX, ret+8(FP)
RET
TEXT runtime·tstart_plan9(SB),NOSPLIT,$0
TEXT runtime·tstart_plan9(SB),NOSPLIT,$8
MOVQ newm+0(FP), CX
MOVQ m_g0(CX), DX
......@@ -160,8 +160,10 @@ TEXT runtime·tstart_plan9(SB),NOSPLIT,$0
CALL runtime·stackcheck(SB) // smashes AX, CX
CALL runtime·mstart(SB)
MOVQ $0x1234, 0x1234 // not reached
RET
// Exit the thread.
MOVQ $0, 0(SP)
CALL runtime·exits(SB)
JMP 0(PC)
// This is needed by asm_amd64.s
TEXT runtime·settls(SB),NOSPLIT,$0
......
......@@ -207,7 +207,7 @@ TEXT runtime·rfork(SB),NOSPLIT,$0-8
RET
//func tstart_plan9(newm *m)
TEXT runtime·tstart_plan9(SB),NOSPLIT,$0-4
TEXT runtime·tstart_plan9(SB),NOSPLIT,$4-4
MOVW newm+0(FP), R1
MOVW m_g0(R1), g
......@@ -226,9 +226,11 @@ TEXT runtime·tstart_plan9(SB),NOSPLIT,$0-4
BL runtime·mstart(SB)
MOVW $0x1234, R0
MOVW R0, 0(R0) // not reached
RET
// Exit the thread.
MOVW $0, R0
MOVW R0, 4(R13)
CALL runtime·exits(SB)
JMP 0(PC)
//func sigtramp(ureg, note unsafe.Pointer)
TEXT runtime·sigtramp(SB),NOSPLIT,$0-8
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment