Commit 02b8e697 authored by Jeremy Jackins, committed by Matthew Dempsky

runtime: find a home for orphaned comments

These comments were left behind after runtime.h was converted
from C to Go. I examined the original code and tried to move these
to the places that made the most sense.

Change-Id: I8769d60234c0113d682f9de3bd8d6c34c450c188
Reviewed-on: https://go-review.googlesource.com/21969
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent 980146bf
@@ -3,6 +3,9 @@
// license that can be found in the LICENSE file.
// Lock-free stack.
// Initialize head to 0, compare with 0 to test for emptiness.
// The stack does not keep pointers to nodes,
// so they can be garbage collected if there are no other pointers to nodes.
// The following code runs only on g0 stack.
package runtime
......
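The lock-free stack described by the comment above is essentially a Treiber stack: the head is a single word, zero means empty, and the stack itself holds no pointers that keep its nodes alive for the GC. As a rough user-level analogue (not the runtime's lfstack, which additionally packs a pointer and a counter into one uint64 to defend against ABA), a CAS-based push/pop could look like this sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

// node and lfStack form a user-level analogue of a Treiber stack.
// The runtime's real lfstack packs a pointer and a counter into a
// uint64 to defend against ABA; this sketch ignores that detail.
type node struct {
	next *node
	val  int
}

type lfStack struct {
	head atomic.Pointer[node] // zero value (nil) means empty
}

func (s *lfStack) push(n *node) {
	for {
		old := s.head.Load()
		n.next = old
		if s.head.CompareAndSwap(old, n) {
			return
		}
	}
}

func (s *lfStack) pop() *node {
	for {
		old := s.head.Load()
		if old == nil {
			return nil // empty: head still compares equal to the zero value
		}
		if s.head.CompareAndSwap(old, old.next) {
			return old
		}
	}
}

func main() {
	var s lfStack
	s.push(&node{val: 1})
	s.push(&node{val: 2})
	fmt.Println(s.pop().val, s.pop().val, s.pop() == nil) // 2 1 true
}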
@@ -13,13 +13,13 @@ import (
// This implementation depends on OS-specific implementations of
//
// runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
// futexsleep(addr *uint32, val uint32, ns int64)
// Atomically,
// if(*addr == val) sleep
// if *addr == val { sleep }
// Might be woken up spuriously; that's allowed.
// Don't sleep longer than ns; ns < 0 means forever.
//
// runtime·futexwakeup(uint32 *addr, uint32 cnt)
// futexwakeup(addr *uint32, cnt uint32)
// If any procs are sleeping on addr, wake up at most cnt.
const (
......
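To make the futexsleep/futexwakeup contract above concrete, here is a simplified sketch of a futex-style lock in the spirit of lock_futex.go. The primitives are stubbed at user level (a real futexsleep blocks in the kernel until it is woken or *addr != val), and mutexKey, lock, and unlock are illustrative names, not the runtime's actual code:

package main

import (
	"runtime"
	"sync/atomic"
)

// Crude user-level stand-ins for the futexsleep/futexwakeup primitives
// described above. They preserve the contract (spurious wakeups are
// allowed) but not the efficiency of sleeping in the kernel.
func futexsleep(addr *uint32, val uint32, ns int64) {
	if atomic.LoadUint32(addr) == val {
		runtime.Gosched() // yield instead of blocking in the kernel
	}
}

func futexwakeup(addr *uint32, cnt uint32) {
	// Nothing to do for the spinning stub: sleepers notice the store on
	// their next load. A real implementation wakes up to cnt waiters.
}

// mutexKey sketches the futex-based lock the comments describe:
// 0 = unlocked, 1 = locked, 2 = locked with possible waiters.
type mutexKey struct{ key uint32 }

func (m *mutexKey) lock() {
	if atomic.CompareAndSwapUint32(&m.key, 0, 1) {
		return // uncontended fast path: a few user-level instructions
	}
	for {
		if atomic.SwapUint32(&m.key, 2) == 0 {
			return // acquired while marking the lock as contended
		}
		futexsleep(&m.key, 2, -1) // sleep until the key changes
	}
}

func (m *mutexKey) unlock() {
	if atomic.SwapUint32(&m.key, 0) == 2 {
		futexwakeup(&m.key, 1) // someone may be sleeping; wake one
	}
}

func main() {
	var m mutexKey
	m.lock()
	m.unlock()
}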
@@ -13,4 +13,7 @@ package runtime
import "unsafe"
// mmap calls the mmap system call. It is implemented in assembly.
// We only pass the lower 32 bits of file offset to the
// assembly routine; the higher bits (if required), should be provided
// by the assembly routine as 0.
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
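A hypothetical caller-side helper illustrating the constraint above: since only the low 32 bits of the file offset reach the assembly routine, a 64-bit offset must already fit in 32 bits before it is passed down (the function name below is made up for illustration):

package main

import (
	"fmt"
	"math"
)

// low32Offset narrows a 64-bit file offset to the uint32 the runtime's
// mmap wrapper accepts, rejecting offsets that would lose high bits.
func low32Offset(off int64) (uint32, error) {
	if off < 0 || off > math.MaxUint32 {
		return 0, fmt.Errorf("offset %d does not fit in 32 bits", off)
	}
	return uint32(off), nil
}

func main() {
	off, err := low32Offset(1 << 16)
	fmt.Println(off, err) // 65536 <nil>
}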
@@ -10,9 +10,7 @@ import (
"unsafe"
)
/*
* defined constants
*/
// defined constants
const (
// G status
//
@@ -99,6 +97,10 @@ const (
_Pdead
)
// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
type mutex struct {
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
@@ -106,6 +108,26 @@ type mutex struct {
key uintptr
}
// sleep and wakeup on one-time events.
// before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// once notewakeup has been called, the notesleep
// will return. future notesleep will return immediately.
// subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened. if a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling
// notewakeup.
//
// notesleep/notetsleep are generally called on g0,
// notetsleepg is similar to notetsleep but is called on user g.
type note struct {
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
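The noteclear/notesleep/notewakeup protocol above can be mimicked at user level with a channel, which makes the one-shot semantics easy to see. This is only an analogue (the runtime's note is futex- or semaphore-based and its sleep variants run on g0); userNote and its methods are invented for illustration:

package main

import (
	"fmt"
	"sync"
)

// userNote follows the protocol described above: clear first, then exactly
// one sleeper and exactly one wakeup per cycle; sleeps after the wakeup
// return immediately.
type userNote struct{ ch chan struct{} }

func (n *userNote) noteclear()  { n.ch = make(chan struct{}) }
func (n *userNote) notewakeup() { close(n.ch) } // must be called exactly once per clear
func (n *userNote) notesleep()  { <-n.ch }      // returns once notewakeup has run

func main() {
	var n userNote
	var wg sync.WaitGroup

	n.noteclear() // must precede notesleep/notewakeup
	wg.Add(1)
	go func() {
		defer wg.Done()
		n.notesleep() // blocks until the wakeup below
		fmt.Println("woken")
	}()
	n.notewakeup()
	wg.Wait()
	// noteclear may only be called again after notesleep has returned.
	n.noteclear()
}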
@@ -397,8 +419,8 @@ type m struct {
waittraceskip int
startingtrace bool
syscalltick uint32
//#ifdef GOOS_windows
thread uintptr // thread handle
thread uintptr // thread handle
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
libcall libcall
@@ -406,7 +428,7 @@ type m struct {
libcallsp uintptr
libcallg guintptr
syscall libcall // stores syscall parameters on windows
//#endif
mOS
}
@@ -530,10 +552,10 @@ type schedt struct {
totaltime int64 // ∫gomaxprocs dt up to procresizetime
}
// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The m.locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->locked record the nesting depth of calls to lockOSThread
// The upper bits of m.locked record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
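As a worked example of the m.locked layout just described, using the values the runtime assigns to these bits (_LockExternal = 1, _LockInternal = 2) but otherwise purely illustrative:

package main

import "fmt"

// Values matching the runtime's scheme: the low bit records an external
// LockOSThread call, the upper bits count internal nesting depth.
const (
	lockExternal = 1
	lockInternal = 2
)

func main() {
	var locked uint32

	locked |= lockExternal // LockOSThread: a boolean, not a counter
	locked |= lockExternal // a second external lock is silently ignored
	locked += lockInternal // lockOSThread: nesting depth 1
	locked += lockInternal // nested internal lock: depth 2
	locked -= lockInternal // unlockOSThread: back to depth 1

	fmt.Printf("external=%v internal depth=%d\n",
		locked&lockExternal != 0, locked>>1) // external=true internal depth=1
}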
@@ -603,13 +625,6 @@ type forcegcstate struct {
idle uint32
}
/*
* known to compiler
*/
const (
_Structrnd = sys.RegSize
)
// startup_random_data holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte
@@ -635,9 +650,7 @@ func extendRandom(r []byte, n int) {
}
}
/*
* deferred subroutine calls
*/
// deferred subroutine calls
type _defer struct {
siz int32
started bool
@@ -648,9 +661,7 @@ type _defer struct {
link *_defer
}
/*
* panics
*/
// panics
type _panic struct {
argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
arg interface{} // argument to panic
@@ -659,10 +670,7 @@ type _panic struct {
aborted bool // the panic was aborted
}
/*
* stack traces
*/
// stack traces
type stkframe struct {
fn *_func // function being run
pc uintptr // program counter within fn
@@ -682,10 +690,8 @@ const (
_TraceJumpStack // if traceback is on a systemstack, resume trace at g that called into it
)
const (
// The maximum number of frames we print for a traceback
_TracebackMaxFrames = 100
)
// The maximum number of frames we print for a traceback
const _TracebackMaxFrames = 100
var (
emptystring string
@@ -716,46 +722,3 @@ var (
islibrary bool // -buildmode=c-shared
isarchive bool // -buildmode=c-archive
)
/*
* mutual exclusion locks. in the uncontended case,
* as fast as spin locks (just a few user-level instructions),
* but on the contention path they sleep in the kernel.
* a zeroed Mutex is unlocked (no need to initialize each lock).
*/
/*
* sleep and wakeup on one-time events.
* before any calls to notesleep or notewakeup,
* must call noteclear to initialize the Note.
* then, exactly one thread can call notesleep
* and exactly one thread can call notewakeup (once).
* once notewakeup has been called, the notesleep
* will return. future notesleep will return immediately.
* subsequent noteclear must be called only after
* previous notesleep has returned, e.g. it's disallowed
* to call noteclear straight after notewakeup.
*
* notetsleep is like notesleep but wakes up after
* a given number of nanoseconds even if the event
* has not yet happened. if a goroutine uses notetsleep to
* wake up early, it must wait to call noteclear until it
* can be sure that no other goroutine is calling
* notewakeup.
*
* notesleep/notetsleep are generally called on g0,
* notetsleepg is similar to notetsleep but is called on user g.
*/
// bool runtime·notetsleep(Note*, int64); // false - timeout
// bool runtime·notetsleepg(Note*, int64); // false - timeout
/*
* Lock-free stack.
* Initialize uint64 head to 0, compare with 0 to test for emptiness.
* The stack does not keep pointers to nodes,
* so they can be garbage collected if there are no other pointers to nodes.
*/
// for mmap, we only pass the lower 32 bits of file offset to the
// assembly routine; the higher bits (if required), should be provided
// by the assembly routine as 0.