Commit 1bc6be64 authored by Austin Clements

runtime: mark several types go:notinheap

This covers basically all sysAlloc'd, persistentalloc'd, and
fixalloc'd types.

Change-Id: I0487c887c2a0ade5e33d4c4c12d837e97468e66b
Reviewed-on: https://go-review.googlesource.com/30941
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 991a85c8
...@@ -68,6 +68,7 @@ type cpuprofEntry struct { ...@@ -68,6 +68,7 @@ type cpuprofEntry struct {
stack [maxCPUProfStack]uintptr stack [maxCPUProfStack]uintptr
} }
//go:notinheap
type cpuProfile struct { type cpuProfile struct {
on bool // profiling is on on bool // profiling is on
wait note // goroutine waits here wait note // goroutine waits here
......
...@@ -903,6 +903,8 @@ var globalAlloc struct { ...@@ -903,6 +903,8 @@ var globalAlloc struct {
// There is no associated free operation. // There is no associated free operation.
// Intended for things like function/type/debug-related persistent data. // Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8). // If align is 0, uses default align (currently 8).
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer { func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
var p unsafe.Pointer var p unsafe.Pointer
systemstack(func() { systemstack(func() {
......
...@@ -11,6 +11,8 @@ import "unsafe" ...@@ -11,6 +11,8 @@ import "unsafe"
// //
// mcaches are allocated from non-GC'd memory, so any heap pointers // mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled. // must be specially handled.
//
//go:notinheap
type mcache struct { type mcache struct {
// The following members are accessed on every malloc, // The following members are accessed on every malloc,
// so they are grouped here for better caching. // so they are grouped here for better caching.
......
...@@ -15,6 +15,8 @@ package runtime ...@@ -15,6 +15,8 @@ package runtime
import "runtime/internal/atomic" import "runtime/internal/atomic"
// Central list of free objects of a given size. // Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct { type mcentral struct {
lock mutex lock mutex
sizeclass int32 sizeclass int32
......
...@@ -12,6 +12,10 @@ import ( ...@@ -12,6 +12,10 @@ import (
"unsafe" "unsafe"
) )
// finblock is allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
//
//go:notinheap
type finblock struct { type finblock struct {
alllink *finblock alllink *finblock
next *finblock next *finblock
...@@ -31,11 +35,11 @@ var allfin *finblock // list of all blocks ...@@ -31,11 +35,11 @@ var allfin *finblock // list of all blocks
// NOTE: Layout known to queuefinalizer. // NOTE: Layout known to queuefinalizer.
type finalizer struct { type finalizer struct {
fn *funcval // function to call fn *funcval // function to call (may be a heap pointer)
arg unsafe.Pointer // ptr to object arg unsafe.Pointer // ptr to object (may be a heap pointer)
nret uintptr // bytes of return values from fn nret uintptr // bytes of return values from fn
fint *_type // type of first argument of fn fint *_type // type of first argument of fn
ot *ptrtype // type of ptr to object ot *ptrtype // type of ptr to object (may be a heap pointer)
} }
var finalizer1 = [...]byte{ var finalizer1 = [...]byte{
...@@ -70,7 +74,6 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot ...@@ -70,7 +74,6 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
lock(&finlock) lock(&finlock)
if finq == nil || finq.cnt == int32(len(finq.fin)) { if finq == nil || finq.cnt == int32(len(finq.fin)) {
if finc == nil { if finc == nil {
// Note: write barrier here, assigning to finc, but should be okay.
finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys)) finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
finc.alllink = allfin finc.alllink = allfin
allfin = finc allfin = finc
......
...@@ -18,6 +18,8 @@ import "unsafe" ...@@ -18,6 +18,8 @@ import "unsafe"
// The caller is responsible for locking around FixAlloc calls. // The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is // Callers can keep state in the object but the first word is
// smashed by freeing and reallocating. // smashed by freeing and reallocating.
//
// Consider marking fixalloc'd types go:notinheap.
type fixalloc struct { type fixalloc struct {
size uintptr size uintptr
first func(arg, p unsafe.Pointer) // called first time p is returned first func(arg, p unsafe.Pointer) // called first time p is returned
...@@ -34,6 +36,8 @@ type fixalloc struct { ...@@ -34,6 +36,8 @@ type fixalloc struct {
// this cannot be used by some of the internal GC structures. For example when // this cannot be used by some of the internal GC structures. For example when
// the sweeper is placing an unmarked object on the free list it does not want the // the sweeper is placing an unmarked object on the free list it does not want the
// write barrier to be called since that could result in the object being reachable. // write barrier to be called since that could result in the object being reachable.
//
//go:notinheap
type mlink struct { type mlink struct {
next *mlink next *mlink
} }
......
...@@ -28,6 +28,8 @@ const ( ...@@ -28,6 +28,8 @@ const (
// A wbufptr holds a workbuf*, but protects it from write barriers. // A wbufptr holds a workbuf*, but protects it from write barriers.
// workbufs never live on the heap, so write barriers are unnecessary. // workbufs never live on the heap, so write barriers are unnecessary.
// Write barriers on workbuf pointers may also be dangerous in the GC. // Write barriers on workbuf pointers may also be dangerous in the GC.
//
// TODO: Since workbuf is now go:notinheap, this isn't necessary.
type wbufptr uintptr type wbufptr uintptr
func wbufptrOf(w *workbuf) wbufptr { func wbufptrOf(w *workbuf) wbufptr {
...@@ -279,6 +281,7 @@ type workbufhdr struct { ...@@ -279,6 +281,7 @@ type workbufhdr struct {
nobj int nobj int
} }
//go:notinheap
type workbuf struct { type workbuf struct {
workbufhdr workbufhdr
// account for the above fields // account for the above fields
......
...@@ -22,6 +22,11 @@ const minPhysPageSize = 4096 ...@@ -22,6 +22,11 @@ const minPhysPageSize = 4096
// Main malloc heap. // Main malloc heap.
// The heap itself is the "free[]" and "large" arrays, // The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too. // but all the other global data is here too.
//
// mheap must not be heap-allocated because it contains mSpanLists,
// which must not be heap-allocated.
//
//go:notinheap
type mheap struct { type mheap struct {
lock mutex lock mutex
free [_MaxMHeapList]mSpanList // free lists of given length free [_MaxMHeapList]mSpanList // free lists of given length
...@@ -122,11 +127,13 @@ var mSpanStateNames = []string{ ...@@ -122,11 +127,13 @@ var mSpanStateNames = []string{
// mSpanList heads a linked list of spans. // mSpanList heads a linked list of spans.
// //
//go:notinheap
type mSpanList struct { type mSpanList struct {
first *mspan // first span in list, or nil if none first *mspan // first span in list, or nil if none
last *mspan // last span in list, or nil if none last *mspan // last span in list, or nil if none
} }
//go:notinheap
type mspan struct { type mspan struct {
next *mspan // next span in list, or nil if none next *mspan // next span in list, or nil if none
prev *mspan // previous span in list, or nil if none prev *mspan // previous span in list, or nil if none
...@@ -1073,6 +1080,7 @@ const ( ...@@ -1073,6 +1080,7 @@ const (
// if that happens. // if that happens.
) )
//go:notinheap
type special struct { type special struct {
next *special // linked list in span next *special // linked list in span
offset uint16 // span offset of object offset uint16 // span offset of object
...@@ -1170,12 +1178,17 @@ func removespecial(p unsafe.Pointer, kind uint8) *special { ...@@ -1170,12 +1178,17 @@ func removespecial(p unsafe.Pointer, kind uint8) *special {
} }
// The described object has a finalizer set for it. // The described object has a finalizer set for it.
//
// specialfinalizer is allocated from non-GC'd memory, so any heap
// pointers must be specially handled.
//
//go:notinheap
type specialfinalizer struct { type specialfinalizer struct {
special special special special
fn *funcval fn *funcval // May be a heap pointer.
nret uintptr nret uintptr
fint *_type fint *_type // May be a heap pointer, but always live.
ot *ptrtype ot *ptrtype // May be a heap pointer, but always live.
} }
// Adds a finalizer to the object p. Returns true if it succeeded. // Adds a finalizer to the object p. Returns true if it succeeded.
...@@ -1230,6 +1243,8 @@ func removefinalizer(p unsafe.Pointer) { ...@@ -1230,6 +1243,8 @@ func removefinalizer(p unsafe.Pointer) {
} }
// The described object is being heap profiled. // The described object is being heap profiled.
//
//go:notinheap
type specialprofile struct { type specialprofile struct {
special special special special
b *bucket b *bucket
...@@ -1277,6 +1292,7 @@ type gcBitsHeader struct { ...@@ -1277,6 +1292,7 @@ type gcBitsHeader struct {
next uintptr // *gcBits triggers recursive type bug. (issue 14620) next uintptr // *gcBits triggers recursive type bug. (issue 14620)
} }
//go:notinheap
type gcBits struct { type gcBits struct {
// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand. // gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
free uintptr // free is the index into bits of the next free byte. free uintptr // free is the index into bits of the next free byte.
......
...@@ -40,6 +40,10 @@ type bucketType int ...@@ -40,6 +40,10 @@ type bucketType int
// //
// Per-call-stack profiling information. // Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table. // Lookup by hashing call stack into a linked-list hash table.
//
// No heap pointers.
//
//go:notinheap
type bucket struct { type bucket struct {
next *bucket next *bucket
allnext *bucket allnext *bucket
......
...@@ -39,6 +39,10 @@ const ( ...@@ -39,6 +39,10 @@ const (
const pollBlockSize = 4 * 1024 const pollBlockSize = 4 * 1024
// Network poller descriptor. // Network poller descriptor.
//
// No heap pointers.
//
//go:notinheap
type pollDesc struct { type pollDesc struct {
link *pollDesc // in pollcache, protected by pollcache.lock link *pollDesc // in pollcache, protected by pollcache.lock
......
...@@ -134,6 +134,8 @@ type traceBufHeader struct { ...@@ -134,6 +134,8 @@ type traceBufHeader struct {
} }
// traceBuf is per-P tracing buffer. // traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct { type traceBuf struct {
traceBufHeader traceBufHeader
arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
...@@ -144,6 +146,8 @@ type traceBuf struct { ...@@ -144,6 +146,8 @@ type traceBuf struct {
// allocated from the GC'd heap, so this is safe, and are often // allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so // manipulated in contexts where write barriers are not allowed, so
// this is necessary. // this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr type traceBufPtr uintptr
func (tp traceBufPtr) ptr() *traceBuf { return (*traceBuf)(unsafe.Pointer(tp)) } func (tp traceBufPtr) ptr() *traceBuf { return (*traceBuf)(unsafe.Pointer(tp)) }
...@@ -828,11 +832,14 @@ type traceAlloc struct { ...@@ -828,11 +832,14 @@ type traceAlloc struct {
// traceAllocBlock is allocated from non-GC'd memory, so it must not // traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do // contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers. // not need write barriers.
//
//go:notinheap
type traceAllocBlock struct { type traceAllocBlock struct {
next traceAllocBlockPtr next traceAllocBlockPtr
data [64<<10 - sys.PtrSize]byte data [64<<10 - sys.PtrSize]byte
} }
// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr type traceAllocBlockPtr uintptr
func (p traceAllocBlockPtr) ptr() *traceAllocBlock { return (*traceAllocBlock)(unsafe.Pointer(p)) } func (p traceAllocBlockPtr) ptr() *traceAllocBlock { return (*traceAllocBlock)(unsafe.Pointer(p)) }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment