Commit 407c56ae authored by Austin Clements

runtime: generalize {alloc,free}Stack to {alloc,free}Manual

We're going to start using manually-managed spans for GC workbufs, so
rename the allocate/free methods and pass in a pointer to the stats to
use instead of using the stack stats directly.

For #19325.

Change-Id: I37df0147ae5a8e1f3cb37d59c8e57a1fcc6f2980
Reviewed-on: https://go-review.googlesource.com/38576
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent ab9db51e
...@@ -238,11 +238,11 @@ go:notinheap ...@@ -238,11 +238,11 @@ go:notinheap
------------ ------------
`go:notinheap` applies to type declarations. It indicates that a type `go:notinheap` applies to type declarations. It indicates that a type
must never be heap allocated. Specifically, pointers to this type must must never be allocated from the GC'd heap. Specifically, pointers to
always fail the `runtime.inheap` check. The type may be used for this type must always fail the `runtime.inheap` check. The type may be
global variables, for stack variables, or for objects in unmanaged used for global variables, for stack variables, or for objects in
memory (e.g., allocated with `sysAlloc`, `persistentalloc`, or unmanaged memory (e.g., allocated with `sysAlloc`, `persistentalloc`,
`fixalloc`). Specifically: `fixalloc`, or from a manually-managed span). Specifically:
1. `new(T)`, `make([]T)`, `append([]T, ...)` and implicit heap 1. `new(T)`, `make([]T)`, `append([]T, ...)` and implicit heap
allocation of T are disallowed. (Though implicit allocations are allocation of T are disallowed. (Though implicit allocations are
......
...@@ -664,11 +664,19 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) ...@@ -664,11 +664,19 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
return s return s
} }
func (h *mheap) allocStack(npage uintptr) *mspan { // allocManual allocates a manually-managed span of npage pages and
_g_ := getg() // adds the bytes used to *stat, which should be a memstats in-use
if _g_ != _g_.m.g0 { // field. allocManual returns nil if allocation fails.
throw("mheap_allocstack not on g0 stack") //
} // The memory backing the returned span may not be zeroed if
// span.needzero is set.
//
// allocManual must be called on the system stack to prevent stack
// growth. Since this is used by the stack allocator, stack growth
// during allocManual would self-deadlock.
//
//go:systemstack
func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
lock(&h.lock) lock(&h.lock)
s := h.allocSpanLocked(npage) s := h.allocSpanLocked(npage)
if s != nil { if s != nil {
...@@ -679,10 +687,10 @@ func (h *mheap) allocStack(npage uintptr) *mspan { ...@@ -679,10 +687,10 @@ func (h *mheap) allocStack(npage uintptr) *mspan {
s.nelems = 0 s.nelems = 0
s.elemsize = 0 s.elemsize = 0
s.limit = s.base() + s.npages<<_PageShift s.limit = s.base() + s.npages<<_PageShift
memstats.stacks_inuse += uint64(s.npages << _PageShift) *stat += uint64(s.npages << _PageShift)
} }
// This unlock acts as a release barrier. See mHeap_Alloc_m. // This unlock acts as a release barrier. See mheap.alloc_m.
unlock(&h.lock) unlock(&h.lock)
return s return s
...@@ -880,14 +888,21 @@ func (h *mheap) freeSpan(s *mspan, acct int32) { ...@@ -880,14 +888,21 @@ func (h *mheap) freeSpan(s *mspan, acct int32) {
}) })
} }
func (h *mheap) freeStack(s *mspan) { // freeManual frees a manually-managed span returned by allocManual.
_g_ := getg() // stat must be the same as the stat passed to the allocManual that
if _g_ != _g_.m.g0 { // allocated s.
throw("mheap_freestack not on g0 stack") //
} // This must only be called when gcphase == _GCoff. See mSpanState for
// an explanation.
//
// freeManual must be called on the system stack to prevent stack
// growth, just like allocManual.
//
//go:systemstack
func (h *mheap) freeManual(s *mspan, stat *uint64) {
s.needzero = 1 s.needzero = 1
lock(&h.lock) lock(&h.lock)
memstats.stacks_inuse -= uint64(s.npages << _PageShift) *stat -= uint64(s.npages << _PageShift)
h.freeSpanLocked(s, true, true, 0) h.freeSpanLocked(s, true, true, 0)
unlock(&h.lock) unlock(&h.lock)
} }
......
...@@ -186,7 +186,7 @@ func stackpoolalloc(order uint8) gclinkptr { ...@@ -186,7 +186,7 @@ func stackpoolalloc(order uint8) gclinkptr {
s := list.first s := list.first
if s == nil { if s == nil {
// no free stacks. Allocate another span worth. // no free stacks. Allocate another span worth.
s = mheap_.allocStack(_StackCacheSize >> _PageShift) s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
if s == nil { if s == nil {
throw("out of memory") throw("out of memory")
} }
...@@ -248,7 +248,7 @@ func stackpoolfree(x gclinkptr, order uint8) { ...@@ -248,7 +248,7 @@ func stackpoolfree(x gclinkptr, order uint8) {
// By not freeing, we prevent step #4 until GC is done. // By not freeing, we prevent step #4 until GC is done.
stackpool[order].remove(s) stackpool[order].remove(s)
s.manualFreeList = 0 s.manualFreeList = 0
mheap_.freeStack(s) mheap_.freeManual(s, &memstats.stacks_inuse)
} }
} }
...@@ -390,7 +390,7 @@ func stackalloc(n uint32) stack { ...@@ -390,7 +390,7 @@ func stackalloc(n uint32) stack {
if s == nil { if s == nil {
// Allocate a new stack from the heap. // Allocate a new stack from the heap.
s = mheap_.allocStack(npage) s = mheap_.allocManual(npage, &memstats.stacks_inuse)
if s == nil { if s == nil {
throw("out of memory") throw("out of memory")
} }
...@@ -472,7 +472,7 @@ func stackfree(stk stack) { ...@@ -472,7 +472,7 @@ func stackfree(stk stack) {
if gcphase == _GCoff { if gcphase == _GCoff {
// Free the stack immediately if we're // Free the stack immediately if we're
// sweeping. // sweeping.
mheap_.freeStack(s) mheap_.freeManual(s, &memstats.stacks_inuse)
} else { } else {
// If the GC is running, we can't return a // If the GC is running, we can't return a
// stack span to the heap because it could be // stack span to the heap because it could be
...@@ -1166,7 +1166,7 @@ func freeStackSpans() { ...@@ -1166,7 +1166,7 @@ func freeStackSpans() {
if s.allocCount == 0 { if s.allocCount == 0 {
list.remove(s) list.remove(s)
s.manualFreeList = 0 s.manualFreeList = 0
mheap_.freeStack(s) mheap_.freeManual(s, &memstats.stacks_inuse)
} }
s = next s = next
} }
...@@ -1180,7 +1180,7 @@ func freeStackSpans() { ...@@ -1180,7 +1180,7 @@ func freeStackSpans() {
for s := stackLarge.free[i].first; s != nil; { for s := stackLarge.free[i].first; s != nil; {
next := s.next next := s.next
stackLarge.free[i].remove(s) stackLarge.free[i].remove(s)
mheap_.freeStack(s) mheap_.freeManual(s, &memstats.stacks_inuse)
s = next s = next
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment