Commit 5a8c11ce authored by Austin Clements

runtime: rename _MSpan* constants to mSpan*

We already aliased mSpanInUse to _MSpanInUse. The dual constants are
getting annoying, so fix all of these to use the mSpan* naming
convention.

This was done automatically with:
  sed -i -re 's/_?MSpan(Dead|InUse|Manual|Free)/mSpan\1/g' *.go
plus deleting the existing definition of mSpanInUse.
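
For illustration: since the leading underscore in the pattern is
optional (_?), the command also rewrites bare MSpan* mentions in
comments, as the hunks below show. A minimal demonstration on a
hypothetical demo.go, dropping -i so the result goes to stdout
instead of editing the file in place:

  $ cat demo.go
  // A span is either _MSpanInUse or MSpanFree.
  var state = _MSpanInUse

  $ sed -re 's/_?MSpan(Dead|InUse|Manual|Free)/mSpan\1/g' demo.go
  // A span is either mSpanInUse or mSpanFree.
  var state = mSpanInUse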

Change-Id: I09979d9d491d06c10689cea625dc57faa9cc6767
Reviewed-on: https://go-review.googlesource.com/137875
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent 9eb53ab9
@@ -126,7 +126,7 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
     }
     s := spanOfUnchecked(uintptr(src))
-    if s.state == _MSpanManual {
+    if s.state == mSpanManual {
         // There are no heap bits for value stored on the stack.
         // For a channel receive src might be on the stack of some
         // other goroutine, so we can't unwind the stack even if
...
@@ -430,7 +430,7 @@ func dumproots() {
     // MSpan.types
     for _, s := range mheap_.allspans {
-        if s.state == _MSpanInUse {
+        if s.state == mSpanInUse {
             // Finalizers
             for sp := s.specials; sp != nil; sp = sp.next {
                 if sp.kind != _KindSpecialFinalizer {
@@ -453,7 +453,7 @@ var freemark [_PageSize / 8]bool
 func dumpobjs() {
     for _, s := range mheap_.allspans {
-        if s.state != _MSpanInUse {
+        if s.state != mSpanInUse {
             continue
         }
         p := s.base()
@@ -616,7 +616,7 @@ func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs,
 func dumpmemprof() {
     iterate_memprof(dumpmemprof_callback)
     for _, s := range mheap_.allspans {
-        if s.state != _MSpanInUse {
+        if s.state != mSpanInUse {
             continue
         }
         for sp := s.specials; sp != nil; sp = sp.next {
@@ -637,7 +637,7 @@ var dumphdr = []byte("go1.7 heap dump\n")
 func mdump() {
     // make sure we're done sweeping
     for _, s := range mheap_.allspans {
-        if s.state == _MSpanInUse {
+        if s.state == mSpanInUse {
             s.ensureSwept()
         }
     }
...
@@ -124,8 +124,6 @@ const (
     // have the most objects per span.
     maxObjsPerSpan = pageSize / 8

-    mSpanInUse = _MSpanInUse
-
     concurrentSweep = _ConcurrentSweep

     _PageSize = 1 << _PageShift
...
@@ -365,7 +365,7 @@ func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex ui
     s = spanOf(p)
     // If p is a bad pointer, it may not be in s's bounds.
     if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
-        if s == nil || s.state == _MSpanManual {
+        if s == nil || s.state == mSpanManual {
             // If s is nil, the virtual address has never been part of the heap.
             // This pointer may be to some mmap'd region, so we allow it.
             // Pointers into stacks are also ok, the runtime manages these explicitly.
@@ -611,7 +611,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
             }
         }
         return
-    } else if s.state != _MSpanInUse || dst < s.base() || s.limit <= dst {
+    } else if s.state != mSpanInUse || dst < s.base() || s.limit <= dst {
         // dst was heap memory at some point, but isn't now.
         // It can't be a global. It must be either our stack,
         // or in the case of direct channel sends, it could be
...
@@ -1239,7 +1239,7 @@ func gcDumpObject(label string, obj, off uintptr) {
     skipped := false
     size := s.elemsize
-    if s.state == _MSpanManual && size == 0 {
+    if s.state == mSpanManual && size == 0 {
         // We're printing something from a stack frame. We
         // don't know how big it is, so just show up to an
         // including off.
@@ -1335,7 +1335,7 @@ var useCheckmark = false
 func initCheckmarks() {
     useCheckmark = true
     for _, s := range mheap_.allspans {
-        if s.state == _MSpanInUse {
+        if s.state == mSpanInUse {
             heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout())
         }
     }
@@ -1344,7 +1344,7 @@ func initCheckmarks() {
 func clearCheckmarks() {
     useCheckmark = false
     for _, s := range mheap_.allspans {
-        if s.state == _MSpanInUse {
+        if s.state == mSpanInUse {
             heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout())
         }
     }
...
@@ -159,7 +159,7 @@ func (s *mspan) ensureSwept() {
     if atomic.Load(&s.sweepgen) == sg {
         return
     }
-    // The caller must be sure that the span is a MSpanInUse span.
+    // The caller must be sure that the span is a mSpanInUse span.
     if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
         s.sweep(false)
         return
...
@@ -82,7 +82,7 @@ type mheap struct {
     // accounting for current progress. If we could only adjust
     // the slope, it would create a discontinuity in debt if any
     // progress has already been made.
-    pagesInUse         uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock
+    pagesInUse         uint64 // pages of spans in stats mSpanInUse; R/W with mheap.lock
     pagesSwept         uint64 // pages swept this cycle; updated atomically
     pagesSweptBasis    uint64 // pagesSwept to use as the origin of the sweep ratio; updated atomically
     sweepHeapLiveBasis uint64 // value of heap_live to use as the origin of sweep ratio; written with lock, read without
@@ -199,18 +199,18 @@ type arenaHint struct {
 // An MSpan is a run of pages.
 //
-// When a MSpan is in the heap free list, state == MSpanFree
+// When a MSpan is in the heap free list, state == mSpanFree
 // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
 //
-// When a MSpan is allocated, state == MSpanInUse or MSpanManual
+// When a MSpan is allocated, state == mSpanInUse or mSpanManual
 // and heapmap(i) == span for all s->start <= i < s->start+s->npages.
 // Every MSpan is in one doubly-linked list,
 // either one of the MHeap's free lists or one of the
 // MCentral's span lists.
-// An MSpan representing actual memory has state _MSpanInUse,
-// _MSpanManual, or _MSpanFree. Transitions between these states are
+// An MSpan representing actual memory has state mSpanInUse,
+// mSpanManual, or mSpanFree. Transitions between these states are
 // constrained as follows:
 //
 // * A span may transition from free to in-use or manual during any GC
@@ -226,19 +226,19 @@ type arenaHint struct {
 type mSpanState uint8

 const (
-    _MSpanDead   mSpanState = iota
-    _MSpanInUse  // allocated for garbage collected heap
-    _MSpanManual // allocated for manual management (e.g., stack allocator)
-    _MSpanFree
+    mSpanDead   mSpanState = iota
+    mSpanInUse  // allocated for garbage collected heap
+    mSpanManual // allocated for manual management (e.g., stack allocator)
+    mSpanFree
 )

 // mSpanStateNames are the names of the span states, indexed by
 // mSpanState.
 var mSpanStateNames = []string{
-    "_MSpanDead",
-    "_MSpanInUse",
-    "_MSpanManual",
-    "_MSpanFree",
+    "mSpanDead",
+    "mSpanInUse",
+    "mSpanManual",
+    "mSpanFree",
 }
@@ -258,7 +258,7 @@ type mspan struct {
     startAddr uintptr // address of first byte of span aka s.base()
     npages    uintptr // number of pages in span
-    manualFreeList gclinkptr // list of free objects in _MSpanManual spans
+    manualFreeList gclinkptr // list of free objects in mSpanManual spans

     // freeindex is the slot index between 0 and nelems at which to begin scanning
     // for the next free object in this span.
@@ -458,7 +458,7 @@ func (i arenaIdx) l2() uint {
 }

 // inheap reports whether b is a pointer into a (potentially dead) heap object.
-// It returns false for pointers into _MSpanManual spans.
+// It returns false for pointers into mSpanManual spans.
 // Non-preemptible because it is used by write barriers.
 //go:nowritebarrier
 //go:nosplit
@@ -477,7 +477,7 @@ func inHeapOrStack(b uintptr) bool {
         return false
     }
     switch s.state {
-    case mSpanInUse, _MSpanManual:
+    case mSpanInUse, mSpanManual:
         return b < s.limit
     default:
         return false
@@ -696,7 +696,7 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
         // able to map interior pointer to containing span.
         atomic.Store(&s.sweepgen, h.sweepgen)
         h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list.
-        s.state = _MSpanInUse
+        s.state = mSpanInUse
         s.allocCount = 0
         s.spanclass = spanclass
         if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
@@ -788,7 +788,7 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
     lock(&h.lock)
     s := h.allocSpanLocked(npage, stat)
     if s != nil {
-        s.state = _MSpanManual
+        s.state = mSpanManual
         s.manualFreeList = 0
         s.allocCount = 0
         s.spanclass = 0
@@ -829,7 +829,7 @@ func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
 // Allocates a span of the given size. h must be locked.
 // The returned span has been removed from the
-// free list, but its state is still MSpanFree.
+// free list, but its state is still mSpanFree.
 func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
     var list *mSpanList
     var s *mspan
@@ -857,7 +857,7 @@ func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
 HaveSpan:
     // Mark span in use.
-    if s.state != _MSpanFree {
+    if s.state != mSpanFree {
         throw("MHeap_AllocLocked - MSpan not free")
     }
     if s.npages < npage {
@@ -878,10 +878,10 @@ HaveSpan:
         h.setSpan(t.base(), t)
         h.setSpan(t.base()+t.npages*pageSize-1, t)
         t.needzero = s.needzero
-        s.state = _MSpanManual // prevent coalescing with s
-        t.state = _MSpanManual
+        s.state = mSpanManual // prevent coalescing with s
+        t.state = mSpanManual
         h.freeSpanLocked(t, false, false, s.unusedsince)
-        s.state = _MSpanFree
+        s.state = mSpanFree
     }
     s.unusedsince = 0
@@ -930,7 +930,7 @@ func (h *mheap) grow(npage uintptr) bool {
     s.init(uintptr(v), size/pageSize)
     h.setSpans(s.base(), s.npages, s)
     atomic.Store(&s.sweepgen, h.sweepgen)
-    s.state = _MSpanInUse
+    s.state = mSpanInUse
     h.pagesInUse += uint64(s.npages)
     h.freeSpanLocked(s, false, true, 0)
     return true
@@ -986,11 +986,11 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) {
 // s must be on a busy list (h.busy or h.busylarge) or unlinked.
 func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
     switch s.state {
-    case _MSpanManual:
+    case mSpanManual:
         if s.allocCount != 0 {
             throw("MHeap_FreeSpanLocked - invalid stack free")
         }
-    case _MSpanInUse:
+    case mSpanInUse:
         if s.allocCount != 0 || s.sweepgen != h.sweepgen {
             print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
             throw("MHeap_FreeSpanLocked - invalid free")
@@ -1006,7 +1006,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
     if acctidle {
         memstats.heap_idle += uint64(s.npages << _PageShift)
     }
-    s.state = _MSpanFree
+    s.state = mSpanFree
     if s.inList() {
         h.busyList(s.npages).remove(s)
     }
@@ -1020,7 +1020,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
     s.npreleased = 0

     // Coalesce with earlier, later spans.
-    if before := spanOf(s.base() - 1); before != nil && before.state == _MSpanFree {
+    if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
         // Now adjust s.
         s.startAddr = before.startAddr
         s.npages += before.npages
@@ -1035,12 +1035,12 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
         } else {
             h.freeList(before.npages).remove(before)
         }
-        before.state = _MSpanDead
+        before.state = mSpanDead
         h.spanalloc.free(unsafe.Pointer(before))
     }

     // Now check to see if next (greater addresses) span is free and can be coalesced.
-    if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == _MSpanFree {
+    if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
         s.npages += after.npages
         s.npreleased += after.npreleased
         s.needzero |= after.needzero
@@ -1050,7 +1050,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
         } else {
             h.freeList(after.npages).remove(after)
         }
-        after.state = _MSpanDead
+        after.state = mSpanDead
         h.spanalloc.free(unsafe.Pointer(after))
     }
@@ -1187,7 +1187,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
     span.spanclass = 0
     span.incache = false
     span.elemsize = 0
-    span.state = _MSpanDead
+    span.state = mSpanDead
     span.unusedsince = 0
     span.npreleased = 0
     span.speciallock.key = 0
...
@@ -38,7 +38,7 @@ type mstats struct {
     heap_alloc    uint64 // bytes allocated and not yet freed (same as alloc above)
     heap_sys      uint64 // virtual address space obtained from system for GC'd heap
     heap_idle     uint64 // bytes in idle spans
-    heap_inuse    uint64 // bytes in _MSpanInUse spans
+    heap_inuse    uint64 // bytes in mSpanInUse spans
     heap_released uint64 // bytes released to the os
     heap_objects  uint64 // total number of allocated objects
...
@@ -484,7 +484,7 @@ func TestGdbConst(t *testing.T) {
         "-ex", "print main.aConstant",
         "-ex", "print main.largeConstant",
         "-ex", "print main.minusOne",
-        "-ex", "print 'runtime._MSpanInUse'",
+        "-ex", "print 'runtime.mSpanInUse'",
         "-ex", "print 'runtime._PageSize'",
         filepath.Join(dir, "a.exe"),
     }
...
@@ -211,7 +211,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 // Adds stack x to the free pool. Must be called with stackpoolmu held.
 func stackpoolfree(x gclinkptr, order uint8) {
     s := spanOfUnchecked(uintptr(x))
-    if s.state != _MSpanManual {
+    if s.state != mSpanManual {
         throw("freeing stack not in a stack span")
     }
     if s.manualFreeList.ptr() == nil {
@@ -459,7 +459,7 @@ func stackfree(stk stack) {
         }
     } else {
         s := spanOfUnchecked(uintptr(v))
-        if s.state != _MSpanManual {
+        if s.state != mSpanManual {
             println(hex(s.base()), v)
             throw("bad span state")
         }
...