Commit 4d620779 authored by Austin Clements

runtime: consolidate h_allspans and mheap_.allspans

These are two ways to refer to the allspans array that hark back to
when the runtime was split between C and Go. Clean this up by making
mheap_.allspans a slice and eliminating h_allspans.

Change-Id: Ic9360d040cf3eb590b5dfbab0b82e8ace8525610
Reviewed-on: https://go-review.googlesource.com/30530
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent adda7ad2
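
The consolidation works because a Go slice header already bundles the three things the old code tracked separately (base pointer, length, capacity). A minimal standalone sketch, not from this commit, with a toy type standing in for *mspan:

    package main

    import (
        "fmt"
        "unsafe"
    )

    type toy struct{ id int } // stands in for mspan in this sketch

    func main() {
        spans := make([]*toy, 0, 4)
        spans = append(spans, &toy{1}, &toy{2})

        // One slice header replaces the old trio of
        // allspans **mspan, nspan uint32, and h_allspans:
        fmt.Println(len(spans), cap(spans)) // 2 4
        fmt.Println(unsafe.Sizeof(spans))   // three words: ptr, len, cap (24 on 64-bit)
    }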
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -231,7 +231,7 @@ func CountPagesInUse() (pagesInUse, counted uintptr) {
     pagesInUse = uintptr(mheap_.pagesInUse)
-    for _, s := range h_allspans {
+    for _, s := range mheap_.allspans {
         if s.state == mSpanInUse {
             counted += s.npages
         }
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -437,7 +437,7 @@ func dumproots() {
     dumpfields(firstmoduledata.gcbssmask)
 
     // MSpan.types
-    allspans := h_allspans
+    allspans := mheap_.allspans
     for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
         s := allspans[spanidx]
         if s.state == _MSpanInUse {
@@ -463,7 +463,7 @@ var freemark [_PageSize / 8]bool
 func dumpobjs() {
     for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
-        s := h_allspans[i]
+        s := mheap_.allspans[i]
         if s.state != _MSpanInUse {
             continue
         }
@@ -608,7 +608,7 @@ func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs,
 func dumpmemprof() {
     iterate_memprof(dumpmemprof_callback)
-    allspans := h_allspans
+    allspans := mheap_.allspans
     for spanidx := uint32(0); spanidx < mheap_.nspan; spanidx++ {
         s := allspans[spanidx]
         if s.state != _MSpanInUse {
@@ -632,7 +632,7 @@ var dumphdr = []byte("go1.7 heap dump\n")
 func mdump() {
     // make sure we're done sweeping
     for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
-        s := h_allspans[i]
+        s := mheap_.allspans[i]
         if s.state == _MSpanInUse {
             s.ensureSwept()
         }
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -1738,12 +1738,11 @@ func gcCopySpans() {
     // Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
     lock(&mheap_.lock)
     // Free the old cached mark array if necessary.
-    if work.spans != nil && &work.spans[0] != &h_allspans[0] {
+    if work.spans != nil && &work.spans[0] != &mheap_.allspans[0] {
         sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
     }
     // Cache the current array for sweeping.
-    mheap_.gcspans = mheap_.allspans
-    work.spans = h_allspans
+    work.spans = mheap_.allspans
     unlock(&mheap_.lock)
 }
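
The rewritten check relies on slice identity: two slices share a backing array exactly when their first elements have the same address, which is how gcCopySpans now decides whether work.spans still aliases mheap_.allspans. A standalone sketch of that test (the helper name is hypothetical, not part of this commit):

    package main

    import "fmt"

    // sharesBacking reports whether two non-empty slices start at the
    // same backing array, the same test gcCopySpans performs.
    func sharesBacking(a, b []int) bool {
        return len(a) > 0 && len(b) > 0 && &a[0] == &b[0]
    }

    func main() {
        cur := make([]int, 3, 8)
        cached := cur[:2] // a view of the same array
        fmt.Println(sharesBacking(cached, cur)) // true: don't free

        cur = append(make([]int, 0, 16), cur...) // simulate a reallocation
        fmt.Println(sharesBacking(cached, cur)) // false: the old copy can be freed
    }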
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -33,15 +33,29 @@ type mheap struct {
     freelarge mSpanList                // free lists length >= _MaxMHeapList
     busy      [_MaxMHeapList]mSpanList // busy lists of large objects of given length
     busylarge mSpanList                // busy lists of large objects length >= _MaxMHeapList
-    allspans  **mspan                  // all spans out there
-    gcspans   **mspan                  // copy of allspans referenced by gc marker or sweeper
-    nspan     uint32
     sweepgen  uint32                   // sweep generation, see comment in mspan
     sweepdone uint32                   // all spans are swept
+
+    // allspans is a slice of all mspans ever created. Each mspan
+    // appears exactly once.
+    //
+    // The memory for allspans is manually managed and can be
+    // reallocated and move as the heap grows.
+    //
+    // In general, allspans is protected by mheap_.lock, which
+    // prevents concurrent access as well as freeing the backing
+    // store. Accesses during STW might not hold the lock, but
+    // must ensure that allocation cannot happen around the
+    // access (since that may free the backing store).
+    allspans []*mspan // all spans out there
+    nspan    uint32
+
     // span lookup
     spans        **mspan
     spans_mapped uintptr
 
+    _ uint32 // align uint64 fields on 32-bit for atomics
+
     // Proportional sweep
     pagesInUse     uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock
     spanBytesAlloc uint64 // bytes of spans allocated this cycle; updated atomically
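
The new `_ uint32` pad exists because sync/atomic's 64-bit operations require 8-byte-aligned operands on 32-bit platforms, so uint64 fields updated atomically (like spanBytesAlloc) must not land at a 4-byte offset. A standalone sketch of the same trick (an illustrative struct, not the runtime's):

    package main

    import (
        "fmt"
        "sync/atomic"
        "unsafe"
    )

    type stats struct {
        flags uint32
        _     uint32 // pad so the next field is 8-byte aligned on 32-bit
        pages uint64 // safe for atomic.AddUint64 even on 386/ARM
    }

    func main() {
        var s stats
        atomic.AddUint64(&s.pages, 1)
        fmt.Println(s.pages, unsafe.Offsetof(s.pages)) // 1 8
    }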
@@ -233,8 +247,6 @@ func (s *mspan) layout() (size, n, total uintptr) {
     return
 }
 
-var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go
-
 // h_spans is a lookup table to map virtual address page IDs to *mspan.
 // For allocated spans, their pages map to the span itself.
 // For free spans, only the lowest and highest pages map to the span itself. Internal
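
The h_spans comment describes a boundary-mapping scheme: allocated spans map every page to the span, while free spans map only their first and last pages, which is all that coalescing with a neighbor needs. A toy illustration with hypothetical types (a map stands in for the real flat table):

    package main

    import "fmt"

    type span struct {
        startPage, npages int
        inUse             bool
    }

    func record(table map[int]*span, s *span) {
        if s.inUse {
            for p := s.startPage; p < s.startPage+s.npages; p++ {
                table[p] = s // allocated: every page maps to the span
            }
            return
        }
        // free: only the boundary pages map in, enough for coalescing
        table[s.startPage] = s
        table[s.startPage+s.npages-1] = s
    }

    func main() {
        table := map[int]*span{}
        record(table, &span{startPage: 0, npages: 4, inUse: true})
        record(table, &span{startPage: 4, npages: 4, inUse: false})
        fmt.Println(table[2] != nil, table[5] != nil, table[7] != nil) // true false true
    }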
@@ -245,10 +257,10 @@ var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go
 func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
     h := (*mheap)(vh)
     s := (*mspan)(p)
-    if len(h_allspans) >= cap(h_allspans) {
+    if len(h.allspans) >= cap(h.allspans) {
         n := 64 * 1024 / sys.PtrSize
-        if n < cap(h_allspans)*3/2 {
-            n = cap(h_allspans) * 3 / 2
+        if n < cap(h.allspans)*3/2 {
+            n = cap(h.allspans) * 3 / 2
         }
         var new []*mspan
         sp := (*slice)(unsafe.Pointer(&new))
@@ -256,21 +268,21 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
         if sp.array == nil {
             throw("runtime: cannot allocate memory")
         }
-        sp.len = len(h_allspans)
+        sp.len = len(h.allspans)
         sp.cap = n
-        if len(h_allspans) > 0 {
-            copy(new, h_allspans)
-            // Don't free the old array if it's referenced by sweep.
-            // See the comment in mgc.go.
-            if h.allspans != mheap_.gcspans {
-                sysFree(unsafe.Pointer(h.allspans), uintptr(cap(h_allspans))*sys.PtrSize, &memstats.other_sys)
-            }
+        if len(h.allspans) > 0 {
+            copy(new, h.allspans)
         }
-        h_allspans = new
-        h.allspans = (**mspan)(sp.array)
+        oldAllspans := h.allspans
+        h.allspans = new
+        // Don't free the old array if it's referenced by sweep.
+        // See the comment in mgc.go.
+        if len(oldAllspans) != 0 && &oldAllspans[0] != &work.spans[0] {
+            sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
+        }
     }
-    h_allspans = append(h_allspans, s)
-    h.nspan = uint32(len(h_allspans))
+    h.allspans = append(h.allspans, s)
+    h.nspan = uint32(len(h.allspans))
 }
 
 // inheap reports whether b is a pointer into a (potentially dead) heap object.
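
recordspan grows allspans by filling in a slice header over memory it allocates itself; the runtime's private slice struct is just array/len/cap. The same header-grafting technique can be sketched in ordinary Go with unsafe, where a plain array stands in for the sysAlloc'd block:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // sliceHeader mirrors the runtime's private slice struct that
    // recordspan fills in by hand.
    type sliceHeader struct {
        array unsafe.Pointer
        len   int
        cap   int
    }

    func main() {
        var backing [4]*int // stands in for manually managed memory

        var spans []*int
        hdr := (*sliceHeader)(unsafe.Pointer(&spans))
        hdr.array = unsafe.Pointer(&backing[0])
        hdr.len = 0
        hdr.cap = len(backing)

        // append works normally while it stays within cap; growing past
        // cap would reallocate through the ordinary allocator, the case
        // recordspan instead handles itself with sysAlloc/sysFree.
        x := 42
        spans = append(spans, &x)
        fmt.Println(len(spans), cap(spans), *spans[0]) // 1 4 42
    }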
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -529,7 +529,7 @@ func updatememstats(stats *gcstats) {
     // Scan all spans and count number of alive objects.
     lock(&mheap_.lock)
     for i := uint32(0); i < mheap_.nspan; i++ {
-        s := h_allspans[i]
+        s := mheap_.allspans[i]
         if s.state != mSpanInUse {
             continue
         }