Commit e500ffd8 authored by Austin Clements

runtime: track all heap arenas in a slice

Currently, there's no efficient way to iterate over the Go heap. We're
going to need this for fast free page sweeping, so this CL adds a
slice of all allocated heap arenas. This will also be useful for
generational GC.

For #18155.

Change-Id: I58d126cfb9c3f61b3125d80b74ccb1b2169efbcc
Reviewed-on: https://go-review.googlesource.com/c/138076
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent b2df0bd5
@@ -641,6 +641,27 @@ mapped:
 			}
 		}
 
+		// Add the arena to the arenas list.
+		if len(h.allArenas) == cap(h.allArenas) {
+			size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
+			if size == 0 {
+				size = physPageSize
+			}
+			newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys))
+			if newArray == nil {
+				throw("out of memory allocating allArenas")
+			}
+			oldSlice := h.allArenas
+			*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
+			copy(h.allArenas, oldSlice)
+			// Do not free the old backing array because
+			// there may be concurrent readers. Since we
+			// double the array each time, this can lead
+			// to at most 2x waste.
+		}
+		h.allArenas = h.allArenas[:len(h.allArenas)+1]
+		h.allArenas[len(h.allArenas)-1] = ri
+
 		// Store atomically just in case an object from the
 		// new heap arena becomes visible before the heap lock
 		// is released (which shouldn't happen, but there's
...
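The hunk above is the writer side of allArenas: when the slice is full, it doubles the capacity, installs a new backing array, copies the old contents across, and deliberately abandons the old array, since a reader may still hold a slice header that points at it. The sketch below mirrors those steps in ordinary Go as a standalone program; growAppend and the starting capacity of 4 are invented for illustration, and the real code must use persistentalloc because mheap code cannot allocate from the garbage-collected heap it manages.

```go
package main

import "fmt"

// growAppend mirrors the grow-and-append steps from the hunk above.
// When the slice is full it doubles the capacity, builds a fresh
// backing array that keeps the old length, copies the old contents
// across, and simply abandons the old array: a concurrent reader
// that copied the slice header earlier can keep using it. Doubling
// bounds the abandoned memory at roughly 2x the live size.
func growAppend(s []uint, v uint) []uint {
	if len(s) == cap(s) {
		newCap := 2 * cap(s)
		if newCap == 0 {
			newCap = 4 // the runtime starts with one physical page of entries
		}
		grown := make([]uint, len(s), newCap) // fresh backing array
		copy(grown, s)                        // old array is left untouched
		s = grown
	}
	s = s[:len(s)+1] // extend into the spare capacity...
	s[len(s)-1] = v  // ...and write the new entry, as the hunk does
	return s
}

func main() {
	var arenas []uint
	for i := uint(0); i < 9; i++ {
		arenas = growAppend(arenas, i)
	}
	fmt.Println(arenas, len(arenas), cap(arenas)) // [0 1 ... 8] 9 16
}
```

One difference worth noting: the runtime installs the new slice header before copying the old contents into it. That order is safe only because the writer holds mheap_.lock throughout, so no reader can copy the header while the new array is still half-filled.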
@@ -133,7 +133,16 @@ type mheap struct {
 	// (the actual arenas). This is only used on 32-bit.
 	arena linearAlloc
 
-	// _ uint32 // ensure 64-bit alignment of central
+	// allArenas is the arenaIndex of every mapped arena. This can
+	// be used to iterate through the address space.
+	//
+	// Access is protected by mheap_.lock. However, since this is
+	// append-only and old backing arrays are never freed, it is
+	// safe to acquire mheap_.lock, copy the slice header, and
+	// then release mheap_.lock.
+	allArenas []arenaIdx
+
+	_ uint32 // ensure 64-bit alignment of central
 
 	// central free lists for small size classes.
 	// the padding makes sure that the mcentrals are
...
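The struct comment above states the access rule that justifies leaking old arrays in the grow path: a reader may acquire mheap_.lock, copy the slice header, release the lock, and keep iterating over its copy. Below is a runnable model of that discipline; heapModel, addArena, and sweepArenas are invented names, a sync.Mutex stands in for mheap_.lock, and Go's garbage collector supplies the "old backing arrays are never freed" guarantee by keeping any snapshotted array alive.

```go
package main

import (
	"fmt"
	"sync"
)

// heapModel models the allArenas access rule: writers append only
// while holding the lock; readers hold the lock just long enough to
// copy the slice header and then iterate unlocked. This is sound
// because the list is append-only, so entries visible in a snapshot
// are never moved or overwritten.
type heapModel struct {
	lock      sync.Mutex
	allArenas []int
}

func (h *heapModel) addArena(ri int) {
	h.lock.Lock()
	h.allArenas = append(h.allArenas, ri)
	h.lock.Unlock()
}

func (h *heapModel) sweepArenas() int {
	h.lock.Lock()
	snapshot := h.allArenas // copy the slice header...
	h.lock.Unlock()         // ...then release the lock before the walk
	n := 0
	for range snapshot {
		n++ // stand-in for per-arena work such as free page sweeping
	}
	return n
}

func main() {
	h := new(heapModel)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // concurrent writer
		defer wg.Done()
		for i := 0; i < 1000; i++ {
			h.addArena(i)
		}
	}()
	for i := 0; i < 10; i++ {
		h.sweepArenas() // overlaps with the writer, but only via snapshots
	}
	wg.Wait()
	fmt.Println("arenas:", h.sweepArenas()) // arenas: 1000
}
```

A snapshot can miss arenas that are added after it is taken; a consumer like the planned free page sweep has to tolerate that, in exchange for not holding the heap lock across the whole walk.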