Commit 6d1ebeb5 authored by Keith Randall

runtime: handle holes in the heap

We need to distinguish pointers to free spans, which indicate bugs in
our pointer analysis, from pointers to never-in-the-heap spans, which
can legitimately arise from sysAlloc/mmap/etc. This normally isn't a
problem because the heap is contiguous, but in some situations
(particularly on 32-bit systems) the heap must grow around an
already-allocated region.
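
As a toy illustration of this distinction (the names below are made up
for the sketch, not the runtime's real types): a page-indexed span
table in which a nil entry means the page was never part of the heap,
so that only a pointer into a span that is actually free indicates a
bug.

    package main

    import "fmt"

    // Illustrative span states, loosely mirroring the runtime's
    // _MSpanInUse / _MSpanFree / _MSpanStack constants.
    type spanState int

    const (
        spanInUse spanState = iota
        spanFree
        spanStack
    )

    type span struct{ state spanState }

    // spans maps page IDs to spans; a nil entry means the page has never
    // been part of the heap (a hole the heap grew around, or memory
    // obtained directly from sysAlloc/mmap).
    var spans = map[uintptr]*span{}

    // classify mimics the decision the fixed check makes for a pointer
    // that lands on the given page.
    func classify(page uintptr) string {
        switch s := spans[page]; {
        case s == nil:
            return "never in heap: allowed (mmap'd region etc.)"
        case s.state == spanStack:
            return "stack span: allowed, managed explicitly by the runtime"
        case s.state == spanFree:
            return "free span: bad pointer, a bug in pointer analysis"
        default:
            return "in-use span: ordinary heap pointer"
        }
    }

    func main() {
        spans[1] = &span{state: spanInUse}
        spans[2] = &span{state: spanFree}
        fmt.Println(classify(1)) // ordinary heap pointer
        fmt.Println(classify(2)) // bug: free spans should never be referenced
        fmt.Println(classify(7)) // hole: legitimately not heap memory
    }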

The bad-pointer test is currently disabled, so this fix has no visible
effect yet, but it removes one barrier to re-enabling the test.

Fixes #9872.

Change-Id: I0a92db4d43b642c58d2b40af69c906a8d9777f88
Reviewed-on: https://go-review.googlesource.com/5780
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
parent 29421cbb
@@ -176,7 +176,10 @@ func heapBitsForObject(p uintptr) (base uintptr, hbits heapBits) {
 	x -= mheap_.arena_start >> _PageShift
 	s := h_spans[x]
 	if s == nil || pageID(k) < s.start || p >= s.limit || s.state != mSpanInUse {
-		if s != nil && s.state == _MSpanStack {
+		if s == nil || s.state == _MSpanStack {
+			// If s is nil, the virtual address has never been part of the heap.
+			// This pointer may be to some mmap'd region, so we allow it.
+			// Pointers into stacks are also ok, the runtime manages these explicitly.
 			return
 		}
...
@@ -128,7 +128,13 @@ func (s *mspan) layout() (size, n, total uintptr) {
 }
 
 var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined in Go
-var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go
+
+// h_spans is a lookup table to map virtual address page IDs to *mspan.
+// For allocated spans, their pages map to the span itself.
+// For free spans, only the lowest and highest pages map to the span itself. Internal
+// pages map to an arbitrary span.
+// For pages that have never been allocated, h_spans entries are nil.
+var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go
 
 func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
 	h := (*mheap)(vh)
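
The invariant documented above is why heapBitsForObject re-checks the
span's bounds after the lookup: an internal page of a free span may map
to an arbitrary span. A self-contained sketch of that pattern
(spanOfPage and the table layout here are hypothetical, for
illustration only):

    package main

    import "fmt"

    type span struct{ start, npages uintptr }

    // spanOfPage consults a toy h_spans-style table. Because internal
    // pages of free spans may map to an arbitrary span, a result is
    // trusted only if the page lies inside the span it maps to.
    func spanOfPage(table []*span, page uintptr) *span {
        if page >= uintptr(len(table)) {
            return nil
        }
        s := table[page]
        if s == nil || page < s.start || page >= s.start+s.npages {
            return nil // never allocated, or a stale internal mapping
        }
        return s
    }

    func main() {
        table := make([]*span, 8)
        free := &span{start: 2, npages: 3}    // a free span covering pages 2-4
        table[2], table[4] = free, free       // only its boundary pages map to it
        table[3] = &span{start: 6, npages: 1} // internal page: arbitrary span

        fmt.Println(spanOfPage(table, 2) == free) // true: boundary page
        fmt.Println(spanOfPage(table, 3))         // <nil>: fails the bounds check
        fmt.Println(spanOfPage(table, 5))         // <nil>: never allocated
    }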
@@ -568,8 +574,9 @@ func mHeap_Grow(h *mheap, npage uintptr) bool {
 	mSpan_Init(s, pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
 	p := uintptr(s.start)
 	p -= (uintptr(unsafe.Pointer(h.arena_start)) >> _PageShift)
-	h_spans[p] = s
-	h_spans[p+s.npages-1] = s
+	for i := p; i < p+s.npages; i++ {
+		h_spans[i] = s
+	}
 	atomicstore(&s.sweepgen, h.sweepgen)
 	s.state = _MSpanInUse
 	mHeap_FreeSpanLocked(h, s, false, true, 0)
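
The loop above maps every page of the grown region, not just its first
and last, so a nil h_spans entry keeps meaning "never part of the heap"
even when the heap must grow around a hole. A toy version of the effect
(illustrative names, not runtime code):

    package main

    import "fmt"

    type span struct{ start, npages uintptr }

    // grow records a newly grown heap region in a toy h_spans-style
    // table, mapping every page as the fixed mHeap_Grow does, so that a
    // nil entry afterwards still means the page was never heap memory.
    func grow(table []*span, start, npages uintptr) {
        s := &span{start: start, npages: npages}
        for i := start; i < start+npages; i++ {
            table[i] = s
        }
    }

    func main() {
        table := make([]*span, 8)
        grow(table, 1, 3) // heap grows below a hole at pages 4-5...
        grow(table, 6, 2) // ...and above it
        for page, s := range table {
            fmt.Printf("page %d in heap: %v\n", page, s != nil)
        }
        // Pages 0, 4, and 5 stay nil: pointers there are treated as
        // never-in-heap (legitimate), not as bad pointers.
    }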