Commit f8d0d4fd authored by Rick Hudson

[dev.garbage] runtime: cleanup and optimize span.base()

Prior to this CL the base of a span was calculated in various
places using shifts or calls to base(). This CL now always calls
base(), which has been optimized to calculate the base of the span
once, when the span is initialized, and to store that value in the
span structure.

Change-Id: I661f2bfa21e3748a249cdf049ef9062db6e78100
Reviewed-on: https://go-review.googlesource.com/20703
Reviewed-by: Austin Clements <austin@google.com>
parent 8dda1c4c
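
A minimal, self-contained sketch of the pattern the message describes, kept deliberately separate from the real runtime code: the base address is computed from the page number once, when the span is initialized, and base() becomes a plain field load. The 8 KB page size (shift of 13) matches the runtime of this era; the type and field names below are simplified stand-ins, not the runtime's declarations beyond what the diff shows.

```go
package main

import "fmt"

const pageShift = 13 // 8 KB pages, as in the runtime this CL targets

type pageID uintptr

// span is a simplified stand-in for the runtime's mspan.
type span struct {
	start     pageID  // starting page number
	startAddr uintptr // start << pageShift, cached at init time
	npages    uintptr // number of pages in the span
}

// init computes the base address once and stores it, mirroring the change
// this CL makes to (*mspan).init.
func (s *span) init(start pageID, npages uintptr) {
	s.start = start
	s.npages = npages
	s.startAddr = uintptr(start) << pageShift
}

// base is now a field load instead of a shift on every call.
func (s *span) base() uintptr {
	return s.startAddr
}

func main() {
	var s span
	s.init(512, 4)
	limit := s.base() + s.npages<<pageShift // e.g. how largeAlloc derives s.limit
	fmt.Printf("base=%#x limit=%#x\n", s.base(), limit)
}
```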
@@ -711,7 +711,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
s = largeAlloc(size, flags)
})
s.freeindex = 1
- x = unsafe.Pointer(uintptr(s.start << pageShift))
+ x = unsafe.Pointer(s.base())
size = s.elemsize
}
@@ -833,7 +833,7 @@ func largeAlloc(size uintptr, flag uint32) *mspan {
if s == nil {
throw("out of memory")
}
- s.limit = uintptr(s.start)<<_PageShift + size
+ s.limit = s.base() + size
heapBitsForSpan(s.base()).initSpan(s)
return s
}
@@ -457,7 +457,7 @@ func heapBitsForObject(p, refBase, refOff uintptr) (base uintptr, hbits heapBits
} else {
print(" to unused region of span")
}
print("idx=", hex(idx), " span.start=", hex(s.start<<_PageShift), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
print("idx=", hex(idx), " span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
if refBase != 0 {
print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
gcDumpObject("object", refBase, refOff)
@@ -212,7 +212,7 @@ func (c *mcentral) grow() *mspan {
return nil
}
- p := uintptr(s.start << _PageShift)
+ p := s.base()
s.limit = p + size*n
heapBitsForSpan(s.base()).initSpan(s)
@@ -287,7 +287,7 @@ func markrootSpans(gcw *gcWork, shard int) {
// retain everything it points to.
spf := (*specialfinalizer)(unsafe.Pointer(sp))
// A finalizer can be set for an inner byte of an object, find object beginning.
- p := uintptr(s.start<<_PageShift) + uintptr(spf.special.offset)/s.elemsize*s.elemsize
+ p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
// Mark everything that can be reached from
// the object (but *not* the object itself or
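The offset arithmetic in the hunk above rounds the special's byte offset down to the beginning of the containing object by exploiting integer-division truncation. A short worked sketch with made-up numbers (base, elemsize, and offset are illustrative only, not taken from the source):

```go
package main

import "fmt"

func main() {
	var (
		base     uintptr = 0x400000 // hypothetical span base
		elemsize uintptr = 48       // hypothetical object size for this span's class
		offset   uintptr = 100      // special's offset of an inner byte of the object
	)
	// Integer division truncates: 100/48 = 2, so 100/48*48 = 96, which is the
	// offset of the object that contains byte 100.
	p := base + offset/elemsize*elemsize
	fmt.Printf("object beginning: %#x (span base + %d)\n", p, offset/elemsize*elemsize)
}
```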
@@ -211,13 +211,13 @@ func (s *mspan) sweep(preserve bool) bool {
special := *specialp
for special != nil {
// A finalizer can be set for an inner byte of an object, find object beginning.
- p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
+ p := s.base() + uintptr(special.offset)/size*size
mbits := s.markBitsForAddr(p)
if !mbits.isMarked() {
// This object is not marked and has at least one special record.
// Pass 1: see if it has at least one finalizer.
hasFin := false
- endOffset := p - uintptr(s.start<<_PageShift) + size
+ endOffset := p - s.base() + size
for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
if tmp.kind == _KindSpecialFinalizer {
// Stop freeing of object if it has a finalizer.
@@ -230,7 +230,7 @@ func (s *mspan) sweep(preserve bool) bool {
for special != nil && uintptr(special.offset) < endOffset {
// Find the exact byte for which the special was setup
// (as opposed to object beginning).
- p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
+ p := s.base() + uintptr(special.offset)
if special.kind == _KindSpecialFinalizer || !hasFin {
// Splice out special record.
y := special
@@ -311,7 +311,7 @@ func (s *mspan) sweep(preserve bool) bool {
// implement and then call some kind of MHeap_DeleteSpan.
if debug.efence > 0 {
s.limit = 0 // prevent mlookup from finding this span
- sysFault(unsafe.Pointer(uintptr(s.start<<_PageShift)), size)
+ sysFault(unsafe.Pointer(s.base()), size)
} else {
mheap_.freeSpan(s, 1)
}
@@ -116,7 +116,8 @@ type mspan struct {
next *mspan // next span in list, or nil if none
prev **mspan // previous span's next field, or list head's first field if none
list *mSpanList // For debugging. TODO: Remove.
+ //TODO:(rlh) Eliminate start field and use startAddr >> PageShift instead.
+ startAddr uintptr // uintptr(s.start << _PageShift) aka s.base()
start pageID // starting page number
npages uintptr // number of pages in span
stackfreelist gclinkptr // list of free stacks, avoids overloading freelist
@@ -184,7 +185,7 @@ type mspan struct {
}
func (s *mspan) base() uintptr {
- return uintptr(s.start << _PageShift)
+ return s.startAddr
}
func (s *mspan) layout() (size, n, total uintptr) {
@@ -300,7 +301,7 @@ func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
return 0
}
- p := uintptr(s.start) << _PageShift
+ p := s.base()
if s.sizeclass == 0 {
// Large object.
if base != nil {
@@ -542,7 +543,7 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
if s != nil {
if needzero && s.needzero != 0 {
- memclr(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+ memclr(unsafe.Pointer(s.base()), s.npages<<_PageShift)
}
s.needzero = 0
}
@@ -610,7 +611,7 @@ HaveSpan:
throw("still in list")
}
if s.npreleased > 0 {
- sysUsed(unsafe.Pointer(s.start<<_PageShift), s.npages<<_PageShift)
+ sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
memstats.heap_released -= uint64(s.npreleased << _PageShift)
s.npreleased = 0
}
@@ -826,6 +827,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
t := h_spans[p-1]
if t != nil && t.state == _MSpanFree {
s.start = t.start
+ s.startAddr = uintptr(s.start << _PageShift)
s.npages += t.npages
s.npreleased = t.npreleased // absorb released pages
s.needzero |= t.needzero
@@ -925,6 +927,7 @@ func (span *mspan) init(start pageID, npages uintptr) {
span.prev = nil
span.list = nil
span.start = start
+ span.startAddr = uintptr(start << _PageShift)
span.npages = npages
span.allocCount = 0
span.sizeclass = 0