Commit e508a5f0 authored by Michael Anthony Knyszek, committed by Michael Knyszek

runtime: de-duplicate span scavenging

Currently, span scavenging is done nearly identically in two different
locations. This change deduplicates that code into one shared routine.

For #14045.

Change-Id: I15006b2c9af0e70b7a9eae9abb4168d3adca3860
Reviewed-on: https://go-review.googlesource.com/c/139297
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
parent de31f637
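
Before reading the diff, here is a minimal, self-contained sketch of the shape of the refactor (a toy model with simplified types and hypothetical bookkeeping, not the runtime's real code): two scavenging call sites collapse onto one shared method.

```go
package main

import "fmt"

// span is a toy stand-in for the runtime's mspan; npreleased tracks
// how many pages have already been returned to the OS.
type span struct{ pages, npreleased uintptr }

const pageSize = 8 << 10 // illustrative page size

// scavenge is the single shared routine: it computes how many bytes
// are newly releasable, records the release, and returns the count.
func (s *span) scavenge() uintptr {
	released := (s.pages - s.npreleased) * pageSize
	s.npreleased = s.pages
	return released
}

// Both former call sites now delegate to scavenge rather than
// duplicating its body.
func scavengeOne(s *span) uintptr { return s.scavenge() }

func scavengeAll(spans []*span) (sum uintptr) {
	for _, s := range spans {
		sum += s.scavenge()
	}
	return
}

func main() {
	spans := []*span{{pages: 4}, {pages: 2, npreleased: 1}}
	fmt.Println(scavengeOne(spans[0])) // 32768
	fmt.Println(scavengeAll(spans))    // 8192: first span was already scavenged
}
```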
@@ -351,6 +351,34 @@ func (s *mspan) layout() (size, n, total uintptr) {
 	return
 }
 
+func (s *mspan) scavenge() uintptr {
+	start := s.base()
+	end := start + s.npages<<_PageShift
+	if physPageSize > _PageSize {
+		// We can only release pages in
+		// physPageSize blocks, so round start
+		// and end in. (Otherwise, madvise
+		// will round them *out* and release
+		// more memory than we want.)
+		start = (start + physPageSize - 1) &^ (physPageSize - 1)
+		end &^= physPageSize - 1
+		if end <= start {
+			// start and end don't span a
+			// whole physical page.
+			return 0
+		}
+	}
+	len := end - start
+	released := len - (s.npreleased << _PageShift)
+	if physPageSize > _PageSize && released == 0 {
+		return 0
+	}
+	memstats.heap_released += uint64(released)
+	s.npreleased = len >> _PageShift
+	sysUnused(unsafe.Pointer(start), len)
+	return released
+}
+
 // recordspan adds a newly allocated span to h.allspans.
 //
 // This only happens the first time a span is allocated from
@@ -1087,35 +1115,12 @@ func (h *mheap) busyList(npages uintptr) *mSpanList {
 func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
 	s := t.spanKey
-	var sumreleased uintptr
 	if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
-		start := s.base()
-		end := start + s.npages<<_PageShift
-		if physPageSize > _PageSize {
-			// We can only release pages in
-			// physPageSize blocks, so round start
-			// and end in. (Otherwise, madvise
-			// will round them *out* and release
-			// more memory than we want.)
-			start = (start + physPageSize - 1) &^ (physPageSize - 1)
-			end &^= physPageSize - 1
-			if end <= start {
-				// start and end don't span a
-				// whole physical page.
-				return sumreleased
-			}
-		}
-		len := end - start
-		released := len - (s.npreleased << _PageShift)
-		if physPageSize > _PageSize && released == 0 {
-			return sumreleased
-		}
-		memstats.heap_released += uint64(released)
-		sumreleased += released
-		s.npreleased = len >> _PageShift
-		sysUnused(unsafe.Pointer(start), len)
+		if released := s.scavenge(); released != 0 {
+			return released
+		}
 	}
-	return sumreleased
+	return 0
 }
 
 func scavengelist(list *mSpanList, now, limit uint64) uintptr {
@@ -1128,32 +1133,7 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
 		if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
 			continue
 		}
-		start := s.base()
-		end := start + s.npages<<_PageShift
-		if physPageSize > _PageSize {
-			// We can only release pages in
-			// physPageSize blocks, so round start
-			// and end in. (Otherwise, madvise
-			// will round them *out* and release
-			// more memory than we want.)
-			start = (start + physPageSize - 1) &^ (physPageSize - 1)
-			end &^= physPageSize - 1
-			if end <= start {
-				// start and end don't span a
-				// whole physical page.
-				continue
-			}
-		}
-		len := end - start
-		released := len - (s.npreleased << _PageShift)
-		if physPageSize > _PageSize && released == 0 {
-			continue
-		}
-		memstats.heap_released += uint64(released)
-		sumreleased += released
-		s.npreleased = len >> _PageShift
-		sysUnused(unsafe.Pointer(start), len)
+		sumreleased += s.scavenge()
 	}
 	return sumreleased
 }
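
The subtlest part of the shared routine is the physical-page round-in. As a standalone illustration (a sketch with a hypothetical helper roundIn and made-up example addresses, not runtime code), the arithmetic behaves like this, assuming physPageSize is a power of two as it is in the runtime:

```go
package main

import "fmt"

// roundIn mirrors the round-in step in mspan.scavenge above: start is
// rounded up and end rounded down to physPageSize boundaries, so that
// sysUnused (madvise) never releases memory outside the span.
func roundIn(start, end, physPageSize uintptr) (uintptr, uintptr, bool) {
	start = (start + physPageSize - 1) &^ (physPageSize - 1)
	end &^= physPageSize - 1
	if end <= start {
		// start and end don't span a whole physical page.
		return 0, 0, false
	}
	return start, end, true
}

func main() {
	// An 8 KiB range inside a single 16 KiB physical page:
	// nothing can be released.
	if _, _, ok := roundIn(0x5000, 0x7000, 0x4000); !ok {
		fmt.Println("no whole physical page covered; release 0 bytes")
	}
	// A 64 KiB range misaligned against 16 KiB physical pages:
	// only the fully covered middle pages are released.
	s, e, _ := roundIn(0x1000, 0x11000, 0x4000)
	fmt.Printf("release [%#x, %#x): %d bytes\n", s, e, e-s) // [0x4000, 0x10000): 49152 bytes
}
```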