Commit e1c4e9a7 authored by Rick Hudson

[dev.garbage] runtime: refactor next free object

In preparation for changing how the next free object is chosen
refactor and consolidate code into a single function.

Change-Id: I6836cd88ed7cbf0b2df87abd7c1c3b9fabc1cbd8
Reviewed-on: https://go-review.googlesource.com/19317
Reviewed-by: Austin Clements <austin@google.com>
parent aed86103
...@@ -496,6 +496,32 @@ const ( ...@@ -496,6 +496,32 @@ const (
_FlagNoZero = 1 << 1 // don't zero memory _FlagNoZero = 1 << 1 // don't zero memory
) )
// nextFree pops the next free object off the span cached for sizeclass,
// if one is available. Otherwise it refills the cache with a span that
// has a free object and returns an object from it, reporting the
// allocation as heavy weight via shouldhelpgc. On a heavy weight
// allocation the caller must decide whether a new GC cycle needs to be
// started or, if the GC is active, whether this goroutine needs to
// assist the GC.
// https://golang.org/cl/5350 motivates why this routine should perform a
// prefetch.
func (c *mcache) nextFree(sizeclass int8) (v gclinkptr, shouldhelpgc bool) {
	span := c.alloc[sizeclass]
	obj := span.freelist
	if obj.ptr() == nil {
		// The cached span is exhausted; refilling must run on the
		// system stack.
		systemstack(func() {
			c.refill(int32(sizeclass))
		})
		shouldhelpgc = true
		span = c.alloc[sizeclass]
		obj = span.freelist
	}
	next := obj.ptr().next
	span.freelist = next
	span.ref++
	// prefetchnta offers best performance, see change list message.
	prefetchnta(uintptr(next))
	return obj, shouldhelpgc
}
// Allocate an object of size bytes. // Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists. // Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap. // Large objects (> 32 kB) are allocated straight from the heap.
...@@ -554,7 +580,6 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { ...@@ -554,7 +580,6 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
shouldhelpgc := false shouldhelpgc := false
dataSize := size dataSize := size
c := gomcache() c := gomcache()
var s *mspan
var x unsafe.Pointer var x unsafe.Pointer
if size <= maxSmallSize { if size <= maxSmallSize {
if flags&flagNoScan != 0 && size < maxTinySize { if flags&flagNoScan != 0 && size < maxTinySize {
...@@ -606,20 +631,8 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { ...@@ -606,20 +631,8 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
return x return x
} }
// Allocate a new maxTinySize block. // Allocate a new maxTinySize block.
s = c.alloc[tinySizeClass] var v gclinkptr
v := s.freelist v, shouldhelpgc = c.nextFree(tinySizeClass)
if v.ptr() == nil {
systemstack(func() {
c.refill(tinySizeClass)
})
shouldhelpgc = true
s = c.alloc[tinySizeClass]
v = s.freelist
}
s.freelist = v.ptr().next
s.ref++
// prefetchnta offers best performance, see change list message.
prefetchnta(uintptr(v.ptr().next))
x = unsafe.Pointer(v) x = unsafe.Pointer(v)
(*[2]uint64)(x)[0] = 0 (*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0 (*[2]uint64)(x)[1] = 0
...@@ -638,20 +651,8 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { ...@@ -638,20 +651,8 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
sizeclass = size_to_class128[(size-1024+127)>>7] sizeclass = size_to_class128[(size-1024+127)>>7]
} }
size = uintptr(class_to_size[sizeclass]) size = uintptr(class_to_size[sizeclass])
s = c.alloc[sizeclass] var v gclinkptr
v := s.freelist v, shouldhelpgc = c.nextFree(sizeclass)
if v.ptr() == nil {
systemstack(func() {
c.refill(int32(sizeclass))
})
shouldhelpgc = true
s = c.alloc[sizeclass]
v = s.freelist
}
s.freelist = v.ptr().next
s.ref++
// prefetchnta offers best performance, see change list message.
prefetchnta(uintptr(v.ptr().next))
x = unsafe.Pointer(v) x = unsafe.Pointer(v)
if flags&flagNoZero == 0 { if flags&flagNoZero == 0 {
v.ptr().next = 0 v.ptr().next = 0
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment