Commit 1e2d2f09 authored by Russ Cox

[dev.cc] runtime: convert memory allocator and garbage collector to Go

The conversion was done with an automated tool and then
modified only as necessary to make it compile and run.

[This CL is part of the removal of C code from package runtime.
See golang.org/s/dev.cc for an overview.]

LGTM=r
R=r
CC=austin, dvyukov, golang-codereviews, iant, khr
https://golang.org/cl/167540043
parent d98553a7
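
The bulk of the mechanical change below is replacing the C convention of passing arguments to M-stack functions through m.scalararg/m.ptrarg with closures handed to onM, which capture their arguments and results directly. A minimal standalone sketch of that pattern follows; onM and largeAlloc here are stand-ins, not the runtime's real definitions.

package main

import "fmt"

// onM stands in for the runtime's "switch to the M (system) stack and run fn"
// primitive; the real one changes stacks, this stub just calls fn.
func onM(fn func()) { fn() }

// largeAlloc is a placeholder for the runtime helper of the same name.
func largeAlloc(size uintptr) []byte { return make([]byte, size) }

func main() {
	// The C code reached largeAlloc by storing size in m.scalararg[0],
	// calling onM(largeAlloc_m), and reading the result back out of
	// m.ptrarg[0]. The Go closure simply captures both:
	size := uintptr(1 << 12)
	var buf []byte
	onM(func() {
		buf = largeAlloc(size)
	})
	fmt.Println(len(buf)) // 4096
}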
@@ -26,7 +26,7 @@ func makechan(t *chantype, size int64) *hchan {
if hchanSize%maxAlign != 0 || elem.align > maxAlign {
gothrow("makechan: bad alignment")
}
if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (maxmem-hchanSize)/uintptr(elem.size)) {
if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/uintptr(elem.size)) {
panic("makechan: size out of range")
}
......
@@ -28,10 +28,11 @@ const (
maxGCMask = _MaxGCMask
bitsDead = _BitsDead
bitsPointer = _BitsPointer
bitsScalar = _BitsScalar
mSpanInUse = _MSpanInUse
concurrentSweep = _ConcurrentSweep != 0
concurrentSweep = _ConcurrentSweep
)
// Page number (address>>pageShift)
@@ -142,10 +143,9 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
s = c.alloc[tinySizeClass]
v := s.freelist
if v == nil {
mp := acquirem()
mp.scalararg[0] = tinySizeClass
onM(mcacheRefill_m)
releasem(mp)
onM(func() {
mCache_Refill(c, tinySizeClass)
})
s = c.alloc[tinySizeClass]
v = s.freelist
}
@@ -173,10 +173,9 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
s = c.alloc[sizeclass]
v := s.freelist
if v == nil {
mp := acquirem()
mp.scalararg[0] = uintptr(sizeclass)
onM(mcacheRefill_m)
releasem(mp)
onM(func() {
mCache_Refill(c, int32(sizeclass))
})
s = c.alloc[sizeclass]
v = s.freelist
}
@@ -193,13 +192,10 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
}
c.local_cachealloc += intptr(size)
} else {
mp := acquirem()
mp.scalararg[0] = uintptr(size)
mp.scalararg[1] = uintptr(flags)
onM(largeAlloc_m)
s = (*mspan)(mp.ptrarg[0])
mp.ptrarg[0] = nil
releasem(mp)
var s *mspan
onM(func() {
s = largeAlloc(size, uint32(flags))
})
x = unsafe.Pointer(uintptr(s.start << pageShift))
size = uintptr(s.elemsize)
}
@@ -359,7 +355,7 @@ func newarray(typ *_type, n uintptr) unsafe.Pointer {
if typ.kind&kindNoPointers != 0 {
flags |= flagNoScan
}
if int(n) < 0 || (typ.size > 0 && n > maxmem/uintptr(typ.size)) {
if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
panic("runtime: allocation size out of range")
}
return mallocgc(uintptr(typ.size)*n, typ, flags)
@@ -585,10 +581,9 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
ftyp := f._type
if ftyp == nil {
// switch to M stack and remove finalizer
mp := acquirem()
mp.ptrarg[0] = e.data
onM(removeFinalizer_m)
releasem(mp)
onM(func() {
removefinalizer(e.data)
})
return
}
@@ -633,18 +628,11 @@ okarg:
// make sure we have a finalizer goroutine
createfing()
// switch to M stack to add finalizer record
mp := acquirem()
mp.ptrarg[0] = f.data
mp.ptrarg[1] = e.data
mp.scalararg[0] = nret
mp.ptrarg[2] = unsafe.Pointer(fint)
mp.ptrarg[3] = unsafe.Pointer(ot)
onM(setFinalizer_m)
if mp.scalararg[0] != 1 {
gothrow("runtime.SetFinalizer: finalizer already set")
}
releasem(mp)
onM(func() {
if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
gothrow("runtime.SetFinalizer: finalizer already set")
}
})
}
// round n up to a multiple of a. a must be a power of 2.
......
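
The hunk above ends at the comment for the power-of-two round-up helper; a self-contained sketch of the usual bit trick behind it is shown below (the runtime's actual helper may differ in name and placement).

package main

import "fmt"

// round rounds n up to a multiple of a; a must be a power of 2.
// Adding a-1 and then clearing the low bits lands on the next multiple.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	fmt.Println(round(13, 8), round(16, 8)) // 16 16
}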
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Per-P malloc cache for small objects.
//
// See malloc.h for an overview.
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
extern volatile intgo runtime·MemProfileRate;
// dummy MSpan that contains no free objects.
MSpan runtime·emptymspan;
MCache*
runtime·allocmcache(void)
{
intgo rate;
MCache *c;
int32 i;
runtime·lock(&runtime·mheap.lock);
c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
runtime·unlock(&runtime·mheap.lock);
runtime·memclr((byte*)c, sizeof(*c));
for(i = 0; i < NumSizeClasses; i++)
c->alloc[i] = &runtime·emptymspan;
// Set first allocation sample size.
rate = runtime·MemProfileRate;
if(rate > 0x3fffffff) // make 2*rate not overflow
rate = 0x3fffffff;
if(rate != 0)
c->next_sample = runtime·fastrand1() % (2*rate);
return c;
}
static void
freemcache(MCache *c)
{
runtime·MCache_ReleaseAll(c);
runtime·stackcache_clear(c);
runtime·gcworkbuffree(c->gcworkbuf);
runtime·lock(&runtime·mheap.lock);
runtime·purgecachedstats(c);
runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
runtime·unlock(&runtime·mheap.lock);
}
static void
freemcache_m(void)
{
MCache *c;
c = g->m->ptrarg[0];
g->m->ptrarg[0] = nil;
freemcache(c);
}
void
runtime·freemcache(MCache *c)
{
void (*fn)(void);
g->m->ptrarg[0] = c;
fn = freemcache_m;
runtime·onM(&fn);
}
// Gets a span that has a free object in it and assigns it
// to be the cached span for the given sizeclass. Returns this span.
MSpan*
runtime·MCache_Refill(MCache *c, int32 sizeclass)
{
MSpan *s;
g->m->locks++;
// Return the current cached span to the central lists.
s = c->alloc[sizeclass];
if(s->freelist != nil)
runtime·throw("refill on a nonempty span");
if(s != &runtime·emptymspan)
s->incache = false;
// Get a new cached span from the central lists.
s = runtime·MCentral_CacheSpan(&runtime·mheap.central[sizeclass].mcentral);
if(s == nil)
runtime·throw("out of memory");
if(s->freelist == nil) {
runtime·printf("%d %d\n", s->ref, (int32)((s->npages << PageShift) / s->elemsize));
runtime·throw("empty span");
}
c->alloc[sizeclass] = s;
g->m->locks--;
return s;
}
void
runtime·MCache_ReleaseAll(MCache *c)
{
int32 i;
MSpan *s;
for(i=0; i<NumSizeClasses; i++) {
s = c->alloc[i];
if(s != &runtime·emptymspan) {
runtime·MCentral_UncacheSpan(&runtime·mheap.central[i].mcentral, s);
c->alloc[i] = &runtime·emptymspan;
}
}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Per-P malloc cache for small objects.
//
// See malloc.h for an overview.
package runtime
import "unsafe"
// dummy MSpan that contains no free objects.
var emptymspan mspan
func allocmcache() *mcache {
lock(&mheap_.lock)
c := (*mcache)(fixAlloc_Alloc(&mheap_.cachealloc))
unlock(&mheap_.lock)
memclr(unsafe.Pointer(c), unsafe.Sizeof(*c))
for i := 0; i < _NumSizeClasses; i++ {
c.alloc[i] = &emptymspan
}
// Set first allocation sample size.
rate := MemProfileRate
if rate > 0x3fffffff { // make 2*rate not overflow
rate = 0x3fffffff
}
if rate != 0 {
c.next_sample = int32(int(fastrand1()) % (2 * rate))
}
return c
}
func freemcache(c *mcache) {
onM(func() {
mCache_ReleaseAll(c)
stackcache_clear(c)
gcworkbuffree(c.gcworkbuf)
lock(&mheap_.lock)
purgecachedstats(c)
fixAlloc_Free(&mheap_.cachealloc, unsafe.Pointer(c))
unlock(&mheap_.lock)
})
}
// Gets a span that has a free object in it and assigns it
// to be the cached span for the given sizeclass. Returns this span.
func mCache_Refill(c *mcache, sizeclass int32) *mspan {
_g_ := getg()
_g_.m.locks++
// Return the current cached span to the central lists.
s := c.alloc[sizeclass]
if s.freelist != nil {
gothrow("refill on a nonempty span")
}
if s != &emptymspan {
s.incache = false
}
// Get a new cached span from the central lists.
s = mCentral_CacheSpan(&mheap_.central[sizeclass].mcentral)
if s == nil {
gothrow("out of memory")
}
if s.freelist == nil {
println(s.ref, (s.npages<<_PageShift)/s.elemsize)
gothrow("empty span")
}
c.alloc[sizeclass] = s
_g_.m.locks--
return s
}
func mCache_ReleaseAll(c *mcache) {
for i := 0; i < _NumSizeClasses; i++ {
s := c.alloc[i]
if s != &emptymspan {
mCentral_UncacheSpan(&mheap_.central[i].mcentral, s)
c.alloc[i] = &emptymspan
}
}
}
@@ -10,118 +10,110 @@
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
package runtime
static MSpan* MCentral_Grow(MCentral *c);
import "unsafe"
// Initialize a single central free list.
void
runtime·MCentral_Init(MCentral *c, int32 sizeclass)
{
c->sizeclass = sizeclass;
runtime·MSpanList_Init(&c->nonempty);
runtime·MSpanList_Init(&c->empty);
func mCentral_Init(c *mcentral, sizeclass int32) {
c.sizeclass = sizeclass
mSpanList_Init(&c.nonempty)
mSpanList_Init(&c.empty)
}
// Allocate a span to use in an MCache.
MSpan*
runtime·MCentral_CacheSpan(MCentral *c)
{
MSpan *s;
int32 cap, n;
uint32 sg;
runtime·lock(&c->lock);
sg = runtime·mheap.sweepgen;
func mCentral_CacheSpan(c *mcentral) *mspan {
lock(&c.lock)
sg := mheap_.sweepgen
retry:
for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
runtime·MSpanList_Remove(s);
runtime·MSpanList_InsertBack(&c->empty, s);
runtime·unlock(&c->lock);
runtime·MSpan_Sweep(s, true);
goto havespan;
var s *mspan
for s = c.nonempty.next; s != &c.nonempty; s = s.next {
if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
mSpanList_Remove(s)
mSpanList_InsertBack(&c.empty, s)
unlock(&c.lock)
mSpan_Sweep(s, true)
goto havespan
}
if(s->sweepgen == sg-1) {
if s.sweepgen == sg-1 {
// the span is being swept by background sweeper, skip
continue;
continue
}
// we have a nonempty span that does not require sweeping, allocate from it
runtime·MSpanList_Remove(s);
runtime·MSpanList_InsertBack(&c->empty, s);
runtime·unlock(&c->lock);
goto havespan;
mSpanList_Remove(s)
mSpanList_InsertBack(&c.empty, s)
unlock(&c.lock)
goto havespan
}
for(s = c->empty.next; s != &c->empty; s = s->next) {
if(s->sweepgen == sg-2 && runtime·cas(&s->sweepgen, sg-2, sg-1)) {
for s = c.empty.next; s != &c.empty; s = s.next {
if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
// we have an empty span that requires sweeping,
// sweep it and see if we can free some space in it
runtime·MSpanList_Remove(s);
mSpanList_Remove(s)
// swept spans are at the end of the list
runtime·MSpanList_InsertBack(&c->empty, s);
runtime·unlock(&c->lock);
runtime·MSpan_Sweep(s, true);
if(s->freelist != nil)
goto havespan;
runtime·lock(&c->lock);
mSpanList_InsertBack(&c.empty, s)
unlock(&c.lock)
mSpan_Sweep(s, true)
if s.freelist != nil {
goto havespan
}
lock(&c.lock)
// the span is still empty after sweep
// it is already in the empty list, so just retry
goto retry;
goto retry
}
if(s->sweepgen == sg-1) {
if s.sweepgen == sg-1 {
// the span is being swept by background sweeper, skip
continue;
continue
}
// already swept empty span,
// all subsequent ones must also be either swept or in process of sweeping
break;
break
}
runtime·unlock(&c->lock);
unlock(&c.lock)
// Replenish central list if empty.
s = MCentral_Grow(c);
if(s == nil)
return nil;
runtime·lock(&c->lock);
runtime·MSpanList_InsertBack(&c->empty, s);
runtime·unlock(&c->lock);
s = mCentral_Grow(c)
if s == nil {
return nil
}
lock(&c.lock)
mSpanList_InsertBack(&c.empty, s)
unlock(&c.lock)
havespan:
// At this point s is a non-empty span, queued at the end of the empty list,
// c is unlocked.
cap = (s->npages << PageShift) / s->elemsize;
n = cap - s->ref;
if(n == 0)
runtime·throw("empty span");
if(s->freelist == nil)
runtime·throw("freelist empty");
s->incache = true;
return s;
havespan:
cap := int32((s.npages << _PageShift) / s.elemsize)
n := cap - int32(s.ref)
if n == 0 {
gothrow("empty span")
}
if s.freelist == nil {
gothrow("freelist empty")
}
s.incache = true
return s
}
// Return span from an MCache.
void
runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s)
{
int32 cap, n;
runtime·lock(&c->lock);
func mCentral_UncacheSpan(c *mcentral, s *mspan) {
lock(&c.lock)
s->incache = false;
s.incache = false
if(s->ref == 0)
runtime·throw("uncaching full span");
if s.ref == 0 {
gothrow("uncaching full span")
}
cap = (s->npages << PageShift) / s->elemsize;
n = cap - s->ref;
if(n > 0) {
runtime·MSpanList_Remove(s);
runtime·MSpanList_Insert(&c->nonempty, s);
cap := int32((s.npages << _PageShift) / s.elemsize)
n := cap - int32(s.ref)
if n > 0 {
mSpanList_Remove(s)
mSpanList_Insert(&c.nonempty, s)
}
runtime·unlock(&c->lock);
unlock(&c.lock)
}
// Free n objects from a span s back into the central free list c.
@@ -130,85 +122,78 @@ runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s)
// the latest generation.
// If preserve=true, don't return the span to heap nor relink in MCentral lists;
// caller takes care of it.
bool
runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end, bool preserve)
{
bool wasempty;
if(s->incache)
runtime·throw("freespan into cached span");
func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start *mlink, end *mlink, preserve bool) bool {
if s.incache {
gothrow("freespan into cached span")
}
// Add the objects back to s's free list.
wasempty = s->freelist == nil;
end->next = s->freelist;
s->freelist = start;
s->ref -= n;
wasempty := s.freelist == nil
end.next = s.freelist
s.freelist = start
s.ref -= uint16(n)
if(preserve) {
if preserve {
// preserve is set only when called from MCentral_CacheSpan above,
// the span must be in the empty list.
if(s->next == nil)
runtime·throw("can't preserve unlinked span");
runtime·atomicstore(&s->sweepgen, runtime·mheap.sweepgen);
return false;
if s.next == nil {
gothrow("can't preserve unlinked span")
}
atomicstore(&s.sweepgen, mheap_.sweepgen)
return false
}
runtime·lock(&c->lock);
lock(&c.lock)
// Move to nonempty if necessary.
if(wasempty) {
runtime·MSpanList_Remove(s);
runtime·MSpanList_Insert(&c->nonempty, s);
if wasempty {
mSpanList_Remove(s)
mSpanList_Insert(&c.nonempty, s)
}
// delay updating sweepgen until here. This is the signal that
// the span may be used in an MCache, so it must come after the
// linked list operations above (actually, just after the
// lock of c above.)
runtime·atomicstore(&s->sweepgen, runtime·mheap.sweepgen);
atomicstore(&s.sweepgen, mheap_.sweepgen)
if(s->ref != 0) {
runtime·unlock(&c->lock);
return false;
if s.ref != 0 {
unlock(&c.lock)
return false
}
// s is completely freed, return it to the heap.
runtime·MSpanList_Remove(s);
s->needzero = 1;
s->freelist = nil;
runtime·unlock(&c->lock);
runtime·unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
runtime·MHeap_Free(&runtime·mheap, s, 0);
return true;
mSpanList_Remove(s)
s.needzero = 1
s.freelist = nil
unlock(&c.lock)
unmarkspan(uintptr(s.start)<<_PageShift, s.npages<<_PageShift)
mHeap_Free(&mheap_, s, 0)
return true
}
// Fetch a new span from the heap and carve into objects for the free list.
static MSpan*
MCentral_Grow(MCentral *c)
{
uintptr size, npages, i, n;
MLink **tailp, *v;
byte *p;
MSpan *s;
npages = runtime·class_to_allocnpages[c->sizeclass];
size = runtime·class_to_size[c->sizeclass];
n = (npages << PageShift) / size;
s = runtime·MHeap_Alloc(&runtime·mheap, npages, c->sizeclass, 0, 1);
if(s == nil)
return nil;
func mCentral_Grow(c *mcentral) *mspan {
npages := uintptr(class_to_allocnpages[c.sizeclass])
size := uintptr(class_to_size[c.sizeclass])
n := (npages << _PageShift) / size
s := mHeap_Alloc(&mheap_, npages, c.sizeclass, false, true)
if s == nil {
return nil
}
// Carve span into sequence of blocks.
tailp = &s->freelist;
p = (byte*)(s->start << PageShift);
s->limit = p + size*n;
for(i=0; i<n; i++) {
v = (MLink*)p;
*tailp = v;
tailp = &v->next;
p += size;
tailp := &s.freelist
p := uintptr(s.start << _PageShift)
s.limit = p + size*n
for i := uintptr(0); i < n; i++ {
v := (*mlink)(unsafe.Pointer(p))
*tailp = v
tailp = &v.next
p += size
}
*tailp = nil;
runtime·markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));
return s;
*tailp = nil
markspan(unsafe.Pointer(uintptr(s.start)<<_PageShift), size, n, size*n < s.npages<<_PageShift)
return s
}
@@ -59,7 +59,11 @@ type MemStats struct {
}
}
var sizeof_C_MStats uintptr // filled in by malloc.goc
// Size of the trailing by_size array differs between Go and C,
// and all data after by_size is local to runtime, not exported.
// NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility.
// sizeof_C_MStats is what C thinks the size of the Go struct is.
var sizeof_C_MStats = unsafe.Offsetof(memstats.by_size) + 61*unsafe.Sizeof(memstats.by_size[0])
func init() {
var memStats MemStats
......
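
To make the sizeof_C_MStats arithmetic above concrete, here is a self-contained illustration of computing a shared struct prefix with unsafe.Offsetof and unsafe.Sizeof; the stats type below is made up for the example and is not the real MemStats layout.

package main

import (
	"fmt"
	"unsafe"
)

type stats struct {
	alloc   uint64
	sys     uint64
	by_size [67]struct{ size, mallocs, frees uint64 }
	private uint64 // fields after by_size are not part of the shared prefix
}

func main() {
	var s stats
	// Size of the prefix ending after the first 61 by_size entries, i.e.
	// what a C definition with a shorter trailing array would consider
	// the size of the struct.
	prefix := unsafe.Offsetof(s.by_size) + 61*unsafe.Sizeof(s.by_size[0])
	fmt.Println(prefix)
}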
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "runtime.h"
#include "arch_GOARCH.h"
#include "defs_GOOS_GOARCH.h"
#include "os_GOOS.h"
#include "malloc.h"
#include "textflag.h"
#pragma textflag NOSPLIT
void*
runtime·sysAlloc(uintptr n, uint64 *stat)
{
void *v;
v = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(v < (void*)4096)
return nil;
runtime·xadd64(stat, n);
return v;
}
void
runtime·SysUnused(void *v, uintptr n)
{
// Linux's MADV_DONTNEED is like BSD's MADV_FREE.
runtime·madvise(v, n, MADV_FREE);
}
void
runtime·SysUsed(void *v, uintptr n)
{
USED(v);
USED(n);
}
void
runtime·SysFree(void *v, uintptr n, uint64 *stat)
{
runtime·xadd64(stat, -(uint64)n);
runtime·munmap(v, n);
}
void
runtime·SysFault(void *v, uintptr n)
{
runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
}
void*
runtime·SysReserve(void *v, uintptr n, bool *reserved)
{
void *p;
*reserved = true;
p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p < (void*)4096)
return nil;
return p;
}
enum
{
ENOMEM = 12,
};
void
runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
void *p;
USED(reserved);
runtime·xadd64(stat, n);
p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
if(p == (void*)ENOMEM)
runtime·throw("runtime: out of memory");
if(p != v)
runtime·throw("runtime: cannot map pages in arena address space");
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
//go:nosplit
func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
v := (unsafe.Pointer)(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
if uintptr(v) < 4096 {
return nil
}
xadd64(stat, int64(n))
return v
}
func sysUnused(v unsafe.Pointer, n uintptr) {
// Linux's MADV_DONTNEED is like BSD's MADV_FREE.
madvise(v, n, _MADV_FREE)
}
func sysUsed(v unsafe.Pointer, n uintptr) {
}
func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
xadd64(stat, -int64(n))
munmap(v, n)
}
func sysFault(v unsafe.Pointer, n uintptr) {
mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
}
func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
*reserved = true
p := (unsafe.Pointer)(mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
if uintptr(p) < 4096 {
return nil
}
return p
}
const (
_ENOMEM = 12
)
func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
xadd64(stat, int64(n))
p := (unsafe.Pointer)(mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0))
if uintptr(p) == _ENOMEM {
gothrow("runtime: out of memory")
}
if p != v {
gothrow("runtime: cannot map pages in arena address space")
}
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "runtime.h"
#include "arch_GOARCH.h"
#include "defs_GOOS_GOARCH.h"
#include "os_GOOS.h"
#include "malloc.h"
#include "textflag.h"
enum
{
_PAGE_SIZE = 4096,
EACCES = 13,
};
static int32
addrspace_free(void *v, uintptr n)
{
int32 errval;
uintptr chunk;
uintptr off;
// NOTE: vec must be just 1 byte long here.
// Mincore returns ENOMEM if any of the pages are unmapped,
// but we want to know that all of the pages are unmapped.
// To make these the same, we can only ask about one page
// at a time. See golang.org/issue/7476.
static byte vec[1];
for(off = 0; off < n; off += chunk) {
chunk = _PAGE_SIZE * sizeof vec;
if(chunk > (n - off))
chunk = n - off;
errval = runtime·mincore((int8*)v + off, chunk, vec);
// ENOMEM means unmapped, which is what we want.
// Anything else we assume means the pages are mapped.
if (errval != -ENOMEM)
return 0;
}
return 1;
}
static void *
mmap_fixed(byte *v, uintptr n, int32 prot, int32 flags, int32 fd, uint32 offset)
{
void *p;
p = runtime·mmap(v, n, prot, flags, fd, offset);
if(p != v && addrspace_free(v, n)) {
// On some systems, mmap ignores v without
// MAP_FIXED, so retry if the address space is free.
if(p > (void*)4096)
runtime·munmap(p, n);
p = runtime·mmap(v, n, prot, flags|MAP_FIXED, fd, offset);
}
return p;
}
#pragma textflag NOSPLIT
void*
runtime·sysAlloc(uintptr n, uint64 *stat)
{
void *p;
p = runtime·mmap(nil, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p < (void*)4096) {
if(p == (void*)EACCES) {
runtime·printf("runtime: mmap: access denied\n");
runtime·printf("if you're running SELinux, enable execmem for this process.\n");
runtime·exit(2);
}
if(p == (void*)EAGAIN) {
runtime·printf("runtime: mmap: too much locked memory (check 'ulimit -l').\n");
runtime·exit(2);
}
return nil;
}
runtime·xadd64(stat, n);
return p;
}
void
runtime·SysUnused(void *v, uintptr n)
{
runtime·madvise(v, n, MADV_DONTNEED);
}
void
runtime·SysUsed(void *v, uintptr n)
{
USED(v);
USED(n);
}
void
runtime·SysFree(void *v, uintptr n, uint64 *stat)
{
runtime·xadd64(stat, -(uint64)n);
runtime·munmap(v, n);
}
void
runtime·SysFault(void *v, uintptr n)
{
runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
}
void*
runtime·SysReserve(void *v, uintptr n, bool *reserved)
{
void *p;
// On 64-bit, people with ulimit -v set complain if we reserve too
// much address space. Instead, assume that the reservation is okay
// if we can reserve at least 64K and check the assumption in SysMap.
// Only user-mode Linux (UML) rejects these requests.
if(sizeof(void*) == 8 && n > 1LL<<32) {
p = mmap_fixed(v, 64<<10, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
if (p != v) {
if(p >= (void*)4096)
runtime·munmap(p, 64<<10);
return nil;
}
runtime·munmap(p, 64<<10);
*reserved = false;
return v;
}
p = runtime·mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
if((uintptr)p < 4096)
return nil;
*reserved = true;
return p;
}
void
runtime·SysMap(void *v, uintptr n, bool reserved, uint64 *stat)
{
void *p;
runtime·xadd64(stat, n);
// On 64-bit, we don't actually have v reserved, so tread carefully.
if(!reserved) {
p = mmap_fixed(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
if(p == (void*)ENOMEM)
runtime·throw("runtime: out of memory");
if(p != v) {
runtime·printf("runtime: address space conflict: map(%p) = %p\n", v, p);
runtime·throw("runtime: address space conflict");
}
return;
}
p = runtime·mmap(v, n, PROT_READ|PROT_WRITE, MAP_ANON|MAP_FIXED|MAP_PRIVATE, -1, 0);
if(p == (void*)ENOMEM)
runtime·throw("runtime: out of memory");
if(p != v)
runtime·throw("runtime: cannot map pages in arena address space");
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import "unsafe"
const (
_PAGE_SIZE = 4096
_EACCES = 13
)
// NOTE: vec must be just 1 byte long here.
// Mincore returns ENOMEM if any of the pages are unmapped,
// but we want to know that all of the pages are unmapped.
// To make these the same, we can only ask about one page
// at a time. See golang.org/issue/7476.
var addrspace_vec [1]byte
func addrspace_free(v unsafe.Pointer, n uintptr) bool {
var chunk uintptr
for off := uintptr(0); off < n; off += chunk {
chunk = _PAGE_SIZE * uintptr(len(addrspace_vec))
if chunk > (n - off) {
chunk = n - off
}
errval := mincore(unsafe.Pointer(uintptr(v)+off), chunk, &addrspace_vec[0])
// ENOMEM means unmapped, which is what we want.
// Anything else we assume means the pages are mapped.
if errval != -_ENOMEM {
return false
}
}
return true
}
func mmap_fixed(v unsafe.Pointer, n uintptr, prot, flags, fd int32, offset uint32) unsafe.Pointer {
p := mmap(v, n, prot, flags, fd, offset)
if p != v && addrspace_free(v, n) {
// On some systems, mmap ignores v without
// MAP_FIXED, so retry if the address space is free.
if uintptr(p) > 4096 {
munmap(p, n)
}
p = mmap(v, n, prot, flags|_MAP_FIXED, fd, offset)
}
return p
}
//go:nosplit
func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer {
p := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if uintptr(p) < 4096 {
if uintptr(p) == _EACCES {
print("runtime: mmap: access denied\n")
print("if you're running SELinux, enable execmem for this process.\n")
exit(2)
}
if uintptr(p) == _EAGAIN {
print("runtime: mmap: too much locked memory (check 'ulimit -l').\n")
exit(2)
}
return nil
}
xadd64(stat, int64(n))
return p
}
func sysUnused(v unsafe.Pointer, n uintptr) {
madvise(v, n, _MADV_DONTNEED)
}
func sysUsed(v unsafe.Pointer, n uintptr) {
}
func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
xadd64(stat, -int64(n))
munmap(v, n)
}
func sysFault(v unsafe.Pointer, n uintptr) {
mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)
}
func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
// On 64-bit, people with ulimit -v set complain if we reserve too
// much address space. Instead, assume that the reservation is okay
// if we can reserve at least 64K and check the assumption in SysMap.
// Only user-mode Linux (UML) rejects these requests.
if ptrSize == 7 && uint64(n) > 1<<32 {
p := mmap_fixed(v, 64<<10, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if p != v {
if uintptr(p) >= 4096 {
munmap(p, 64<<10)
}
return nil
}
munmap(p, 64<<10)
*reserved = false
return v
}
p := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if uintptr(p) < 4096 {
return nil
}
*reserved = true
return p
}
func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
xadd64(stat, int64(n))
// On 64-bit, we don't actually have v reserved, so tread carefully.
if !reserved {
p := mmap_fixed(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if uintptr(p) == _ENOMEM {
gothrow("runtime: out of memory")
}
if p != v {
print("runtime: address space conflict: map(", v, ") = ", p, "\n")
gothrow("runtime: address space conflict")
}
return
}
p := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
if uintptr(p) == _ENOMEM {
gothrow("runtime: out of memory")
}
if p != v {
gothrow("runtime: cannot map pages in arena address space")
}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Fixed-size object allocator. Returned memory is not zeroed.
//
// See malloc.h for overview.
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
// Initialize f to allocate objects of the given size,
// using the allocator to obtain chunks of memory.
void
runtime·FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg, uint64 *stat)
{
f->size = size;
f->first = first;
f->arg = arg;
f->list = nil;
f->chunk = nil;
f->nchunk = 0;
f->inuse = 0;
f->stat = stat;
}
void*
runtime·FixAlloc_Alloc(FixAlloc *f)
{
void *v;
if(f->size == 0) {
runtime·printf("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n");
runtime·throw("runtime: internal error");
}
if(f->list) {
v = f->list;
f->list = *(void**)f->list;
f->inuse += f->size;
return v;
}
if(f->nchunk < f->size) {
f->chunk = runtime·persistentalloc(FixAllocChunk, 0, f->stat);
f->nchunk = FixAllocChunk;
}
v = f->chunk;
if(f->first)
f->first(f->arg, v);
f->chunk += f->size;
f->nchunk -= f->size;
f->inuse += f->size;
return v;
}
void
runtime·FixAlloc_Free(FixAlloc *f, void *p)
{
f->inuse -= f->size;
*(void**)p = f->list;
f->list = p;
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Fixed-size object allocator. Returned memory is not zeroed.
//
// See malloc.h for overview.
package runtime
import "unsafe"
// Initialize f to allocate objects of the given size,
// using the allocator to obtain chunks of memory.
func fixAlloc_Init(f *fixalloc, size uintptr, first func(unsafe.Pointer, unsafe.Pointer), arg unsafe.Pointer, stat *uint64) {
f.size = size
f.first = *(*unsafe.Pointer)(unsafe.Pointer(&first))
f.arg = arg
f.list = nil
f.chunk = nil
f.nchunk = 0
f.inuse = 0
f.stat = stat
}
func fixAlloc_Alloc(f *fixalloc) unsafe.Pointer {
if f.size == 0 {
print("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n")
gothrow("runtime: internal error")
}
if f.list != nil {
v := unsafe.Pointer(f.list)
f.list = f.list.next
f.inuse += f.size
return v
}
if uintptr(f.nchunk) < f.size {
f.chunk = (*uint8)(persistentalloc(_FixAllocChunk, 0, f.stat))
f.nchunk = _FixAllocChunk
}
v := (unsafe.Pointer)(f.chunk)
if f.first != nil {
fn := *(*func(unsafe.Pointer, unsafe.Pointer))(unsafe.Pointer(&f.first))
fn(f.arg, v)
}
f.chunk = (*byte)(add(unsafe.Pointer(f.chunk), f.size))
f.nchunk -= uint32(f.size)
f.inuse += f.size
return v
}
func fixAlloc_Free(f *fixalloc, p unsafe.Pointer) {
f.inuse -= f.size
v := (*mlink)(p)
v.next = f.list
f.list = v
}
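
For readers outside the runtime, here is a stripped-down, self-contained version of the same chunk-plus-free-list idea: ordinary Go allocation replaces persistentalloc, and the free list is a slice rather than an intrusive mlink list, so this is a sketch of the technique, not the runtime's fixalloc.

package main

import "fmt"

// fixAlloc hands out fixed-size objects, carving them from large chunks and
// recycling freed ones through a free list.
type fixAlloc struct {
	size     int
	freelist [][]byte // recycled objects
	chunk    []byte   // remainder of the current chunk being carved up
	inuse    int      // bytes currently handed out
}

const chunkSize = 16 << 10 // plays the role of _FixAllocChunk

func newFixAlloc(size int) *fixAlloc { return &fixAlloc{size: size} }

func (f *fixAlloc) alloc() []byte {
	// Prefer a recycled object.
	if n := len(f.freelist); n > 0 {
		v := f.freelist[n-1]
		f.freelist = f.freelist[:n-1]
		f.inuse += f.size
		return v
	}
	// Start a new chunk if the current one is exhausted.
	if len(f.chunk) < f.size {
		f.chunk = make([]byte, chunkSize)
	}
	v := f.chunk[:f.size:f.size]
	f.chunk = f.chunk[f.size:]
	f.inuse += f.size
	return v
}

func (f *fixAlloc) free(p []byte) {
	f.inuse -= f.size
	f.freelist = append(f.freelist, p)
}

func main() {
	f := newFixAlloc(64)
	a, b := f.alloc(), f.alloc()
	f.free(a)
	c := f.alloc() // pops a's slot back off the free list
	fmt.Println(len(b), len(c), f.inuse)
}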
@@ -60,10 +60,8 @@ func clearpools() {
}
}
func gosweepone() uintptr
func gosweepdone() bool
func bgsweep() {
sweep.g = getg()
getg().issystem = true
for {
for gosweepone() != ^uintptr(0) {
......
@@ -4,11 +4,15 @@
// Garbage collector (GC)
enum {
package runtime
const (
// Four bits per word (see #defines below).
gcBits = 4,
wordsPerBitmapByte = 8/gcBits,
gcBits = 4
wordsPerBitmapByte = 8 / gcBits
)
const (
// GC type info programs.
// The programs allow storing the type info required for GC in a compact form.
// Most importantly, arrays take O(1) space instead of O(n).
@@ -26,38 +30,33 @@ enum {
// For example, for type struct { x []byte; y [20]struct{ z int; w *byte }; }
// the program looks like:
//
// insData 3 (BitsMultiWord BitsSlice BitsScalar)
// insData 3 (BitsPointer BitsScalar BitsScalar)
// insArray 20 insData 2 (BitsScalar BitsPointer) insArrayEnd insEnd
//
// The total size of the program is 17 bytes (13 bytes on 32-bit systems).
// The corresponding GC mask would take 43 bytes (it would be repeated
// because the type has an odd number of words).
insData = 1,
insArray,
insArrayEnd,
insEnd,
insData = 1 + iota
insArray
insArrayEnd
insEnd
)
const (
// Pointer map
BitsPerPointer = 2,
BitsMask = (1<<BitsPerPointer)-1,
PointersPerByte = 8/BitsPerPointer,
_BitsPerPointer = 2
_BitsMask = (1 << _BitsPerPointer) - 1
_PointersPerByte = 8 / _BitsPerPointer
// If you change these, also change scanblock.
// scanblock does "if(bits == BitsScalar || bits == BitsDead)" as "if(bits <= BitsScalar)".
BitsDead = 0,
BitsScalar = 1,
BitsPointer = 2,
BitsMultiWord = 3,
// BitsMultiWord will be set for the first word of a multi-word item.
// When it is set, one of the following will be set for the second word.
// NOT USED ANYMORE: BitsString = 0,
// NOT USED ANYMORE: BitsSlice = 1,
BitsIface = 2,
BitsEface = 3,
_BitsDead = 0
_BitsScalar = 1
_BitsPointer = 2
// 64 bytes cover objects of size 1024/512 on 64/32 bits, respectively.
MaxGCMask = 64,
};
_MaxGCMask = 64
)
// Bits in per-word bitmap.
// #defines because we shift the values beyond 32 bits.
@@ -70,9 +69,9 @@ enum {
// there. On a 64-bit system the off'th word in the arena is tracked by
// the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
// the only difference is that the divisor is 8.)
enum {
bitBoundary = 1, // boundary of an object
bitMarked = 2, // marked object
bitMask = bitBoundary | bitMarked,
bitPtrMask = BitsMask<<2,
};
const (
bitBoundary = 1 // boundary of an object
bitMarked = 2 // marked object
bitMask = bitBoundary | bitMarked
bitPtrMask = _BitsMask << 2
)
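
Putting the two comment blocks above together: each heap word gets gcBits = 4 bits of metadata, with the boundary/mark flags in the low two bits and the 2-bit pointer-type descriptor just above them, and on 64-bit the word at offset off in the arena is described by the off/16+1'th word before arena_start. The following self-contained sketch of that arithmetic assumes the 64-bit layout and omits the real code's packing of several entries into position within one bitmap word.

package main

import "fmt"

const (
	bitBoundary = 1 // boundary of an object
	bitMarked   = 2 // marked object

	_BitsPerPointer = 2
	_BitsMask       = 1<<_BitsPerPointer - 1
	_BitsPointer    = 2
	bitPtrMask      = _BitsMask << 2

	ptrSize            = 8                    // assume a 64-bit system
	gcBits             = 4                    // metadata bits per heap word
	wordsPerBitmapWord = ptrSize * 8 / gcBits // 16 heap words per bitmap word
)

// encode packs the per-word flags and 2-bit type descriptor as the constants
// above describe: flags in bits 0-1, type bits in bits 2-3.
func encode(boundary, marked bool, typeBits uintptr) uintptr {
	var b uintptr
	if boundary {
		b |= bitBoundary
	}
	if marked {
		b |= bitMarked
	}
	return b | typeBits<<2
}

// bitmapWord returns the address of the bitmap word describing the heap word
// at p: the off'th arena word is tracked off/16+1 words before arena_start.
func bitmapWord(arenaStart, p uintptr) uintptr {
	off := (p - arenaStart) / ptrSize
	return arenaStart - (off/wordsPerBitmapWord+1)*ptrSize
}

func main() {
	b := encode(true, false, _BitsPointer)
	fmt.Printf("bits=%#x marked=%v type=%d\n", b, b&bitMarked != 0, (b&bitPtrMask)>>2)

	arenaStart := uintptr(0x10000000)
	fmt.Printf("bitmap word for arena word 0: %#x\n", bitmapWord(arenaStart, arenaStart))
}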