Commit 5c795632 authored by Russ Cox's avatar Russ Cox

runtime: add runtime· prefix to some static variables

Pure renaming. This will make an upcoming CL have smaller diffs.

LGTM=dvyukov, iant
R=iant, dvyukov
CC=golang-codereviews
https://golang.org/cl/142280043
parent 182d1316
@@ -13,7 +13,7 @@
 extern volatile intgo runtime·MemProfileRate;
 
 // dummy MSpan that contains no free objects.
-static MSpan emptymspan;
+static MSpan runtime·emptymspan;
 
 MCache*
 runtime·allocmcache(void)
@@ -27,7 +27,7 @@ runtime·allocmcache(void)
 	runtime·unlock(&runtime·mheap.lock);
 	runtime·memclr((byte*)c, sizeof(*c));
 	for(i = 0; i < NumSizeClasses; i++)
-		c->alloc[i] = &emptymspan;
+		c->alloc[i] = &runtime·emptymspan;
 
 	// Set first allocation sample size.
 	rate = runtime·MemProfileRate;
@@ -83,7 +83,7 @@ runtime·MCache_Refill(MCache *c, int32 sizeclass)
 	s = c->alloc[sizeclass];
 	if(s->freelist != nil)
 		runtime·throw("refill on a nonempty span");
-	if(s != &emptymspan)
+	if(s != &runtime·emptymspan)
 		s->incache = false;
 
 	// Get a new cached span from the central lists.
@@ -107,9 +107,9 @@ runtime·MCache_ReleaseAll(MCache *c)
 	for(i=0; i<NumSizeClasses; i++) {
 		s = c->alloc[i];
-		if(s != &emptymspan) {
+		if(s != &runtime·emptymspan) {
 			runtime·MCentral_UncacheSpan(&runtime·mheap.central[i].mcentral, s);
-			c->alloc[i] = &emptymspan;
+			c->alloc[i] = &runtime·emptymspan;
 		}
 	}
 }
This diff is collapsed.
@@ -32,8 +32,8 @@ enum
 // Stacks are assigned an order according to size.
 // order = log_2(size/FixedStack)
 // There is a free list for each order.
-static MSpan stackpool[NumStackOrders];
-static Mutex stackpoolmu;
+static MSpan runtime·stackpool[NumStackOrders];
+static Mutex runtime·stackpoolmu;
 // TODO: one lock per order?
 
 void
@@ -45,7 +45,7 @@ runtime·stackinit(void)
 		runtime·throw("cache size must be a multiple of page size");
 	for(i = 0; i < NumStackOrders; i++)
-		runtime·MSpanList_Init(&stackpool[i]);
+		runtime·MSpanList_Init(&runtime·stackpool[i]);
 }
 
 // Allocates a stack from the free pool.  Must be called with
@@ -58,7 +58,7 @@ poolalloc(uint8 order)
 	MLink *x;
 	uintptr i;
 
-	list = &stackpool[order];
+	list = &runtime·stackpool[order];
 	s = list->next;
 	if(s == list) {
 		// no free stacks.  Allocate another span worth.
@@ -99,7 +99,7 @@ poolfree(MLink *x, uint8 order)
 		runtime·throw("freeing stack not in a stack span");
 	if(s->freelist == nil) {
 		// s will now have a free stack
-		runtime·MSpanList_Insert(&stackpool[order], s);
+		runtime·MSpanList_Insert(&runtime·stackpool[order], s);
 	}
 	x->next = s->freelist;
 	s->freelist = x;
@@ -127,14 +127,14 @@ stackcacherefill(MCache *c, uint8 order)
 	// Grab half of the allowed capacity (to prevent thrashing).
 	list = nil;
 	size = 0;
-	runtime·lock(&stackpoolmu);
+	runtime·lock(&runtime·stackpoolmu);
 	while(size < StackCacheSize/2) {
 		x = poolalloc(order);
 		x->next = list;
 		list = x;
 		size += FixedStack << order;
 	}
-	runtime·unlock(&stackpoolmu);
+	runtime·unlock(&runtime·stackpoolmu);
 	c->stackcache[order].list = list;
 	c->stackcache[order].size = size;
@@ -150,14 +150,14 @@ stackcacherelease(MCache *c, uint8 order)
 		runtime·printf("stackcacherelease order=%d\n", order);
 	x = c->stackcache[order].list;
 	size = c->stackcache[order].size;
-	runtime·lock(&stackpoolmu);
+	runtime·lock(&runtime·stackpoolmu);
 	while(size > StackCacheSize/2) {
 		y = x->next;
 		poolfree(x, order);
 		x = y;
 		size -= FixedStack << order;
 	}
-	runtime·unlock(&stackpoolmu);
+	runtime·unlock(&runtime·stackpoolmu);
 	c->stackcache[order].list = x;
 	c->stackcache[order].size = size;
 }
@@ -170,7 +170,7 @@ runtime·stackcache_clear(MCache *c)
 	if(StackDebug >= 1)
 		runtime·printf("stackcache clear\n");
-	runtime·lock(&stackpoolmu);
+	runtime·lock(&runtime·stackpoolmu);
 	for(order = 0; order < NumStackOrders; order++) {
 		x = c->stackcache[order].list;
 		while(x != nil) {
@@ -181,7 +181,7 @@ runtime·stackcache_clear(MCache *c)
 		c->stackcache[order].list = nil;
 		c->stackcache[order].size = 0;
 	}
-	runtime·unlock(&stackpoolmu);
+	runtime·unlock(&runtime·stackpoolmu);
 }
 
 Stack
@@ -227,9 +227,9 @@ runtime·stackalloc(uint32 n)
 		// procresize. Just get a stack from the global pool.
 		// Also don't touch stackcache during gc
 		// as it's flushed concurrently.
-		runtime·lock(&stackpoolmu);
+		runtime·lock(&runtime·stackpoolmu);
 		x = poolalloc(order);
-		runtime·unlock(&stackpoolmu);
+		runtime·unlock(&runtime·stackpoolmu);
 	} else {
 		x = c->stackcache[order].list;
 		if(x == nil) {
@@ -289,9 +289,9 @@ runtime·stackfree(Stack stk)
 		x = (MLink*)v;
 		c = g->m->mcache;
 		if(c == nil || g->m->gcing || g->m->helpgc) {
-			runtime·lock(&stackpoolmu);
+			runtime·lock(&runtime·stackpoolmu);
 			poolfree(x, order);
-			runtime·unlock(&stackpoolmu);
+			runtime·unlock(&runtime·stackpoolmu);
 		} else {
 			if(c->stackcache[order].size >= StackCacheSize)
 				stackcacherelease(c, order);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment