Commit 86a3a542 authored by Dmitriy Vyukov

runtime: fix windows build

Currently windows crashes because early allocs in schedinit
try to allocate tiny memory blocks, but m->p is not yet setup.
I've considered calling procresize(1) earlier in schedinit,
but this refactoring is better and must fix the issue as well.
Fixes #7218.

R=golang-codereviews, r
CC=golang-codereviews
https://golang.org/cl/54570045
parent 179d41fe
......@@ -42,7 +42,6 @@ runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
MCacheList *l;
MLink *v;
byte *tiny;
P *p;
if(size == 0) {
// All 0-length allocations use this pointer.
......@@ -93,10 +92,9 @@ runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
// the allocator reduces number of allocations by ~12% and
// reduces heap size by ~20%.
p = m->p;
tinysize = p->tinysize;
tinysize = c->tinysize;
if(size <= tinysize) {
tiny = p->tiny;
tiny = c->tiny;
// Align tiny pointer for required (conservative) alignment.
if((size&7) == 0)
tiny = (byte*)ROUND((uintptr)tiny, 8);
......@@ -104,12 +102,12 @@ runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
tiny = (byte*)ROUND((uintptr)tiny, 4);
else if((size&1) == 0)
tiny = (byte*)ROUND((uintptr)tiny, 2);
size1 = size + (tiny - p->tiny);
size1 = size + (tiny - c->tiny);
if(size1 <= tinysize) {
// The object fits into existing tiny block.
v = (MLink*)tiny;
p->tiny += size1;
p->tinysize -= size1;
c->tiny += size1;
c->tinysize -= size1;
m->mallocing = 0;
m->locks--;
if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
......@@ -129,8 +127,8 @@ runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
// See if we need to replace the existing tiny block with the new one
// based on amount of remaining free space.
if(TinySize-size > tinysize) {
p->tiny = (byte*)v + size;
p->tinysize = TinySize - size;
c->tiny = (byte*)v + size;
c->tinysize = TinySize - size;
}
size = TinySize;
goto done;
......
......@@ -296,6 +296,10 @@ struct MCache
// so they are grouped here for better caching.
int32 next_sample; // trigger heap sample after allocating this many bytes
intptr local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap
// Allocator cache for tiny objects w/o pointers.
// See "Tiny allocator" comment in malloc.goc.
byte* tiny;
uintptr tinysize;
// The rest is not accessed on every malloc.
MCacheList list[NumSizeClasses];
// Local allocator stats, flushed during GC.
......
......@@ -68,6 +68,7 @@ clearpools(void)
{
void **pool, **next;
P *p, **pp;
MCache *c;
uintptr off;
int32 i;
......@@ -86,8 +87,11 @@ clearpools(void)
for(pp=runtime·allp; p=*pp; pp++) {
// clear tinyalloc pool
p->tiny = nil;
p->tinysize = 0;
c = p->mcache;
if(c != nil) {
c->tiny = nil;
c->tinysize = 0;
}
// clear defer pools
for(i=0; i<nelem(p->deferpool); i++)
p->deferpool[i] = nil;
......
......@@ -385,11 +385,6 @@ struct P
MCache* mcache;
Defer* deferpool[5]; // pool of available Defer structs of different sizes (see panic.c)
// Allocator cache for tiny objects w/o pointers.
// See "Tiny allocator" comment in malloc.goc.
byte* tiny;
uintptr tinysize;
// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
uint64 goidcache;
uint64 goidcacheend;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment