Commit c1c851bb authored by Dmitriy Vyukov's avatar Dmitriy Vyukov

runtime: avoid unnecessary zeroization of huge memory blocks

Move zeroization out of the heap mutex.

R=golang-dev, iant, rsc
CC=golang-dev
https://golang.org/cl/6094050
parent 0d55d983
...@@ -60,7 +60,7 @@ runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed) ...@@ -60,7 +60,7 @@ runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
npages = size >> PageShift; npages = size >> PageShift;
if((size & PageMask) != 0) if((size & PageMask) != 0)
npages++; npages++;
s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1); s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, zeroed);
if(s == nil) if(s == nil)
runtime·throw("out of memory"); runtime·throw("out of memory");
size = npages<<PageShift; size = npages<<PageShift;
......
...@@ -380,7 +380,7 @@ struct MHeap ...@@ -380,7 +380,7 @@ struct MHeap
extern MHeap runtime·mheap; extern MHeap runtime·mheap;
void runtime·MHeap_Init(MHeap *h, void *(*allocator)(uintptr)); void runtime·MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
MSpan* runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct); MSpan* runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed);
void runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct); void runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct);
MSpan* runtime·MHeap_Lookup(MHeap *h, void *v); MSpan* runtime·MHeap_Lookup(MHeap *h, void *v);
MSpan* runtime·MHeap_LookupMaybe(MHeap *h, void *v); MSpan* runtime·MHeap_LookupMaybe(MHeap *h, void *v);
......
...@@ -207,7 +207,7 @@ MCentral_Grow(MCentral *c) ...@@ -207,7 +207,7 @@ MCentral_Grow(MCentral *c)
runtime·unlock(c); runtime·unlock(c);
runtime·MGetSizeClassInfo(c->sizeclass, &size, &npages, &n); runtime·MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
s = runtime·MHeap_Alloc(&runtime·mheap, npages, c->sizeclass, 0); s = runtime·MHeap_Alloc(&runtime·mheap, npages, c->sizeclass, 0, 1);
if(s == nil) { if(s == nil) {
// TODO(rsc): Log out of memory // TODO(rsc): Log out of memory
runtime·lock(c); runtime·lock(c);
......
...@@ -66,7 +66,7 @@ runtime·MHeap_Init(MHeap *h, void *(*alloc)(uintptr)) ...@@ -66,7 +66,7 @@ runtime·MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
// Allocate a new span of npage pages from the heap // Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache. // and record its size class in the HeapMap and HeapMapCache.
MSpan* MSpan*
runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct) runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{ {
MSpan *s; MSpan *s;
...@@ -81,6 +81,8 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct) ...@@ -81,6 +81,8 @@ runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
} }
} }
runtime·unlock(h); runtime·unlock(h);
if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
return s; return s;
} }
...@@ -138,9 +140,6 @@ HaveSpan: ...@@ -138,9 +140,6 @@ HaveSpan:
MHeap_FreeLocked(h, t); MHeap_FreeLocked(h, t);
} }
if(*(uintptr*)(s->start<<PageShift) != 0)
runtime·memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
// Record span info, because gc needs to be // Record span info, because gc needs to be
// able to map interior pointer to containing span. // able to map interior pointer to containing span.
s->sizeclass = sizeclass; s->sizeclass = sizeclass;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment