Commit ab0535ae authored by Shenghou Ma; committed by Keith Randall

liblink, cmd/ld, runtime: remove stackguard1

Now that we've removed all the C code in runtime and the C compilers,
there is no need to have a separate stackguard field to check for C
code on Go stack.

Remove field g.stackguard1 and rename g.stackguard0 to g.stackguard.
Adjust liblink and cmd/ld as necessary.

Change-Id: I54e75db5a93d783e86af5ff1a6cd497d669d8d33
Reviewed-on: https://go-review.googlesource.com/2144Reviewed-by: 's avatarKeith Randall <khr@golang.org>
parent 3b76b017
...@@ -131,7 +131,6 @@ struct LSym ...@@ -131,7 +131,6 @@ struct LSym
short type; short type;
short version; short version;
uchar dupok; uchar dupok;
uchar cfunc;
uchar external; uchar external;
uchar nosplit; uchar nosplit;
uchar reachable; uchar reachable;
......
...@@ -1564,56 +1564,3 @@ diag(char *fmt, ...) ...@@ -1564,56 +1564,3 @@ diag(char *fmt, ...)
errorexit(); errorexit();
} }
} }
// checkgo: when the -C debug flag is given, find every C function
// reachable (transitively) from Go code and complain about any such
// function that is not nosplit — a stack-split prologue firing in C
// code running on a Go stack would be unsafe.
//
// cfunc values observed in this routine: 0 = Go function,
// 1 = C function, 2 = C function known to be called from Go.
// NOTE(review): this encoding is inferred from the 1 -> 2 transition
// below — confirm against the cfunc assignments elsewhere in the linker.
void
checkgo(void)
{
LSym *s;
Reloc *r;
int i;
int changed;
// Only run when explicitly requested via the -C flag.
if(!debug['C'])
return;
// TODO(rsc,khr): Eventually we want to get to no Go-called C functions at all,
// which would simplify this logic quite a bit.
// Mark every Go-called C function with cfunc=2, recursively.
// Fixed-point iteration: repeat full passes over the text symbols
// until a pass marks no new function.
do {
changed = 0;
for(s = ctxt->textp; s != nil; s = s->next) {
// Callers treated as "running on a Go stack": Go functions
// (cfunc==0) and Go-called nosplit C functions (cfunc==2 && nosplit).
if(s->cfunc == 0 || (s->cfunc == 2 && s->nosplit)) {
for(i=0; i<s->nr; i++) {
r = &s->r[i];
if(r->sym == nil)
continue;
// Only direct call relocations into text symbols matter here.
if((r->type == R_CALL || r->type == R_CALLARM) && r->sym->type == STEXT) {
if(r->sym->cfunc == 1) {
changed = 1;
r->sym->cfunc = 2;
}
}
}
}
}
}while(changed);
// Complain about Go-called C functions that can split the stack
// (that can be preempted for garbage collection or trigger a stack copy).
for(s = ctxt->textp; s != nil; s = s->next) {
if(s->cfunc == 0 || (s->cfunc == 2 && s->nosplit)) {
for(i=0; i<s->nr; i++) {
r = &s->r[i];
if(r->sym == nil)
continue;
if((r->type == R_CALL || r->type == R_CALLARM) && r->sym->type == STEXT) {
// Go caller -> splittable Go-called C callee: report it.
if(s->cfunc == 0 && r->sym->cfunc == 2 && !r->sym->nosplit)
print("Go %s calls C %s\n", s->name, r->sym->name);
// Nosplit C function (itself Go-called) -> splittable callee:
// report the chain as well.
else if(s->cfunc == 2 && s->nosplit && !r->sym->nosplit)
print("Go calls C %s calls %s\n", s->name, r->sym->name);
}
}
}
}
}
...@@ -183,7 +183,6 @@ uint16 be16(uchar *b); ...@@ -183,7 +183,6 @@ uint16 be16(uchar *b);
uint32 be32(uchar *b); uint32 be32(uchar *b);
uint64 be64(uchar *b); uint64 be64(uchar *b);
void callgraph(void); void callgraph(void);
void checkgo(void);
void cflush(void); void cflush(void);
void codeblk(int64 addr, int64 size); void codeblk(int64 addr, int64 size);
vlong cpos(void); vlong cpos(void);
......
...@@ -172,7 +172,6 @@ main(int argc, char *argv[]) ...@@ -172,7 +172,6 @@ main(int argc, char *argv[])
mark(linklookup(ctxt, "runtime.read_tls_fallback", 0)); mark(linklookup(ctxt, "runtime.read_tls_fallback", 0));
} }
checkgo();
deadcode(); deadcode();
callgraph(); callgraph();
paramspace = "SP"; /* (FP) now (SP) on output */ paramspace = "SP"; /* (FP) now (SP) on output */
......
...@@ -474,7 +474,7 @@ addstacksplit(Link *ctxt, LSym *cursym) ...@@ -474,7 +474,7 @@ addstacksplit(Link *ctxt, LSym *cursym)
p->as = AMOVW; p->as = AMOVW;
p->from.type = D_OREG; p->from.type = D_OREG;
p->from.reg = REGG; p->from.reg = REGG;
p->from.offset = 4*ctxt->arch->ptrsize; // G.panic p->from.offset = 3*ctxt->arch->ptrsize; // G.panic
p->to.type = D_REG; p->to.type = D_REG;
p->to.reg = 1; p->to.reg = 1;
...@@ -783,9 +783,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt) ...@@ -783,9 +783,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt)
p->as = AMOVW; p->as = AMOVW;
p->from.type = D_OREG; p->from.type = D_OREG;
p->from.reg = REGG; p->from.reg = REGG;
p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0 p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
if(ctxt->cursym->cfunc)
p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
p->to.type = D_REG; p->to.type = D_REG;
p->to.reg = 1; p->to.reg = 1;
...@@ -878,10 +876,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt) ...@@ -878,10 +876,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt)
p->as = ABL; p->as = ABL;
p->scond = C_SCOND_LS; p->scond = C_SCOND_LS;
p->to.type = D_BRANCH; p->to.type = D_BRANCH;
if(ctxt->cursym->cfunc) p->to.sym = ctxt->symmorestack[noctxt];
p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
else
p->to.sym = ctxt->symmorestack[noctxt];
// BLS start // BLS start
p = appendp(ctxt, p); p = appendp(ctxt, p);
......
...@@ -452,7 +452,7 @@ addstacksplit(Link *ctxt, LSym *cursym) ...@@ -452,7 +452,7 @@ addstacksplit(Link *ctxt, LSym *cursym)
p = appendp(ctxt, p); p = appendp(ctxt, p);
p->as = AMOVQ; p->as = AMOVQ;
p->from.type = D_INDIR+D_CX; p->from.type = D_INDIR+D_CX;
p->from.offset = 4*ctxt->arch->ptrsize; // G.panic p->from.offset = 3*ctxt->arch->ptrsize; // G.panic
p->to.type = D_BX; p->to.type = D_BX;
if(ctxt->headtype == Hnacl) { if(ctxt->headtype == Hnacl) {
p->as = AMOVL; p->as = AMOVL;
...@@ -689,9 +689,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog ...@@ -689,9 +689,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
p->as = cmp; p->as = cmp;
p->from.type = D_SP; p->from.type = D_SP;
indir_cx(ctxt, &p->to); indir_cx(ctxt, &p->to);
p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0 p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
if(ctxt->cursym->cfunc)
p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
} else if(framesize <= StackBig) { } else if(framesize <= StackBig) {
// large stack: SP-framesize <= stackguard-StackSmall // large stack: SP-framesize <= stackguard-StackSmall
// LEAQ -xxx(SP), AX // LEAQ -xxx(SP), AX
...@@ -706,9 +704,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog ...@@ -706,9 +704,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
p->as = cmp; p->as = cmp;
p->from.type = D_AX; p->from.type = D_AX;
indir_cx(ctxt, &p->to); indir_cx(ctxt, &p->to);
p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0 p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
if(ctxt->cursym->cfunc)
p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
} else { } else {
// Such a large stack we need to protect against wraparound. // Such a large stack we need to protect against wraparound.
// If SP is close to zero: // If SP is close to zero:
...@@ -728,9 +724,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog ...@@ -728,9 +724,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
p = appendp(ctxt, p); p = appendp(ctxt, p);
p->as = mov; p->as = mov;
indir_cx(ctxt, &p->from); indir_cx(ctxt, &p->from);
p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0 p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
if(ctxt->cursym->cfunc)
p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
p->to.type = D_SI; p->to.type = D_SI;
p = appendp(ctxt, p); p = appendp(ctxt, p);
...@@ -771,10 +765,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog ...@@ -771,10 +765,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int32 textarg, int noctxt, Prog
p = appendp(ctxt, p); p = appendp(ctxt, p);
p->as = ACALL; p->as = ACALL;
p->to.type = D_BRANCH; p->to.type = D_BRANCH;
if(ctxt->cursym->cfunc) p->to.sym = ctxt->symmorestack[noctxt];
p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
else
p->to.sym = ctxt->symmorestack[noctxt];
p = appendp(ctxt, p); p = appendp(ctxt, p);
p->as = AJMP; p->as = AJMP;
......
...@@ -335,7 +335,7 @@ addstacksplit(Link *ctxt, LSym *cursym) ...@@ -335,7 +335,7 @@ addstacksplit(Link *ctxt, LSym *cursym)
p = appendp(ctxt, p); p = appendp(ctxt, p);
p->as = AMOVL; p->as = AMOVL;
p->from.type = D_INDIR+D_CX; p->from.type = D_INDIR+D_CX;
p->from.offset = 4*ctxt->arch->ptrsize; // G.panic p->from.offset = 3*ctxt->arch->ptrsize; // G.panic
p->to.type = D_BX; p->to.type = D_BX;
p = appendp(ctxt, p); p = appendp(ctxt, p);
...@@ -538,9 +538,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok) ...@@ -538,9 +538,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
p->as = ACMPL; p->as = ACMPL;
p->from.type = D_SP; p->from.type = D_SP;
p->to.type = D_INDIR+D_CX; p->to.type = D_INDIR+D_CX;
p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0 p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
if(ctxt->cursym->cfunc)
p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
} else if(framesize <= StackBig) { } else if(framesize <= StackBig) {
// large stack: SP-framesize <= stackguard-StackSmall // large stack: SP-framesize <= stackguard-StackSmall
// LEAL -(framesize-StackSmall)(SP), AX // LEAL -(framesize-StackSmall)(SP), AX
...@@ -555,9 +553,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok) ...@@ -555,9 +553,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
p->as = ACMPL; p->as = ACMPL;
p->from.type = D_AX; p->from.type = D_AX;
p->to.type = D_INDIR+D_CX; p->to.type = D_INDIR+D_CX;
p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard0 p->to.offset = 2*ctxt->arch->ptrsize; // G.stackguard
if(ctxt->cursym->cfunc)
p->to.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
} else { } else {
// Such a large stack we need to protect against wraparound // Such a large stack we need to protect against wraparound
// if SP is close to zero. // if SP is close to zero.
...@@ -577,9 +573,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok) ...@@ -577,9 +573,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
p->as = AMOVL; p->as = AMOVL;
p->from.type = D_INDIR+D_CX; p->from.type = D_INDIR+D_CX;
p->from.offset = 0; p->from.offset = 0;
p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0 p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
if(ctxt->cursym->cfunc)
p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
p->to.type = D_SI; p->to.type = D_SI;
p = appendp(ctxt, p); p = appendp(ctxt, p);
...@@ -622,10 +616,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok) ...@@ -622,10 +616,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt, Prog **jmpok)
p = appendp(ctxt, p); p = appendp(ctxt, p);
p->as = ACALL; p->as = ACALL;
p->to.type = D_BRANCH; p->to.type = D_BRANCH;
if(ctxt->cursym->cfunc) p->to.sym = ctxt->symmorestack[noctxt];
p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
else
p->to.sym = ctxt->symmorestack[noctxt];
p = appendp(ctxt, p); p = appendp(ctxt, p);
p->as = AJMP; p->as = AJMP;
......
...@@ -492,7 +492,7 @@ addstacksplit(Link *ctxt, LSym *cursym) ...@@ -492,7 +492,7 @@ addstacksplit(Link *ctxt, LSym *cursym)
q->as = AMOVD; q->as = AMOVD;
q->from.type = D_OREG; q->from.type = D_OREG;
q->from.reg = REGG; q->from.reg = REGG;
q->from.offset = 4*ctxt->arch->ptrsize; // G.panic q->from.offset = 3*ctxt->arch->ptrsize; // G.panic
q->to.type = D_REG; q->to.type = D_REG;
q->to.reg = 3; q->to.reg = 3;
...@@ -724,9 +724,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt) ...@@ -724,9 +724,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt)
p->as = AMOVD; p->as = AMOVD;
p->from.type = D_OREG; p->from.type = D_OREG;
p->from.reg = REGG; p->from.reg = REGG;
p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard0 p->from.offset = 2*ctxt->arch->ptrsize; // G.stackguard
if(ctxt->cursym->cfunc)
p->from.offset = 3*ctxt->arch->ptrsize; // G.stackguard1
p->to.type = D_REG; p->to.type = D_REG;
p->to.reg = 3; p->to.reg = 3;
...@@ -834,10 +832,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt) ...@@ -834,10 +832,7 @@ stacksplit(Link *ctxt, Prog *p, int32 framesize, int noctxt)
p = appendp(ctxt, p); p = appendp(ctxt, p);
p->as = ABL; p->as = ABL;
p->to.type = D_BRANCH; p->to.type = D_BRANCH;
if(ctxt->cursym->cfunc) p->to.sym = ctxt->symmorestack[noctxt];
p->to.sym = linklookup(ctxt, "runtime.morestackc", 0);
else
p->to.sym = ctxt->symmorestack[noctxt];
// BR start // BR start
p = appendp(ctxt, p); p = appendp(ctxt, p);
......
...@@ -332,8 +332,6 @@ writesym(Link *ctxt, Biobuf *b, LSym *s) ...@@ -332,8 +332,6 @@ writesym(Link *ctxt, Biobuf *b, LSym *s)
Bprint(ctxt->bso, "t=%d ", s->type); Bprint(ctxt->bso, "t=%d ", s->type);
if(s->dupok) if(s->dupok)
Bprint(ctxt->bso, "dupok "); Bprint(ctxt->bso, "dupok ");
if(s->cfunc)
Bprint(ctxt->bso, "cfunc ");
if(s->nosplit) if(s->nosplit)
Bprint(ctxt->bso, "nosplit "); Bprint(ctxt->bso, "nosplit ");
Bprint(ctxt->bso, "size=%lld value=%lld", (vlong)s->size, (vlong)s->value); Bprint(ctxt->bso, "size=%lld value=%lld", (vlong)s->size, (vlong)s->value);
...@@ -399,7 +397,7 @@ writesym(Link *ctxt, Biobuf *b, LSym *s) ...@@ -399,7 +397,7 @@ writesym(Link *ctxt, Biobuf *b, LSym *s)
wrint(b, s->args); wrint(b, s->args);
wrint(b, s->locals); wrint(b, s->locals);
wrint(b, s->nosplit); wrint(b, s->nosplit);
wrint(b, s->leaf | s->cfunc<<1); wrint(b, s->leaf);
n = 0; n = 0;
for(a = s->autom; a != nil; a = a->link) for(a = s->autom; a != nil; a = a->link)
n++; n++;
...@@ -643,7 +641,6 @@ overwrite: ...@@ -643,7 +641,6 @@ overwrite:
s->nosplit = rdint(f); s->nosplit = rdint(f);
v = rdint(f); v = rdint(f);
s->leaf = v&1; s->leaf = v&1;
s->cfunc = v&2;
n = rdint(f); n = rdint(f);
for(i=0; i<n; i++) { for(i=0; i<n; i++) {
a = emallocz(sizeof *a); a = emallocz(sizeof *a);
...@@ -699,8 +696,6 @@ overwrite: ...@@ -699,8 +696,6 @@ overwrite:
Bprint(ctxt->bso, "t=%d ", s->type); Bprint(ctxt->bso, "t=%d ", s->type);
if(s->dupok) if(s->dupok)
Bprint(ctxt->bso, "dupok "); Bprint(ctxt->bso, "dupok ");
if(s->cfunc)
Bprint(ctxt->bso, "cfunc ");
if(s->nosplit) if(s->nosplit)
Bprint(ctxt->bso, "nosplit "); Bprint(ctxt->bso, "nosplit ");
Bprint(ctxt->bso, "size=%lld value=%lld", (vlong)s->size, (vlong)s->value); Bprint(ctxt->bso, "size=%lld value=%lld", (vlong)s->size, (vlong)s->value);
......
...@@ -20,8 +20,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 ...@@ -20,8 +20,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
// _cgo_init may update stackguard. // _cgo_init may update stackguard.
MOVL $runtime·g0(SB), BP MOVL $runtime·g0(SB), BP
LEAL (-64*1024+104)(SP), BX LEAL (-64*1024+104)(SP), BX
MOVL BX, g_stackguard0(BP) MOVL BX, g_stackguard(BP)
MOVL BX, g_stackguard1(BP)
MOVL BX, (g_stack+stack_lo)(BP) MOVL BX, (g_stack+stack_lo)(BP)
MOVL SP, (g_stack+stack_hi)(BP) MOVL SP, (g_stack+stack_hi)(BP)
...@@ -51,8 +50,7 @@ nocpuinfo: ...@@ -51,8 +50,7 @@ nocpuinfo:
MOVL $runtime·g0(SB), CX MOVL $runtime·g0(SB), CX
MOVL (g_stack+stack_lo)(CX), AX MOVL (g_stack+stack_lo)(CX), AX
ADDL $const__StackGuard, AX ADDL $const__StackGuard, AX
MOVL AX, g_stackguard0(CX) MOVL AX, g_stackguard(CX)
MOVL AX, g_stackguard1(CX)
// skip runtime·ldt0setup(SB) and tls test after _cgo_init for non-windows // skip runtime·ldt0setup(SB) and tls test after _cgo_init for non-windows
CMPL runtime·iswindows(SB), $0 CMPL runtime·iswindows(SB), $0
......
...@@ -20,8 +20,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 ...@@ -20,8 +20,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
// _cgo_init may update stackguard. // _cgo_init may update stackguard.
MOVQ $runtime·g0(SB), DI MOVQ $runtime·g0(SB), DI
LEAQ (-64*1024+104)(SP), BX LEAQ (-64*1024+104)(SP), BX
MOVQ BX, g_stackguard0(DI) MOVQ BX, g_stackguard(DI)
MOVQ BX, g_stackguard1(DI)
MOVQ BX, (g_stack+stack_lo)(DI) MOVQ BX, (g_stack+stack_lo)(DI)
MOVQ SP, (g_stack+stack_hi)(DI) MOVQ SP, (g_stack+stack_hi)(DI)
...@@ -49,8 +48,7 @@ nocpuinfo: ...@@ -49,8 +48,7 @@ nocpuinfo:
MOVQ $runtime·g0(SB), CX MOVQ $runtime·g0(SB), CX
MOVQ (g_stack+stack_lo)(CX), AX MOVQ (g_stack+stack_lo)(CX), AX
ADDQ $const__StackGuard, AX ADDQ $const__StackGuard, AX
MOVQ AX, g_stackguard0(CX) MOVQ AX, g_stackguard(CX)
MOVQ AX, g_stackguard1(CX)
CMPL runtime·iswindows(SB), $0 CMPL runtime·iswindows(SB), $0
JEQ ok JEQ ok
......
...@@ -22,8 +22,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 ...@@ -22,8 +22,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
// create istack out of the given (operating system) stack. // create istack out of the given (operating system) stack.
MOVL $runtime·g0(SB), DI MOVL $runtime·g0(SB), DI
LEAL (-64*1024+104)(SP), BX LEAL (-64*1024+104)(SP), BX
MOVL BX, g_stackguard0(DI) MOVL BX, g_stackguard(DI)
MOVL BX, g_stackguard1(DI)
MOVL BX, (g_stack+stack_lo)(DI) MOVL BX, (g_stack+stack_lo)(DI)
MOVL SP, (g_stack+stack_hi)(DI) MOVL SP, (g_stack+stack_hi)(DI)
......
...@@ -32,8 +32,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$-4 ...@@ -32,8 +32,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$-4
// create istack out of the OS stack // create istack out of the OS stack
MOVW $(-8192+104)(R13), R0 MOVW $(-8192+104)(R13), R0
MOVW R0, g_stackguard0(g) MOVW R0, g_stackguard(g)
MOVW R0, g_stackguard1(g)
MOVW R0, (g_stack+stack_lo)(g) MOVW R0, (g_stack+stack_lo)(g)
MOVW R13, (g_stack+stack_hi)(g) MOVW R13, (g_stack+stack_hi)(g)
...@@ -56,8 +55,7 @@ nocgo: ...@@ -56,8 +55,7 @@ nocgo:
// update stackguard after _cgo_init // update stackguard after _cgo_init
MOVW (g_stack+stack_lo)(g), R0 MOVW (g_stack+stack_lo)(g), R0
ADD $const__StackGuard, R0 ADD $const__StackGuard, R0
MOVW R0, g_stackguard0(g) MOVW R0, g_stackguard(g)
MOVW R0, g_stackguard1(g)
BL runtime·checkgoarm(SB) BL runtime·checkgoarm(SB)
BL runtime·check(SB) BL runtime·check(SB)
......
...@@ -22,8 +22,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 ...@@ -22,8 +22,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0
MOVD $runtime·g0(SB), g MOVD $runtime·g0(SB), g
MOVD $(-64*1024), R31 MOVD $(-64*1024), R31
ADD R31, R1, R3 ADD R31, R1, R3
MOVD R3, g_stackguard0(g) MOVD R3, g_stackguard(g)
MOVD R3, g_stackguard1(g)
MOVD R3, (g_stack+stack_lo)(g) MOVD R3, (g_stack+stack_lo)(g)
MOVD R1, (g_stack+stack_hi)(g) MOVD R1, (g_stack+stack_hi)(g)
......
...@@ -114,7 +114,7 @@ func unlock(l *mutex) { ...@@ -114,7 +114,7 @@ func unlock(l *mutex) {
throw("runtime·unlock: lock count") throw("runtime·unlock: lock count")
} }
if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
gp.stackguard0 = stackPreempt gp.stackguard = stackPreempt
} }
} }
......
...@@ -115,7 +115,7 @@ func unlock(l *mutex) { ...@@ -115,7 +115,7 @@ func unlock(l *mutex) {
throw("runtime·unlock: lock count") throw("runtime·unlock: lock count")
} }
if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
gp.stackguard0 = stackPreempt gp.stackguard = stackPreempt
} }
} }
......
...@@ -64,7 +64,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { ...@@ -64,7 +64,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
} }
mp.mallocing = 1 mp.mallocing = 1
if mp.curg != nil { if mp.curg != nil {
mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad mp.curg.stackguard = ^uintptr(0xfff) | 0xbad
} }
} }
...@@ -127,7 +127,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { ...@@ -127,7 +127,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
} }
mp.mallocing = 0 mp.mallocing = 0
if mp.curg != nil { if mp.curg != nil {
mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard mp.curg.stackguard = mp.curg.stack.lo + _StackGuard
} }
// Note: one releasem for the acquirem just above. // Note: one releasem for the acquirem just above.
// The other for the acquirem at start of malloc. // The other for the acquirem at start of malloc.
...@@ -319,7 +319,7 @@ marked: ...@@ -319,7 +319,7 @@ marked:
} }
mp.mallocing = 0 mp.mallocing = 0
if mp.curg != nil { if mp.curg != nil {
mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard mp.curg.stackguard = mp.curg.stack.lo + _StackGuard
} }
// Note: one releasem for the acquirem just above. // Note: one releasem for the acquirem just above.
// The other for the acquirem at start of malloc. // The other for the acquirem at start of malloc.
......
...@@ -179,9 +179,6 @@ func mcommoninit(mp *m) { ...@@ -179,9 +179,6 @@ func mcommoninit(mp *m) {
sched.mcount++ sched.mcount++
checkmcount() checkmcount()
mpreinit(mp) mpreinit(mp)
if mp.gsignal != nil {
mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
}
// Add to allm so garbage collector doesn't free g->m // Add to allm so garbage collector doesn't free g->m
// when it is just in a register or thread-local storage. // when it is just in a register or thread-local storage.
...@@ -213,7 +210,7 @@ func ready(gp *g) { ...@@ -213,7 +210,7 @@ func ready(gp *g) {
} }
_g_.m.locks-- _g_.m.locks--
if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
_g_.stackguard0 = stackPreempt _g_.stackguard = stackPreempt
} }
} }
...@@ -463,7 +460,7 @@ func stopg(gp *g) bool { ...@@ -463,7 +460,7 @@ func stopg(gp *g) bool {
if !gp.gcworkdone { if !gp.gcworkdone {
gp.preemptscan = true gp.preemptscan = true
gp.preempt = true gp.preempt = true
gp.stackguard0 = stackPreempt gp.stackguard = stackPreempt
} }
// Unclaim. // Unclaim.
...@@ -545,7 +542,7 @@ func mquiesce(gpmaster *g) { ...@@ -545,7 +542,7 @@ func mquiesce(gpmaster *g) {
gp.gcworkdone = true // scan is a noop gp.gcworkdone = true // scan is a noop
break break
} }
if status == _Grunning && gp.stackguard0 == uintptr(stackPreempt) && notetsleep(&sched.stopnote, 100*1000) { // nanosecond arg if status == _Grunning && gp.stackguard == uintptr(stackPreempt) && notetsleep(&sched.stopnote, 100*1000) { // nanosecond arg
noteclear(&sched.stopnote) noteclear(&sched.stopnote)
} else { } else {
stopscanstart(gp) stopscanstart(gp)
...@@ -704,7 +701,7 @@ func starttheworld() { ...@@ -704,7 +701,7 @@ func starttheworld() {
} }
_g_.m.locks-- _g_.m.locks--
if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
_g_.stackguard0 = stackPreempt _g_.stackguard = stackPreempt
} }
} }
...@@ -725,8 +722,7 @@ func mstart() { ...@@ -725,8 +722,7 @@ func mstart() {
} }
// Initialize stack guards so that we can start calling // Initialize stack guards so that we can start calling
// both Go and C functions with stack growth prologues. // both Go and C functions with stack growth prologues.
_g_.stackguard0 = _g_.stack.lo + _StackGuard _g_.stackguard = _g_.stack.lo + _StackGuard
_g_.stackguard1 = _g_.stackguard0
mstart1() mstart1()
} }
...@@ -806,7 +802,7 @@ func allocm(_p_ *p) *m { ...@@ -806,7 +802,7 @@ func allocm(_p_ *p) *m {
} }
_g_.m.locks-- _g_.m.locks--
if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
_g_.stackguard0 = stackPreempt _g_.stackguard = stackPreempt
} }
return mp return mp
...@@ -883,7 +879,7 @@ func needm(x byte) { ...@@ -883,7 +879,7 @@ func needm(x byte) {
_g_ := getg() _g_ := getg()
_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024 _g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
_g_.stackguard0 = _g_.stack.lo + _StackGuard _g_.stackguard = _g_.stack.lo + _StackGuard
// Initialize this thread to use the m. // Initialize this thread to use the m.
asminit() asminit()
...@@ -1221,7 +1217,7 @@ func execute(gp *g) { ...@@ -1221,7 +1217,7 @@ func execute(gp *g) {
casgstatus(gp, _Grunnable, _Grunning) casgstatus(gp, _Grunnable, _Grunning)
gp.waitsince = 0 gp.waitsince = 0
gp.preempt = false gp.preempt = false
gp.stackguard0 = gp.stack.lo + _StackGuard gp.stackguard = gp.stack.lo + _StackGuard
_g_.m.p.schedtick++ _g_.m.p.schedtick++
_g_.m.curg = gp _g_.m.curg = gp
gp.m = _g_.m gp.m = _g_.m
...@@ -1617,7 +1613,7 @@ func reentersyscall(pc, sp uintptr) { ...@@ -1617,7 +1613,7 @@ func reentersyscall(pc, sp uintptr) {
// (See details in comment above.) // (See details in comment above.)
// Catch calls that might, by replacing the stack guard with something that // Catch calls that might, by replacing the stack guard with something that
// will trip any stack check and leaving a flag to tell newstack to die. // will trip any stack check and leaving a flag to tell newstack to die.
_g_.stackguard0 = stackPreempt _g_.stackguard = stackPreempt
_g_.throwsplit = true _g_.throwsplit = true
// Leave SP around for GC and traceback. // Leave SP around for GC and traceback.
...@@ -1648,7 +1644,7 @@ func reentersyscall(pc, sp uintptr) { ...@@ -1648,7 +1644,7 @@ func reentersyscall(pc, sp uintptr) {
// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
// We set _StackGuard to StackPreempt so that first split stack check calls morestack. // We set _StackGuard to StackPreempt so that first split stack check calls morestack.
// Morestack detects this case and throws. // Morestack detects this case and throws.
_g_.stackguard0 = stackPreempt _g_.stackguard = stackPreempt
_g_.m.locks-- _g_.m.locks--
} }
...@@ -1686,7 +1682,7 @@ func entersyscallblock(dummy int32) { ...@@ -1686,7 +1682,7 @@ func entersyscallblock(dummy int32) {
_g_.m.locks++ // see comment in entersyscall _g_.m.locks++ // see comment in entersyscall
_g_.throwsplit = true _g_.throwsplit = true
_g_.stackguard0 = stackPreempt // see comment in entersyscall _g_.stackguard = stackPreempt // see comment in entersyscall
// Leave SP around for GC and traceback. // Leave SP around for GC and traceback.
pc := getcallerpc(unsafe.Pointer(&dummy)) pc := getcallerpc(unsafe.Pointer(&dummy))
...@@ -1752,10 +1748,10 @@ func exitsyscall(dummy int32) { ...@@ -1752,10 +1748,10 @@ func exitsyscall(dummy int32) {
_g_.m.locks-- _g_.m.locks--
if _g_.preempt { if _g_.preempt {
// restore the preemption request in case we've cleared it in newstack // restore the preemption request in case we've cleared it in newstack
_g_.stackguard0 = stackPreempt _g_.stackguard = stackPreempt
} else { } else {
// otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
_g_.stackguard0 = _g_.stack.lo + _StackGuard _g_.stackguard = _g_.stack.lo + _StackGuard
} }
_g_.throwsplit = false _g_.throwsplit = false
return return
...@@ -1873,7 +1869,7 @@ func beforefork() { ...@@ -1873,7 +1869,7 @@ func beforefork() {
// Code between fork and exec must not allocate memory nor even try to grow stack. // Code between fork and exec must not allocate memory nor even try to grow stack.
// Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack.
// runtime_AfterFork will undo this in parent process, but not in child. // runtime_AfterFork will undo this in parent process, but not in child.
gp.stackguard0 = stackFork gp.stackguard = stackFork
} }
// Called from syscall package before fork. // Called from syscall package before fork.
...@@ -1887,7 +1883,7 @@ func afterfork() { ...@@ -1887,7 +1883,7 @@ func afterfork() {
gp := getg().m.curg gp := getg().m.curg
// See the comment in beforefork. // See the comment in beforefork.
gp.stackguard0 = gp.stack.lo + _StackGuard gp.stackguard = gp.stack.lo + _StackGuard
hz := sched.profilehz hz := sched.profilehz
if hz != 0 { if hz != 0 {
...@@ -1911,8 +1907,7 @@ func malg(stacksize int32) *g { ...@@ -1911,8 +1907,7 @@ func malg(stacksize int32) *g {
systemstack(func() { systemstack(func() {
newg.stack = stackalloc(uint32(stacksize)) newg.stack = stackalloc(uint32(stacksize))
}) })
newg.stackguard0 = newg.stack.lo + _StackGuard newg.stackguard = newg.stack.lo + _StackGuard
newg.stackguard1 = ^uintptr(0)
} }
return newg return newg
} }
...@@ -2008,7 +2003,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr ...@@ -2008,7 +2003,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
} }
_g_.m.locks-- _g_.m.locks--
if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
_g_.stackguard0 = stackPreempt _g_.stackguard = stackPreempt
} }
return newg return newg
} }
...@@ -2027,7 +2022,7 @@ func gfput(_p_ *p, gp *g) { ...@@ -2027,7 +2022,7 @@ func gfput(_p_ *p, gp *g) {
stackfree(gp.stack) stackfree(gp.stack)
gp.stack.lo = 0 gp.stack.lo = 0
gp.stack.hi = 0 gp.stack.hi = 0
gp.stackguard0 = 0 gp.stackguard = 0
} }
gp.schedlink = _p_.gfree gp.schedlink = _p_.gfree
...@@ -2073,7 +2068,7 @@ retry: ...@@ -2073,7 +2068,7 @@ retry:
systemstack(func() { systemstack(func() {
gp.stack = stackalloc(_FixedStack) gp.stack = stackalloc(_FixedStack)
}) })
gp.stackguard0 = gp.stack.lo + _StackGuard gp.stackguard = gp.stack.lo + _StackGuard
} else { } else {
if raceenabled { if raceenabled {
racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
...@@ -2778,10 +2773,10 @@ func preemptone(_p_ *p) bool { ...@@ -2778,10 +2773,10 @@ func preemptone(_p_ *p) bool {
gp.preempt = true gp.preempt = true
// Every call in a go routine checks for stack overflow by // Every call in a go routine checks for stack overflow by
// comparing the current stack pointer to gp->stackguard0. // comparing the current stack pointer to gp->stackguard.
// Setting gp->stackguard0 to StackPreempt folds // Setting gp->stackguard to StackPreempt folds
// preemption into the normal stack overflow check. // preemption into the normal stack overflow check.
gp.stackguard0 = stackPreempt gp.stackguard = stackPreempt
return true return true
} }
......
...@@ -386,7 +386,7 @@ func releasem(mp *m) { ...@@ -386,7 +386,7 @@ func releasem(mp *m) {
mp.locks-- mp.locks--
if mp.locks == 0 && _g_.preempt { if mp.locks == 0 && _g_.preempt {
// restore the preemption request in case we've cleared it in newstack // restore the preemption request in case we've cleared it in newstack
_g_.stackguard0 = stackPreempt _g_.stackguard = stackPreempt
} }
} }
......
...@@ -154,14 +154,10 @@ type stack struct { ...@@ -154,14 +154,10 @@ type stack struct {
type g struct { type g struct {
// Stack parameters. // Stack parameters.
// stack describes the actual stack memory: [stack.lo, stack.hi). // stack describes the actual stack memory: [stack.lo, stack.hi).
// stackguard0 is the stack pointer compared in the Go stack growth prologue. // stackguard is the stack pointer compared in the Go stack growth prologue.
// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption. // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
// stackguard1 is the stack pointer compared in the C stack growth prologue. stack stack // offset known to runtime/cgo
// It is stack.lo+StackGuard on g0 and gsignal stacks. stackguard uintptr // offset known to liblink
// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
stack stack // offset known to runtime/cgo
stackguard0 uintptr // offset known to liblink
stackguard1 uintptr // offset known to liblink
_panic *_panic // innermost panic - offset known to liblink _panic *_panic // innermost panic - offset known to liblink
_defer *_defer // innermost defer _defer *_defer // innermost defer
...@@ -175,7 +171,7 @@ type g struct { ...@@ -175,7 +171,7 @@ type g struct {
waitreason string // if status==gwaiting waitreason string // if status==gwaiting
schedlink *g schedlink *g
issystem bool // do not output in stack dump, ignore in deadlock detector issystem bool // do not output in stack dump, ignore in deadlock detector
preempt bool // preemption signal, duplicates stackguard0 = stackpreempt preempt bool // preemption signal, duplicates stackguard = stackpreempt
paniconfault bool // panic (instead of crash) on unexpected fault address paniconfault bool // panic (instead of crash) on unexpected fault address
preemptscan bool // preempted g does scan for gc preemptscan bool // preempted g does scan for gc
gcworkdone bool // debug: cleared at begining of gc work phase cycle, set by gcphasework, tested at end of cycle gcworkdone bool // debug: cleared at begining of gc work phase cycle, set by gcphasework, tested at end of cycle
......
...@@ -26,13 +26,13 @@ const ( ...@@ -26,13 +26,13 @@ const (
poisonStack = uintptrMask & 0x6868686868686868 poisonStack = uintptrMask & 0x6868686868686868
// Goroutine preemption request. // Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure. // Stored into g->stackguard to cause split stack check failure.
// Must be greater than any real sp. // Must be greater than any real sp.
// 0xfffffade in hex. // 0xfffffade in hex.
stackPreempt = uintptrMask & -1314 stackPreempt = uintptrMask & -1314
// Thread is forking. // Thread is forking.
// Stored into g->stackguard0 to cause split stack check failure. // Stored into g->stackguard to cause split stack check failure.
// Must be greater than any real sp. // Must be greater than any real sp.
stackFork = uintptrMask & -1234 stackFork = uintptrMask & -1234
) )
...@@ -566,7 +566,7 @@ func copystack(gp *g, newsize uintptr) { ...@@ -566,7 +566,7 @@ func copystack(gp *g, newsize uintptr) {
// Swap out old stack for new one // Swap out old stack for new one
gp.stack = new gp.stack = new
gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request gp.stackguard = new.lo + _StackGuard // NOTE: might clobber a preempt request
gp.sched.sp = new.hi - used gp.sched.sp = new.hi - used
// free old stack // free old stack
...@@ -611,7 +611,7 @@ func round2(x int32) int32 { ...@@ -611,7 +611,7 @@ func round2(x int32) int32 {
func newstack() { func newstack() {
thisg := getg() thisg := getg()
// TODO: double check all gp. shouldn't be getg(). // TODO: double check all gp. shouldn't be getg().
if thisg.m.morebuf.g.stackguard0 == stackFork { if thisg.m.morebuf.g.stackguard == stackFork {
throw("stack growth after fork") throw("stack growth after fork")
} }
if thisg.m.morebuf.g != thisg.m.curg { if thisg.m.morebuf.g != thisg.m.curg {
...@@ -674,7 +674,7 @@ func newstack() { ...@@ -674,7 +674,7 @@ func newstack() {
writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt)) writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
} }
if gp.stackguard0 == stackPreempt { if gp.stackguard == stackPreempt {
if gp == thisg.m.g0 { if gp == thisg.m.g0 {
throw("runtime: preempt g0") throw("runtime: preempt g0")
} }
...@@ -689,7 +689,7 @@ func newstack() { ...@@ -689,7 +689,7 @@ func newstack() {
gcphasework(gp) gcphasework(gp)
casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting) casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
casgstatus(gp, _Gwaiting, _Grunning) casgstatus(gp, _Gwaiting, _Grunning)
gp.stackguard0 = gp.stack.lo + _StackGuard gp.stackguard = gp.stack.lo + _StackGuard
gp.preempt = false gp.preempt = false
gp.preemptscan = false // Tells the GC premption was successful. gp.preemptscan = false // Tells the GC premption was successful.
gogo(&gp.sched) // never return gogo(&gp.sched) // never return
...@@ -700,7 +700,7 @@ func newstack() { ...@@ -700,7 +700,7 @@ func newstack() {
if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.gcing != 0 || thisg.m.p.status != _Prunning { if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.gcing != 0 || thisg.m.p.status != _Prunning {
// Let the goroutine keep running for now. // Let the goroutine keep running for now.
// gp->preempt is set, so it will be preempted next time. // gp->preempt is set, so it will be preempted next time.
gp.stackguard0 = gp.stack.lo + _StackGuard gp.stackguard = gp.stack.lo + _StackGuard
casgstatus(gp, _Gwaiting, _Grunning) casgstatus(gp, _Gwaiting, _Grunning)
gogo(&gp.sched) // never return gogo(&gp.sched) // never return
} }
...@@ -804,10 +804,3 @@ func shrinkfinish() { ...@@ -804,10 +804,3 @@ func shrinkfinish() {
s = t s = t
} }
} }
//go:nosplit
func morestackc() {
systemstack(func() {
throw("attempt to execute C code on Go stack")
})
}
...@@ -97,7 +97,7 @@ const ( ...@@ -97,7 +97,7 @@ const (
) )
// Goroutine preemption request. // Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure. // Stored into g->stackguard to cause split stack check failure.
// Must be greater than any real sp. // Must be greater than any real sp.
// 0xfffffade in hex. // 0xfffffade in hex.
const ( const (
......
...@@ -148,8 +148,7 @@ TEXT runtime·tstart_plan9(SB),NOSPLIT,$0 ...@@ -148,8 +148,7 @@ TEXT runtime·tstart_plan9(SB),NOSPLIT,$0
MOVL AX, (g_stack+stack_hi)(DX) MOVL AX, (g_stack+stack_hi)(DX)
SUBL $(64*1024), AX // stack size SUBL $(64*1024), AX // stack size
MOVL AX, (g_stack+stack_lo)(DX) MOVL AX, (g_stack+stack_lo)(DX)
MOVL AX, g_stackguard0(DX) MOVL AX, g_stackguard(DX)
MOVL AX, g_stackguard1(DX)
// Initialize procid from TOS struct. // Initialize procid from TOS struct.
MOVL _tos(SB), AX MOVL _tos(SB), AX
......
...@@ -145,8 +145,7 @@ TEXT runtime·tstart_plan9(SB),NOSPLIT,$0 ...@@ -145,8 +145,7 @@ TEXT runtime·tstart_plan9(SB),NOSPLIT,$0
MOVQ AX, (g_stack+stack_hi)(DX) MOVQ AX, (g_stack+stack_hi)(DX)
SUBQ $(64*1024), AX // stack size SUBQ $(64*1024), AX // stack size
MOVQ AX, (g_stack+stack_lo)(DX) MOVQ AX, (g_stack+stack_lo)(DX)
MOVQ AX, g_stackguard0(DX) MOVQ AX, g_stackguard(DX)
MOVQ AX, g_stackguard1(DX)
// Initialize procid from TOS struct. // Initialize procid from TOS struct.
MOVQ _tos(SB), AX MOVQ _tos(SB), AX
......
...@@ -134,8 +134,7 @@ TEXT runtime·tstart_sysvicall(SB),NOSPLIT,$0 ...@@ -134,8 +134,7 @@ TEXT runtime·tstart_sysvicall(SB),NOSPLIT,$0
SUBQ $(0x100000), AX // stack size SUBQ $(0x100000), AX // stack size
MOVQ AX, (g_stack+stack_lo)(DX) MOVQ AX, (g_stack+stack_lo)(DX)
ADDQ $const__StackGuard, AX ADDQ $const__StackGuard, AX
MOVQ AX, g_stackguard0(DX) MOVQ AX, g_stackguard(DX)
MOVQ AX, g_stackguard1(DX)
// Someday the convention will be D is always cleared. // Someday the convention will be D is always cleared.
CLD CLD
......
...@@ -209,8 +209,7 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT,$0 ...@@ -209,8 +209,7 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT,$0
LEAL -8192(SP), CX LEAL -8192(SP), CX
MOVL CX, (g_stack+stack_lo)(SP) MOVL CX, (g_stack+stack_lo)(SP)
ADDL $const__StackGuard, CX ADDL $const__StackGuard, CX
MOVL CX, g_stackguard0(SP) MOVL CX, g_stackguard(SP)
MOVL CX, g_stackguard1(SP)
MOVL DX, (g_stack+stack_hi)(SP) MOVL DX, (g_stack+stack_hi)(SP)
PUSHL 16(BP) // arg for handler PUSHL 16(BP) // arg for handler
...@@ -315,8 +314,7 @@ TEXT runtime·tstart(SB),NOSPLIT,$0 ...@@ -315,8 +314,7 @@ TEXT runtime·tstart(SB),NOSPLIT,$0
SUBL $(64*1024), AX // stack size SUBL $(64*1024), AX // stack size
MOVL AX, (g_stack+stack_lo)(DX) MOVL AX, (g_stack+stack_lo)(DX)
ADDL $const__StackGuard, AX ADDL $const__StackGuard, AX
MOVL AX, g_stackguard0(DX) MOVL AX, g_stackguard(DX)
MOVL AX, g_stackguard1(DX)
// Set up tls. // Set up tls.
LEAL m_tls(CX), SI LEAL m_tls(CX), SI
......
...@@ -246,8 +246,7 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT,$0 ...@@ -246,8 +246,7 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT,$0
LEAQ -8192(SP), CX LEAQ -8192(SP), CX
MOVQ CX, (g_stack+stack_lo)(SP) MOVQ CX, (g_stack+stack_lo)(SP)
ADDQ $const__StackGuard, CX ADDQ $const__StackGuard, CX
MOVQ CX, g_stackguard0(SP) MOVQ CX, g_stackguard(SP)
MOVQ CX, g_stackguard1(SP)
MOVQ DX, (g_stack+stack_hi)(SP) MOVQ DX, (g_stack+stack_hi)(SP)
PUSHQ 32(BP) // arg for handler PUSHQ 32(BP) // arg for handler
...@@ -356,8 +355,7 @@ TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0 ...@@ -356,8 +355,7 @@ TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0
SUBQ $(64*1024), AX // stack size SUBQ $(64*1024), AX // stack size
MOVQ AX, (g_stack+stack_lo)(DX) MOVQ AX, (g_stack+stack_lo)(DX)
ADDQ $const__StackGuard, AX ADDQ $const__StackGuard, AX
MOVQ AX, g_stackguard0(DX) MOVQ AX, g_stackguard(DX)
MOVQ AX, g_stackguard1(DX)
// Set up tls. // Set up tls.
LEAQ m_tls(CX), SI LEAQ m_tls(CX), SI
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment