Commit 1a6576db authored by Russ Cox

cmd/5l, cmd/6l, cmd/8l: refactor stack split code

Pull the stack split generation into its own function.
This will make an upcoming change to fix recover
easier to digest.

R=ken2
CC=golang-dev
https://golang.org/cl/13611044
parent bab302de
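
In outline, the change replaces each linker's inline prologue emission with a call to a new stacksplit helper; a condensed sketch only, using the names from the diff below (the Prog chain and appendp are the linkers' existing plumbing):

	// before: the split check was emitted inline, one appendp at a time
	if(!(p->reg & NOSPLIT)) {
		// MOVW g_stackguard(g), R1
		p = appendp(p);
		p->as = AMOVW;
		// ... ~130 more lines of the same ...
	}

	// after: one call that appends the same instructions and
	// returns the last Prog it added
	if(!(p->reg & NOSPLIT))
		p = stacksplit(p, autosize); // emit split check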
@@ -38,6 +38,10 @@ static Sym* sym_div;
static Sym* sym_divu;
static Sym* sym_mod;
static Sym* sym_modu;
static Sym* symmorestack;
static Prog* pmorestack;
static Prog* stacksplit(Prog*, int32);
static void
linkcase(Prog *casep)
@@ -58,9 +62,7 @@ noops(void)
{
Prog *p, *q, *q1, *q2;
int o;
int32 arg;
Prog *pmorestack;
Sym *symmorestack, *tlsfallback, *gmsym;
Sym *tlsfallback, *gmsym;
/*
* find leaf subroutines
@@ -256,136 +258,8 @@ noops(void)
break;
}
if(!(p->reg & NOSPLIT)) {
// MOVW g_stackguard(g), R1
p = appendp(p);
p->as = AMOVW;
p->from.type = D_OREG;
p->from.reg = REGG;
p->to.type = D_REG;
p->to.reg = 1;
if(autosize <= StackSmall) {
// small stack: SP < stackguard
// CMP stackguard, SP
p = appendp(p);
p->as = ACMP;
p->from.type = D_REG;
p->from.reg = 1;
p->reg = REGSP;
} else if(autosize <= StackBig) {
// large stack: SP-framesize < stackguard-StackSmall
// MOVW $-autosize(SP), R2
// CMP stackguard, R2
p = appendp(p);
p->as = AMOVW;
p->from.type = D_CONST;
p->from.reg = REGSP;
p->from.offset = -autosize;
p->to.type = D_REG;
p->to.reg = 2;
p = appendp(p);
p->as = ACMP;
p->from.type = D_REG;
p->from.reg = 1;
p->reg = 2;
} else {
// Such a large stack we need to protect against wraparound
// if SP is close to zero.
// SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
// The +StackGuard on both sides is required to keep the left side positive:
// SP is allowed to be slightly below stackguard. See stack.h.
// CMP $StackPreempt, R1
// MOVW.NE $StackGuard(SP), R2
// SUB.NE R1, R2
// MOVW.NE $(autosize+(StackGuard-StackSmall)), R3
// CMP.NE R3, R2
p = appendp(p);
p->as = ACMP;
p->from.type = D_CONST;
p->from.offset = (uint32)StackPreempt;
p->reg = 1;
p = appendp(p);
p->as = AMOVW;
p->from.type = D_CONST;
p->from.reg = REGSP;
p->from.offset = StackGuard;
p->to.type = D_REG;
p->to.reg = 2;
p->scond = C_SCOND_NE;
p = appendp(p);
p->as = ASUB;
p->from.type = D_REG;
p->from.reg = 1;
p->to.type = D_REG;
p->to.reg = 2;
p->scond = C_SCOND_NE;
p = appendp(p);
p->as = AMOVW;
p->from.type = D_CONST;
p->from.offset = autosize + (StackGuard - StackSmall);
p->to.type = D_REG;
p->to.reg = 3;
p->scond = C_SCOND_NE;
p = appendp(p);
p->as = ACMP;
p->from.type = D_REG;
p->from.reg = 3;
p->reg = 2;
p->scond = C_SCOND_NE;
}
// MOVW.LS $autosize, R1
p = appendp(p);
p->as = AMOVW;
p->scond = C_SCOND_LS;
p->from.type = D_CONST;
p->from.offset = autosize;
p->to.type = D_REG;
p->to.reg = 1;
// MOVW.LS $args, R2
p = appendp(p);
p->as = AMOVW;
p->scond = C_SCOND_LS;
p->from.type = D_CONST;
arg = cursym->text->to.offset2;
if(arg == 1) // special marker for known 0
arg = 0;
if(arg&3)
diag("misaligned argument size in stack split");
p->from.offset = arg;
p->to.type = D_REG;
p->to.reg = 2;
// MOVW.LS R14, R3
p = appendp(p);
p->as = AMOVW;
p->scond = C_SCOND_LS;
p->from.type = D_REG;
p->from.reg = REGLINK;
p->to.type = D_REG;
p->to.reg = 3;
// BL.LS runtime.morestack(SB) // modifies LR, returns with LO still asserted
p = appendp(p);
p->as = ABL;
p->scond = C_SCOND_LS;
p->to.type = D_BRANCH;
p->to.sym = symmorestack;
p->cond = pmorestack;
// BLS start
p = appendp(p);
p->as = ABLS;
p->to.type = D_BRANCH;
p->cond = cursym->text->link;
}
if(!(p->reg & NOSPLIT))
p = stacksplit(p, autosize); // emit split check
// MOVW.W R14,$-autosize(SP)
p = appendp(p);
@@ -554,6 +428,143 @@ noops(void)
}
}
static Prog*
stacksplit(Prog *p, int32 framesize)
{
int32 arg;
// MOVW g_stackguard(g), R1
p = appendp(p);
p->as = AMOVW;
p->from.type = D_OREG;
p->from.reg = REGG;
p->to.type = D_REG;
p->to.reg = 1;
if(framesize <= StackSmall) {
// small stack: SP < stackguard
// CMP stackguard, SP
p = appendp(p);
p->as = ACMP;
p->from.type = D_REG;
p->from.reg = 1;
p->reg = REGSP;
} else if(framesize <= StackBig) {
// large stack: SP-framesize < stackguard-StackSmall
// MOVW $-framesize(SP), R2
// CMP stackguard, R2
p = appendp(p);
p->as = AMOVW;
p->from.type = D_CONST;
p->from.reg = REGSP;
p->from.offset = -framesize;
p->to.type = D_REG;
p->to.reg = 2;
p = appendp(p);
p->as = ACMP;
p->from.type = D_REG;
p->from.reg = 1;
p->reg = 2;
} else {
// Such a large stack we need to protect against wraparound
// if SP is close to zero.
// SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
// The +StackGuard on both sides is required to keep the left side positive:
// SP is allowed to be slightly below stackguard. See stack.h.
// CMP $StackPreempt, R1
// MOVW.NE $StackGuard(SP), R2
// SUB.NE R1, R2
// MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
// CMP.NE R3, R2
p = appendp(p);
p->as = ACMP;
p->from.type = D_CONST;
p->from.offset = (uint32)StackPreempt;
p->reg = 1;
p = appendp(p);
p->as = AMOVW;
p->from.type = D_CONST;
p->from.reg = REGSP;
p->from.offset = StackGuard;
p->to.type = D_REG;
p->to.reg = 2;
p->scond = C_SCOND_NE;
p = appendp(p);
p->as = ASUB;
p->from.type = D_REG;
p->from.reg = 1;
p->to.type = D_REG;
p->to.reg = 2;
p->scond = C_SCOND_NE;
p = appendp(p);
p->as = AMOVW;
p->from.type = D_CONST;
p->from.offset = framesize + (StackGuard - StackSmall);
p->to.type = D_REG;
p->to.reg = 3;
p->scond = C_SCOND_NE;
p = appendp(p);
p->as = ACMP;
p->from.type = D_REG;
p->from.reg = 3;
p->reg = 2;
p->scond = C_SCOND_NE;
}
// MOVW.LS $framesize, R1
p = appendp(p);
p->as = AMOVW;
p->scond = C_SCOND_LS;
p->from.type = D_CONST;
p->from.offset = framesize;
p->to.type = D_REG;
p->to.reg = 1;
// MOVW.LS $args, R2
p = appendp(p);
p->as = AMOVW;
p->scond = C_SCOND_LS;
p->from.type = D_CONST;
arg = cursym->text->to.offset2;
if(arg == 1) // special marker for known 0
arg = 0;
if(arg&3)
diag("misaligned argument size in stack split");
p->from.offset = arg;
p->to.type = D_REG;
p->to.reg = 2;
// MOVW.LS R14, R3
p = appendp(p);
p->as = AMOVW;
p->scond = C_SCOND_LS;
p->from.type = D_REG;
p->from.reg = REGLINK;
p->to.type = D_REG;
p->to.reg = 3;
// BL.LS runtime.morestack(SB) // modifies LR, returns with LO still asserted
p = appendp(p);
p->as = ABL;
p->scond = C_SCOND_LS;
p->to.type = D_BRANCH;
p->to.sym = symmorestack;
p->cond = pmorestack;
// BLS start
p = appendp(p);
p->as = ABLS;
p->to.type = D_BRANCH;
p->cond = cursym->text->link;
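// (morestack returns with LO still asserted, so this branch is taken
// and re-enters the function from the top, now on the grown stack;
// when no split was needed, the condition fails and control falls
// through to the frame setup)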
return p;
}
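
For reference, the three comparisons the generated code encodes can be written as one C predicate. This is a hypothetical model only (needsplit is not a real function in the linkers; StackSmall, StackBig, StackGuard, and StackPreempt are the constants from stack.h, and 32-bit unsigned arithmetic is assumed):

	static int
	needsplit(uint32 sp, uint32 stackguard, uint32 framesize)
	{
		if(framesize <= StackSmall) {
			// small stack: the frame fits inside the guard zone
			return sp < stackguard;
		}
		if(framesize <= StackBig) {
			// large stack: compare the post-allocation SP instead
			return sp - framesize < stackguard - StackSmall;
		}
		// huge stack: stackguard may hold the poisoned StackPreempt
		// value, which must always force a split; adding StackGuard to
		// both sides keeps the left side positive even when SP sits
		// slightly below stackguard (see stack.h).
		if(stackguard == (uint32)StackPreempt)
			return 1;
		return sp - stackguard + StackGuard < framesize + (StackGuard - StackSmall);
	}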
static void
sigdiv(char *n)
{
......
@@ -405,15 +405,19 @@ brloop(Prog *p)
return q;
}
static Prog* load_g_cx(Prog*);
static Prog* stacksplit(Prog*, int32, Prog**);
static Sym *plan9_tos;
static Prog *pmorestack;
static Sym *symmorestack;
void
dostkoff(void)
{
Prog *p, *q, *q1;
int32 autoffset, deltasp;
int a, arg;
Prog *pmorestack;
Sym *symmorestack;
Sym *plan9_tos;
int a;
pmorestack = P;
symmorestack = lookup("runtime.morestack", 0);
@@ -440,9 +444,131 @@ dostkoff(void)
q = P;
q1 = P;
if(pmorestack != P)
if(!(p->from.scale & NOSPLIT)) {
p = appendp(p); // load g into CX
p = appendp(p);
p = load_g_cx(p); // load g into CX
p = stacksplit(p, autoffset, &q); // emit split check
}
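// q, when set by stacksplit, is the conditional branch taken when no
// split is needed; it is patched below to jump to the frame setup.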
if(autoffset) {
p = appendp(p);
p->as = AADJSP;
p->from.type = D_CONST;
p->from.offset = autoffset;
p->spadj = autoffset;
if(q != P)
q->pcond = p;
} else {
// zero-byte stack adjustment.
// Insert a fake non-zero adjustment so that stkcheck can
// recognize the end of the stack-splitting prolog.
p = appendp(p);
p->as = ANOP;
p->spadj = -PtrSize;
p = appendp(p);
p->as = ANOP;
p->spadj = PtrSize;
}
deltasp = autoffset;
if(debug['Z'] && autoffset && !(cursym->text->from.scale&NOSPLIT)) {
// 8l -Z means zero the stack frame on entry.
// This slows down function calls but can help avoid
// false positives in garbage collection.
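// The sequence below is the classic REP STOSL fill: roughly
// memset(SP, 0, autoffset), storing one 4-byte word per iteration.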
p = appendp(p);
p->as = AMOVL;
p->from.type = D_SP;
p->to.type = D_DI;
p = appendp(p);
p->as = AMOVL;
p->from.type = D_CONST;
p->from.offset = autoffset/4;
p->to.type = D_CX;
p = appendp(p);
p->as = AMOVL;
p->from.type = D_CONST;
p->from.offset = 0;
p->to.type = D_AX;
p = appendp(p);
p->as = AREP;
p = appendp(p);
p->as = ASTOSL;
}
for(; p != P; p = p->link) {
a = p->from.type;
if(a == D_AUTO)
p->from.offset += deltasp;
if(a == D_PARAM)
p->from.offset += deltasp + 4;
a = p->to.type;
if(a == D_AUTO)
p->to.offset += deltasp;
if(a == D_PARAM)
p->to.offset += deltasp + 4;
switch(p->as) {
default:
continue;
case APUSHL:
case APUSHFL:
deltasp += 4;
p->spadj = 4;
continue;
case APUSHW:
case APUSHFW:
deltasp += 2;
p->spadj = 2;
continue;
case APOPL:
case APOPFL:
deltasp -= 4;
p->spadj = -4;
continue;
case APOPW:
case APOPFW:
deltasp -= 2;
p->spadj = -2;
continue;
case ARET:
break;
}
if(autoffset != deltasp)
diag("unbalanced PUSH/POP");
if(autoffset) {
p->as = AADJSP;
p->from.type = D_CONST;
p->from.offset = -autoffset;
p->spadj = -autoffset;
p = appendp(p);
p->as = ARET;
// If there are instructions following
// this ARET, they come from a branch
// with the same stackframe, so undo
// the cleanup.
p->spadj = +autoffset;
}
if(p->to.sym) // retjmp
p->as = AJMP;
}
}
}
// Append code to p to load g into CX.
// Overwrites p with the first instruction (no first appendp).
// Overwriting p is unusual but it lets us use this in both the
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
static Prog*
load_g_cx(Prog *p)
{
switch(HEADTYPE) {
case Hwindows:
p->as = AMOVL;
@@ -498,6 +624,20 @@ dostkoff(void)
p->from.offset = tlsoffset + 0;
p->to.type = D_CX;
}
return p;
}
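
Because load_g_cx overwrites its argument instead of appending, the prologue call site first reserves a slot; condensed from dostkoff above:

	p = appendp(p);    // reserve a fresh Prog...
	p = load_g_cx(p);  // ...then overwrite it with the g load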
// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
static Prog*
stacksplit(Prog *p, int32 framesize, Prog **jmpok)
{
Prog *q, *q1;
int arg;
if(debug['K']) {
// 8l -K means check not only for stack
@@ -528,21 +668,21 @@ dostkoff(void)
}
q1 = P;
if(autoffset <= StackSmall) {
if(framesize <= StackSmall) {
// small stack: SP <= stackguard
// CMPL SP, stackguard
p = appendp(p);
p->as = ACMPL;
p->from.type = D_SP;
p->to.type = D_INDIR+D_CX;
} else if(autoffset <= StackBig) {
} else if(framesize <= StackBig) {
// large stack: SP-framesize <= stackguard-StackSmall
// LEAL -(autoffset-StackSmall)(SP), AX
// LEAL -(framesize-StackSmall)(SP), AX
// CMPL AX, stackguard
p = appendp(p);
p->as = ALEAL;
p->from.type = D_INDIR+D_SP;
p->from.offset = -(autoffset-StackSmall);
p->from.offset = -(framesize-StackSmall);
p->to.type = D_AX;
p = appendp(p);
@@ -563,7 +703,7 @@ dostkoff(void)
// JEQ label-of-call-to-morestack
// LEAL StackGuard(SP), AX
// SUBL stackguard, AX
// CMPL AX, $(autoffset+(StackGuard-StackSmall))
// CMPL AX, $(framesize+(StackGuard-StackSmall))
p = appendp(p);
p->as = AMOVL;
p->from.type = D_INDIR+D_CX;
@@ -597,7 +737,7 @@ dostkoff(void)
p->as = ACMPL;
p->from.type = D_AX;
p->to.type = D_CONST;
p->to.offset = autoffset+(StackGuard-StackSmall);
p->to.offset = framesize+(StackGuard-StackSmall);
}
// common
@@ -620,8 +760,8 @@ dostkoff(void)
// that did a stack check. If StackMin is enough, don't ask for a specific
// amount: then we can use the custom functions and save a few
// instructions.
if(StackTop + cursym->text->to.offset2 + PtrSize + autoffset + PtrSize + StackLimit >= StackMin)
p->from.offset = (autoffset+7) & ~7LL;
if(StackTop + cursym->text->to.offset2 + PtrSize + framesize + PtrSize + StackLimit >= StackMin)
p->from.offset = (framesize+7) & ~7LL;
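// (framesize+7) & ~7 rounds up to a multiple of 8: e.g. 20 -> 24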
arg = cursym->text->to.offset2;
if(arg == 1) // special marker for known 0
@@ -644,121 +784,14 @@ dostkoff(void)
p->as = AJMP;
p->to.type = D_BRANCH;
p->pcond = cursym->text->link;
}
if(q != P)
q->pcond = p->link;
if(q1 != P)
q1->pcond = q->link;
if(autoffset) {
p = appendp(p);
p->as = AADJSP;
p->from.type = D_CONST;
p->from.offset = autoffset;
p->spadj = autoffset;
if(q != P)
q->pcond = p;
} else {
// zero-byte stack adjustment.
// Insert a fake non-zero adjustment so that stkcheck can
// recognize the end of the stack-splitting prolog.
p = appendp(p);
p->as = ANOP;
p->spadj = -PtrSize;
p = appendp(p);
p->as = ANOP;
p->spadj = PtrSize;
}
deltasp = autoffset;
if(debug['Z'] && autoffset && !(cursym->text->from.scale&NOSPLIT)) {
// 8l -Z means zero the stack frame on entry.
// This slows down function calls but can help avoid
// false positives in garbage collection.
p = appendp(p);
p->as = AMOVL;
p->from.type = D_SP;
p->to.type = D_DI;
p = appendp(p);
p->as = AMOVL;
p->from.type = D_CONST;
p->from.offset = autoffset/4;
p->to.type = D_CX;
p = appendp(p);
p->as = AMOVL;
p->from.type = D_CONST;
p->from.offset = 0;
p->to.type = D_AX;
p = appendp(p);
p->as = AREP;
p = appendp(p);
p->as = ASTOSL;
}
for(; p != P; p = p->link) {
a = p->from.type;
if(a == D_AUTO)
p->from.offset += deltasp;
if(a == D_PARAM)
p->from.offset += deltasp + 4;
a = p->to.type;
if(a == D_AUTO)
p->to.offset += deltasp;
if(a == D_PARAM)
p->to.offset += deltasp + 4;
switch(p->as) {
default:
continue;
case APUSHL:
case APUSHFL:
deltasp += 4;
p->spadj = 4;
continue;
case APUSHW:
case APUSHFW:
deltasp += 2;
p->spadj = 2;
continue;
case APOPL:
case APOPFL:
deltasp -= 4;
p->spadj = -4;
continue;
case APOPW:
case APOPFW:
deltasp -= 2;
p->spadj = -2;
continue;
case ARET:
break;
}
if(autoffset != deltasp)
diag("unbalanced PUSH/POP");
if(autoffset) {
p->as = AADJSP;
p->from.type = D_CONST;
p->from.offset = -autoffset;
p->spadj = -autoffset;
p = appendp(p);
p->as = ARET;
// If there are instructions following
// this ARET, they come from a branch
// with the same stackframe, so undo
// the cleanup.
p->spadj = +autoffset;
}
if(p->to.sym) // retjmp
p->as = AJMP;
}
}
*jmpok = q;
return p;
}
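
Read together with dostkoff above, the jmpok contract from the caller's side looks like this (a condensed sketch of the wiring shown earlier, not new code):

	q = P;
	p = appendp(p);
	p = load_g_cx(p);                  // g into CX
	p = stacksplit(p, autoffset, &q);  // q = branch taken when no split is needed
	p = appendp(p);
	p->as = AADJSP;                    // allocate the frame
	p->from.type = D_CONST;
	p->from.offset = autoffset;
	p->spadj = autoffset;
	if(q != P)
		q->pcond = p;              // no-split path jumps straight to the ADJSP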
int32
......