fix dynamic stack allocs for amd64

The arm64 backend might have the same problem,
but it is currently unable to handle dynamic
stack allocations even in instruction selection.

Thanks to Jean Dao for reporting the bug.
This commit is contained in:
Quentin Carbonneaux 2017-07-27 19:48:54 -04:00
parent 64c79edda0
commit 2b64b75c84
4 changed files with 41 additions and 4 deletions

View file

@ -488,10 +488,10 @@ emitins(Ins i, Fn *fn, FILE *f)
}
}
static int
static uint64_t
framesz(Fn *fn)
{
int i, o, f;
uint64_t i, o, f;
/* specific to NAlign == 3 */
for (i=0, o=0; i<NCLR; i++)
@ -512,7 +512,8 @@ amd64_emitfn(Fn *fn, FILE *f)
static int id0;
Blk *b, *s;
Ins *i, itmp;
int *r, c, fs, o, n, lbl;
int *r, c, o, n, lbl;
uint64_t fs;
fprintf(f, ".text\n");
if (fn->export)
@ -525,7 +526,7 @@ amd64_emitfn(Fn *fn, FILE *f)
);
fs = framesz(fn);
if (fs)
fprintf(f, "\tsub $%d, %%rsp\n", fs);
fprintf(f, "\tsub $%"PRIu64", %%rsp\n", fs);
if (fn->vararg) {
o = -176;
for (r=amd64_sysv_rsave; r<&amd64_sysv_rsave[6]; r++, o+=8)
@ -537,6 +538,7 @@ amd64_emitfn(Fn *fn, FILE *f)
if (fn->reg & BIT(*r)) {
itmp.arg[0] = TMP(*r);
emitf("pushq %L0", &itmp, fn, f);
fs += 8;
}
for (lbl=0, b=fn->start; b; b=b->link) {
@ -547,6 +549,12 @@ amd64_emitfn(Fn *fn, FILE *f)
lbl = 1;
switch (b->jmp.type) {
case Jret0:
if (fn->dynalloc)
fprintf(f,
"\tmovq %%rbp, %%rsp\n"
"\tsubq $%"PRIu64", %%rsp\n",
fs
);
for (r=&amd64_sysv_rclob[NCLR]; r>amd64_sysv_rclob;)
if (fn->reg & BIT(*--r)) {
itmp.arg[0] = TMP(*r);

View file

@ -291,6 +291,7 @@ Emit:
* the stack remains aligned
* (rsp = 0) mod 16
*/
fn->dynalloc = 1;
if (rtype(i.arg[0]) == RCon) {
sz = fn->con[i.arg[0].val].bits.i;
if (sz < 0 || sz >= INT_MAX-15)