aboutsummaryrefslogtreecommitdiffhomepage
path: root/ir/mem2reg.c
diff options
context:
space:
mode:
authorlemon <lsof@mailbox.org>2025-12-20 17:59:40 +0100
committerlemon <lsof@mailbox.org>2025-12-20 19:31:50 +0100
commite6fbab42185f4fb4e3a3b1e3e93eaa5d3d81b7c7 (patch)
treedba8dd1a91f532dd104dc1299b32e79babacb499 /ir/mem2reg.c
parenta5009ae762541c29e9a123bf70877261db4ff628 (diff)
backend: unify pass memory allocation strategies
Allocation of the temporary data structures used by individual passes was inconsistent — each pass picked its own strategy. Now there is an arena dedicated to that purpose, which is nicer.
Diffstat (limited to 'ir/mem2reg.c')
-rw-r--r--ir/mem2reg.c29
1 file changed, 6 insertions, 23 deletions
diff --git a/ir/mem2reg.c b/ir/mem2reg.c
index f9ad245..a1a4132 100644
--- a/ir/mem2reg.c
+++ b/ir/mem2reg.c
@@ -23,6 +23,7 @@ static const uchar load2ext[] = {
/* Implements algorithm in 'Simple and Efficient Construction of Static Single Assignment' (Braun et al) */
struct ssabuilder {
+ struct arena **arena;
imap_of(union ref *) curdefs; /* map of var to (map of block to def of var) */
struct bitset *sealed, /* set of sealed blocks */
*marked; /* blocks marked, for 'Marker Algorithm' in the paper */
@@ -98,7 +99,7 @@ writevar(struct ssabuilder *sb, int var, struct block *blk, union ref val)
{
union ref **pcurdefs;
if (!(pcurdefs = imap_get(&sb->curdefs, var))) {
- pcurdefs = imap_set(&sb->curdefs, var, xcalloc(sb->nblk * sizeof(union ref)));
+ pcurdefs = imap_set(&sb->curdefs, var, allocz(sb->arena, sb->nblk * sizeof(union ref), 0));
}
if (val.t == RTMP) assert(instrtab[val.i].op != Onop);
(*pcurdefs)[blk->id] = val;
@@ -222,25 +223,15 @@ cmpuse(const void *a, const void *b)
void
mem2reg(struct function *fn)
{
- static struct bitset bsbuf[2][4];
- struct ssabuilder sb = { .nblk = fn->nblk };
- struct block *blk;
+ struct ssabuilder sb = { fn->passarena, .nblk = fn->nblk };
FREQUIRE(FNUSE);
- if (fn->nblk <= BSNBIT * countof(bsbuf[0])) {
- sb.sealed = bsbuf[0];
- sb.marked = bsbuf[1];
- memset(bsbuf[0], 0, BSSIZE(fn->nblk) * sizeof *bsbuf[0]);
- memset(bsbuf[1], 0, BSSIZE(fn->nblk) * sizeof *bsbuf[1]);
- } else {
- sb.sealed = xcalloc(BSSIZE(fn->nblk) * sizeof *sb.sealed);
- sb.marked = xcalloc(BSSIZE(fn->nblk) * sizeof *sb.marked);
- }
-
+ sb.sealed = allocz(sb.arena, BSSIZE(fn->nblk) * sizeof *sb.sealed, 0);
+ sb.marked = allocz(sb.arena, BSSIZE(fn->nblk) * sizeof *sb.sealed, 0);
sortrpo(fn);
- blk = fn->entry;
+ struct block *blk = fn->entry;
do {
for (int i = 0; i < blk->ins.n; ++i) {
struct use *use, *uend;
@@ -326,14 +317,6 @@ mem2reg(struct function *fn)
delphi(blk, i--);
} while ((blk = blk->lnext) != fn->entry);
- if (sb.sealed != bsbuf[0]) {
- free(sb.sealed);
- free(sb.marked);
- }
-
- for (int i = 0; i < sb.curdefs.mb.N; ++i)
- if (bstest(sb.curdefs.mb.bs, i))
- free(sb.curdefs.v[i]);
imap_free(&sb.curdefs);
if (ccopt.dbg.m) {
bfmt(ccopt.dbgout, "<< After mem2reg >>\n");