diff --git a/src/arch-lower.c b/src/arch-lower.c
new file mode 100644
index 00000000..5d7e8e66
--- /dev/null
+++ b/src/arch-lower.c
@@ -0,0 +1,62 @@
+/*
+ * shecc - Architecture-specific IR lowering stage
+ *
+ * Introduces a minimal arch-lowering boundary that applies target-specific
+ * tweaks to phase-2 IR (ph2_ir) before final code generation. This keeps
+ * backends simpler by moving decisions that depend on CFG shape or target
+ * quirks out of emit-time where possible.
+ */
+
+#include "../config"
+#include "defs.h"
+
+/* ARM-specific lowering:
+ * - Mark detached conditional branches so codegen can decide between
+ *   short/long forms without re-deriving CFG shape.
+ */
+void arm_lower(void)
+{
+    for (func_t *func = FUNC_LIST.head; func; func = func->next) {
+        for (basic_block_t *bb = func->bbs; bb; bb = bb->rpo_next) {
+            for (ph2_ir_t *insn = bb->ph2_ir_list.head; insn;
+                 insn = insn->next) {
+                /* Mark branches that don't fall through to next block */
+                if (insn->op == OP_branch) {
+                    /* In SSA, we index 'else_bb' first, and then 'then_bb' */
+                    insn->is_branch_detached = (insn->else_bb != bb->rpo_next);
+                }
+            }
+        }
+    }
+}
+
+/* RISC-V-specific lowering:
+ * - Mark detached conditional branches
+ * - Future: prepare for RISC-V specific patterns
+ */
+void riscv_lower(void)
+{
+    for (func_t *func = FUNC_LIST.head; func; func = func->next) {
+        for (basic_block_t *bb = func->bbs; bb; bb = bb->rpo_next) {
+            for (ph2_ir_t *insn = bb->ph2_ir_list.head; insn;
+                 insn = insn->next) {
+                /* Mark branches that don't fall through to next block */
+                if (insn->op == OP_branch)
+                    insn->is_branch_detached = (insn->else_bb != bb->rpo_next);
+            }
+        }
+    }
+}
+
+/* Entry point: dispatch to the active architecture. */
+void arch_lower(void)
+{
+#if ELF_MACHINE == 0x28 /* ARM */
+    arm_lower();
+#elif ELF_MACHINE == 0xf3 /* RISC-V */
+    riscv_lower();
+#else
+    /* Unknown architecture: keep behavior as-is. */
+    (void) 0;
+#endif
+}
diff --git a/src/arm-codegen.c b/src/arm-codegen.c
index db6ac641..c0c8293c 100644
--- a/src/arm-codegen.c
+++ b/src/arm-codegen.c
@@ -172,11 +172,7 @@ void cfg_flatten(void)
             flatten_ir->src1 = bb->belong_to->stack_size;
         }
 
-        if (insn->op == OP_branch) {
-            /* In SSA, we index 'else_bb' first, and then 'then_bb' */
-            if (insn->else_bb != bb->rpo_next)
-                flatten_ir->is_branch_detached = true;
-        }
+        /* Branch detachment is determined in the arch-lowering stage */
 
         update_elf_offset(flatten_ir);
     }
diff --git a/src/globals.c b/src/globals.c
index 2001a456..df8abefe 100644
--- a/src/globals.c
+++ b/src/globals.c
@@ -615,6 +615,9 @@
 ph2_ir_t *add_ph2_ir(opcode_t op)
 {
     ph2_ir_t *ph2_ir = arena_alloc(BB_ARENA, sizeof(ph2_ir_t));
     ph2_ir->op = op;
+    /* Set safe defaults; arch-lowering may annotate later */
+    ph2_ir->next = NULL;
+    ph2_ir->is_branch_detached = 0;
     return add_existed_ph2_ir(ph2_ir);
 }
diff --git a/src/main.c b/src/main.c
index d8427d77..e59de5a7 100644
--- a/src/main.c
+++ b/src/main.c
@@ -37,6 +37,9 @@
 /* Peephole optimization */
 #include "peephole.c"
 
+/* Arch-specific IR lowering boundary */
+#include "arch-lower.c"
+
 /* Machine code generation. support ARMv7-A and RV32I */
 #include "codegen.c"
 
@@ -103,6 +106,9 @@
 
     peephole();
 
+    /* Apply arch-specific IR tweaks before final codegen */
+    arch_lower();
+
     /* flatten CFG to linear instruction */
     cfg_flatten();
 
diff --git a/src/reg-alloc.c b/src/reg-alloc.c
index c9c80c1f..fd025b48 100644
--- a/src/reg-alloc.c
+++ b/src/reg-alloc.c
@@ -55,6 +55,9 @@
 ph2_ir_t *bb_add_ph2_ir(basic_block_t *bb, opcode_t op)
 {
     ph2_ir_t *n = arena_alloc(BB_ARENA, sizeof(ph2_ir_t));
     n->op = op;
+    /* Ensure deterministic defaults for newly created IR nodes */
+    n->next = NULL;              /* well-formed singly linked list */
+    n->is_branch_detached = 0;   /* arch-lowering will set for branches */
     if (!bb->ph2_ir_list.head)
         bb->ph2_ir_list.head = n;