+use crate::{
+    frame::Frame,
+    masm::{MacroAssembler, OperandSize, RegImm},
+    reg::Reg,
+    regalloc::RegAlloc,
+    stack::{Stack, Val},
+};
+
+/// The code generation context.
+///
+/// The code generation context is made up of three
+/// essential data structures:
+///
+/// * The register allocator, in charge of keeping an inventory of
+///   register availability.
+/// * The value stack, which keeps track of the state of each value
+///   after every operation.
+/// * The current function's frame.
+///
+/// These data structures must cooperate with each other to perform
+/// most of the operations needed during code generation. The code
+/// generation context should generally be used as the single entry
+/// point to the compound functionality provided by its elements.
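+///
+/// A sketch of typical usage, assuming `regalloc`, `stack`, `frame` and
+/// a `masm: impl MacroAssembler` constructed elsewhere:
+///
+/// ```ignore
+/// let mut context = CodeGenContext::new(regalloc, stack, &frame);
+/// // Pop the stack top into any available register, spilling if needed.
+/// let reg = context.pop_to_reg(&mut masm, OperandSize::S32);
+/// // Push the result back onto the value stack.
+/// context.stack.push(Val::reg(reg));
+/// ```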
+pub(crate) struct CodeGenContext<'a> {
+    /// The register allocator.
+    pub regalloc: RegAlloc,
+    /// The value stack.
+    pub stack: Stack,
+    /// The current function's frame.
+    pub frame: &'a Frame,
+}
+
+impl<'a> CodeGenContext<'a> {
+    /// Create a new code generation context.
+    pub fn new(regalloc: RegAlloc, stack: Stack, frame: &'a Frame) -> Self {
+        Self {
+            regalloc,
+            stack,
+            frame,
+        }
+    }
+
+    /// Request a specific general purpose register from the register
+    /// allocator, spilling if it's not available.
+    pub fn gpr<M: MacroAssembler>(&mut self, named: Reg, masm: &mut M) -> Reg {
+        self.regalloc.gpr(named, &mut |regalloc| {
+            Self::spill(&mut self.stack, regalloc, &self.frame, masm)
+        })
+    }
+
+    /// Request the next available general purpose register from the
+    /// register allocator, spilling if no registers are available.
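+    ///
+    /// A sketch of requesting and releasing a scratch register, assuming
+    /// a `masm: impl MacroAssembler` in scope:
+    ///
+    /// ```ignore
+    /// let tmp = context.any_gpr(&mut masm);
+    /// // ... use `tmp` ...
+    /// context.free_gpr(tmp);
+    /// ```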
+    pub fn any_gpr<M: MacroAssembler>(&mut self, masm: &mut M) -> Reg {
+        self.regalloc
+            .any_gpr(&mut |regalloc| Self::spill(&mut self.stack, regalloc, &self.frame, masm))
+    }
+
+    /// Free the given general purpose register.
+    pub fn free_gpr(&mut self, reg: Reg) {
+        self.regalloc.free_gpr(reg);
+    }
| 62 | + |
| 63 | + /// Loads the stack top value into a register, if it isn't already one; |
| 64 | + /// spilling if there are no registers available. |
| 65 | + pub fn pop_to_reg<M: MacroAssembler>(&mut self, masm: &mut M, size: OperandSize) -> Reg { |
| 66 | + if let Some(reg) = self.stack.pop_reg() { |
| 67 | + return reg; |
| 68 | + } |
| 69 | + |
| 70 | + let dst = self.any_gpr(masm); |
| 71 | + let val = self.stack.pop().expect("a value at stack top"); |
| 72 | + Self::move_val_to_reg(val, dst, masm, self.frame, size); |
| 73 | + dst |
| 74 | + } |
+
+    /// Pops the value at the stack top into the given named register.
+    /// If the stack top doesn't already contain the register, the
+    /// register gets allocated, potentially causing a spill, and the
+    /// value at the top of the stack gets loaded into it.
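+    ///
+    /// A sketch of forcing a value into a fixed register, e.g. for
+    /// instructions with implicit register operands; `regs::rax()` is an
+    /// assumed helper returning the `rax` register:
+    ///
+    /// ```ignore
+    /// let rax = context.pop_to_named_reg(&mut masm, regs::rax(), OperandSize::S64);
+    /// ```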
+    pub fn pop_to_named_reg<M: MacroAssembler>(
+        &mut self,
+        masm: &mut M,
+        named: Reg,
+        size: OperandSize,
+    ) -> Reg {
+        if let Some(reg) = self.stack.pop_named_reg(named) {
+            return reg;
+        }
+
+        let dst = self.gpr(named, masm);
+        let val = self.stack.pop().expect("a value at stack top");
+        Self::move_val_to_reg(val, dst, masm, self.frame, size);
+        dst
+    }
+
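+    /// Move the given source value into the destination register,
+    /// loading locals from their frame slot when needed.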
+    fn move_val_to_reg<M: MacroAssembler>(
+        src: Val,
+        dst: Reg,
+        masm: &mut M,
+        frame: &Frame,
+        size: OperandSize,
+    ) {
+        match src {
+            Val::Reg(src) => masm.mov(RegImm::reg(src), RegImm::reg(dst), size),
+            Val::I32(imm) => masm.mov(RegImm::imm(imm.into()), RegImm::reg(dst), size),
+            Val::I64(imm) => masm.mov(RegImm::imm(imm), RegImm::reg(dst), size),
+            Val::Local(index) => {
+                let slot = frame
+                    .get_local(index)
+                    .expect(&format!("valid local at index = {}", index));
+                let addr = masm.local_address(&slot);
+                masm.load(addr, dst, slot.ty.into());
+            }
+            v => panic!("Unsupported value {:?}", v),
+        };
+    }
+
+    /// Prepares arguments for emitting an i32 binary operation.
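+    ///
+    /// A sketch of emitting an `i32.add` through this helper, assuming
+    /// the `MacroAssembler` exposes an `add` with this shape
+    /// (`i64_binop` below works analogously):
+    ///
+    /// ```ignore
+    /// context.i32_binop(&mut masm, &mut |masm, dst, src, size| {
+    ///     masm.add(dst, src, size);
+    /// });
+    /// ```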
+    pub fn i32_binop<F, M>(&mut self, masm: &mut M, emit: &mut F)
+    where
+        F: FnMut(&mut M, RegImm, RegImm, OperandSize),
+        M: MacroAssembler,
+    {
+        let top = self.stack.peek().expect("value at stack top");
+
+        if top.is_i32_const() {
+            let val = self
+                .stack
+                .pop_i32_const()
+                .expect("i32 const value at stack top");
+            let reg = self.pop_to_reg(masm, OperandSize::S32);
+            emit(
+                masm,
+                RegImm::reg(reg),
+                RegImm::imm(val as i64),
+                OperandSize::S32,
+            );
+            self.stack.push(Val::reg(reg));
+        } else {
+            let src = self.pop_to_reg(masm, OperandSize::S32);
+            let dst = self.pop_to_reg(masm, OperandSize::S32);
+            emit(masm, dst.into(), src.into(), OperandSize::S32);
+            self.regalloc.free_gpr(src);
+            self.stack.push(Val::reg(dst));
+        }
+    }
+
+    /// Prepares arguments for emitting an i64 binary operation.
+    pub fn i64_binop<F, M>(&mut self, masm: &mut M, emit: &mut F)
+    where
+        F: FnMut(&mut M, RegImm, RegImm, OperandSize),
+        M: MacroAssembler,
+    {
+        let top = self.stack.peek().expect("value at stack top");
+        if top.is_i64_const() {
+            let val = self
+                .stack
+                .pop_i64_const()
+                .expect("i64 const value at stack top");
+            let reg = self.pop_to_reg(masm, OperandSize::S64);
+            emit(masm, RegImm::reg(reg), RegImm::imm(val), OperandSize::S64);
+            self.stack.push(Val::reg(reg));
+        } else {
+            let src = self.pop_to_reg(masm, OperandSize::S64);
+            let dst = self.pop_to_reg(masm, OperandSize::S64);
+            emit(masm, dst.into(), src.into(), OperandSize::S64);
+            self.regalloc.free_gpr(src);
+            self.stack.push(Val::reg(dst));
+        }
+    }
+
+    /// Spill locals and registers to memory.
+    // TODO: optimize the spill range.
+    //
+    // At any point in the program, the stack might already contain
+    // `Memory` entries; we could effectively ignore that range,
+    // focusing only on the range that contains spillable values.
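+    //
+    // A sketch of the transformation performed on the value stack
+    // (the offsets are illustrative; the actual values come from
+    // `masm.push`):
+    //
+    //   before: [Reg(rax), Local(0), I32(1)]
+    //   after:  [Memory(8), Memory(16), I32(1)]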
+    fn spill<M: MacroAssembler>(
+        stack: &mut Stack,
+        regalloc: &mut RegAlloc,
+        frame: &Frame,
+        masm: &mut M,
+    ) {
+        stack.inner_mut().iter_mut().for_each(|v| match v {
+            Val::Reg(r) => {
+                let offset = masm.push(*r);
+                regalloc.free_gpr(*r);
+                *v = Val::Memory(offset);
+            }
+            Val::Local(index) => {
+                let slot = frame.get_local(*index).expect("valid local at index");
+                let addr = masm.local_address(&slot);
+                masm.load(addr, regalloc.scratch, slot.ty.into());
+                let offset = masm.push(regalloc.scratch);
+                *v = Val::Memory(offset);
+            }
+            // Constants and values already spilled to memory don't need
+            // any spilling.
+            _ => {}
+        });
+    }
+}