Skip to content
Closed
Show file tree
Hide file tree
Changes from 20 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 17 additions & 0 deletions compiler/noirc_evaluator/src/ssa/ir/basic_block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,11 @@ impl BasicBlock {
&self.parameters
}

/// Returns a mutable reference to the parameters of this block.
///
/// NOTE(review): callers are responsible for keeping these parameters consistent
/// with the arguments of any `Jmp` terminators targeting this block — TODO confirm
/// which passes rely on that invariant.
pub(crate) fn parameters_mut(&mut self) -> &mut Vec<ValueId> {
    &mut self.parameters
}

/// Removes all the parameters of this block
pub(crate) fn take_parameters(&mut self) -> Vec<ValueId> {
std::mem::take(&mut self.parameters)
Expand Down Expand Up @@ -154,4 +159,16 @@ impl BasicBlock {
| None => vec![].into_iter(),
}
}

/// Returns the [CallStackId] associated with this block's terminator
/// Panics if this block does not have a terminator.
pub(crate) fn call_stack(&self) -> CallStackId {
use TerminatorInstruction::*;
match self.unwrap_terminator() {
Jmp { call_stack, .. }
| JmpIf { call_stack, .. }
| Return { call_stack, .. }
| Unreachable { call_stack, .. } => *call_stack,
}
}
}
10 changes: 10 additions & 0 deletions compiler/noirc_evaluator/src/ssa/ir/dfg.rs
Original file line number Diff line number Diff line change
Expand Up @@ -877,6 +877,16 @@ impl std::ops::Index<ValueId> for DataFlowGraph {
}
}

impl std::ops::IndexMut<ValueId> for DataFlowGraph {
    /// Mutable access to a value by id.
    ///
    /// Panics when `id` refers to a global value, since globals must not be mutated.
    fn index_mut(&mut self, id: ValueId) -> &mut Self::Output {
        match &mut self.values[id] {
            Value::Global(_) => panic!("Cannot mutate a global ValueId! {id}"),
            value => value,
        }
    }
}

impl std::ops::Index<BasicBlockId> for DataFlowGraph {
type Output = BasicBlock;
fn index(&self, id: BasicBlockId) -> &Self::Output {
Expand Down
3 changes: 3 additions & 0 deletions compiler/noirc_evaluator/src/ssa/ir/dom.rs
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,9 @@ impl DominatorTree {
// comparison on the reverse post-order may allow to test whether we have passed "a"
// without waiting until we reach the root of the tree.
loop {
if let Some(res) = self.cache.get(&(block_a_id, block_b_id)) {
return *res;
}
match self.reverse_post_order_cmp(block_a_id, block_b_id) {
Ordering::Less => {
block_b_id = match self.immediate_dominator(block_b_id) {
Expand Down
46 changes: 36 additions & 10 deletions compiler/noirc_evaluator/src/ssa/ir/function_inserter.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,9 @@ use rustc_hash::FxHashMap as HashMap;
pub(crate) struct FunctionInserter<'f> {
pub(crate) function: &'f mut Function,

values: HashMap<ValueId, ValueId>,
/// Reaching into `values` directly from outside this module is discouraged but
/// unfortunately sometimes necessary to avoid double mutable borrows of `function`.
pub(crate) values: HashMap<ValueId, ValueId>,
}

impl<'f> FunctionInserter<'f> {
Expand All @@ -28,9 +30,24 @@ impl<'f> FunctionInserter<'f> {
/// If there is no updated value for this id, this returns the same
/// ValueId that was passed in.
pub(crate) fn resolve(&self, value: ValueId) -> ValueId {
match self.values.get(&value) {
Some(new_value) => self.resolve(*new_value),
None => value,
Self::resolve_detached(value, &self.values)
}

/// Resolves a ValueId to its new, updated value.
/// If there is no updated value for this id, this returns the same
/// ValueId that was passed in.
///
/// Unlike [Self::resolve], this function does not borrow self, allowing it to be used when
/// [Self::function] is also mutably borrowed.
pub(crate) fn resolve_detached(
    mut value: ValueId,
    values: &HashMap<ValueId, ValueId>,
) -> ValueId {
    // Follow the substitution chain until we reach an id with no mapping.
    while let Some(new_value) = values.get(&value) {
        value = *new_value;
    }
    value
}

Expand All @@ -42,27 +59,36 @@ impl<'f> FunctionInserter<'f> {
self.values.remove(&key);
} else {
#[cfg(debug_assertions)]
self.validate_map_value(key, value);
Self::validate_map_value(key, value, &self.values);
self.values.entry(key).or_insert(value);
}
}

/// Insert a key, value pair in the map
pub(crate) fn map_value(&mut self, key: ValueId, value: ValueId) {
Self::map_value_detached(key, value, &mut self.values);
}

/// Insert a key, value pair in the map without borrowing `self.function`
pub(crate) fn map_value_detached(
key: ValueId,
value: ValueId,
values: &mut HashMap<ValueId, ValueId>,
) {
if key == value {
self.values.remove(&key);
values.remove(&key);
} else {
#[cfg(debug_assertions)]
self.validate_map_value(key, value);
self.values.insert(key, value);
Self::validate_map_value(key, value, values);
values.insert(key, value);
}
}

/// Sanity check that we are not creating back-and-forth cycles.
/// Doesn't catch longer cycles, but has detected mistakes with reusing instructions.
#[cfg(debug_assertions)]
fn validate_map_value(&self, key: ValueId, value: ValueId) {
if let Some(value_of_value) = self.values.get(&value) {
fn validate_map_value(key: ValueId, value: ValueId, values: &HashMap<ValueId, ValueId>) {
if let Some(value_of_value) = values.get(&value) {
assert!(*value_of_value != key, "mapping short-circuit: {key} <-> {value}");
}
}
Expand Down
8 changes: 8 additions & 0 deletions compiler/noirc_evaluator/src/ssa/ir/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -330,6 +330,14 @@ impl Type {
}
}
}

/// If this is a reference type, return the type it references.
/// Returns `None` for all non-reference types.
pub(crate) fn reference_element_type(&self) -> Option<&Type> {
    if let Type::Reference(element_type) = self {
        Some(element_type.as_ref())
    } else {
        None
    }
}
}

/// Composite Types are essentially flattened struct or tuple types.
Expand Down
15 changes: 11 additions & 4 deletions compiler/noirc_evaluator/src/ssa/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,8 @@ pub fn primary_passes(options: &SsaEvaluatorOptions) -> Vec<SsaPass<'_>> {
"Inlining",
),
// Run mem2reg with the CFG separated into blocks
SsaPass::new(Ssa::mem2reg, "Mem2Reg"),
SsaPass::new(Ssa::mem2reg_simple, "Mem2Reg"),
SsaPass::new(Ssa::mem2reg, "Mem2Reg Complex"),
// Running DIE here might remove some unused instructions mem2reg could not eliminate.
SsaPass::new(
Ssa::dead_instruction_elimination_pre_flattening,
Expand All @@ -173,7 +174,7 @@ pub fn primary_passes(options: &SsaEvaluatorOptions) -> Vec<SsaPass<'_>> {
"Unrolling",
),
SsaPass::new(Ssa::simplify_cfg, "Simplifying"),
SsaPass::new(Ssa::mem2reg, "Mem2Reg"),
SsaPass::new(Ssa::mem2reg, "Mem2Reg Complex"),
SsaPass::new(Ssa::remove_bit_shifts, "Removing Bit Shifts"),
// Expand signed lt/div/mod after "Removing Bit Shifts" because that pass might
// introduce signed divisions.
Expand All @@ -182,7 +183,7 @@ pub fn primary_passes(options: &SsaEvaluatorOptions) -> Vec<SsaPass<'_>> {
SsaPass::new(Ssa::flatten_cfg, "Flattening"),
// Run mem2reg once more with the flattened CFG to catch any remaining loads/stores,
// then try to free memory before inlining, which involves copying a instructions.
SsaPass::new(Ssa::mem2reg, "Mem2Reg").and_then(Ssa::remove_unused_instructions),
SsaPass::new(Ssa::mem2reg, "Mem2Reg Complex").and_then(Ssa::remove_unused_instructions),
// Run the inlining pass again to handle functions with `InlineType::NoPredicates`.
// Before flattening is run, we treat functions marked with the `InlineType::NoPredicates` as an entry point.
// This pass must come immediately following `mem2reg` as the succeeding passes
Expand Down Expand Up @@ -225,7 +226,7 @@ pub fn primary_passes(options: &SsaEvaluatorOptions) -> Vec<SsaPass<'_>> {
.and_then(Ssa::remove_unreachable_functions),
// We cannot run mem2reg after DIE, because it removes Store instructions.
// We have to run it before, to give it a chance to turn Store+Load into known values.
SsaPass::new(Ssa::mem2reg, "Mem2Reg"),
SsaPass::new(Ssa::mem2reg, "Mem2Reg Complex"),
SsaPass::new(Ssa::dead_instruction_elimination, "Dead Instruction Elimination"),
SsaPass::new(Ssa::brillig_entry_point_analysis, "Brillig Entry Point Analysis")
// Remove any potentially unnecessary duplication from the Brillig entry point analysis.
Expand All @@ -242,6 +243,12 @@ pub fn primary_passes(options: &SsaEvaluatorOptions) -> Vec<SsaPass<'_>> {
),
SsaPass::new(Ssa::remove_unreachable_instructions, "Remove Unreachable Instructions")
.and_then(Ssa::remove_unreachable_functions),
SsaPass::new(Ssa::mem2reg, "Mem2Reg Complex"),
SsaPass::new(Ssa::mem2reg_simple, "Mem2Reg Simple"),
SsaPass::new(
|ssa| ssa.fold_constants_using_constraints(options.constant_folding_max_iter),
"Constant Folding using constraints",
),
SsaPass::new(Ssa::dead_instruction_elimination, "Dead Instruction Elimination")
// A function can be potentially unreachable post-DIE if all calls to that function were removed.
.and_then(Ssa::remove_unreachable_functions),
Expand Down
11 changes: 10 additions & 1 deletion compiler/noirc_evaluator/src/ssa/opt/constant_folding/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -508,6 +508,8 @@ impl Context {
let use_constraint_info = self.use_constraint_info;
let is_make_array = matches!(instruction, Instruction::MakeArray { .. });

let first_result = instruction_results.first().copied();

let cache_instruction = || {
let predicate = self.cache_predicate(side_effects_enabled_var, instruction, dfg);
// If we see this make_array again, we can reuse the current result.
Expand All @@ -523,8 +525,15 @@ impl Context {
match can_be_deduplicated {
CanBeDeduplicated::Always => cache_instruction(),
CanBeDeduplicated::UnderSamePredicate if use_constraint_info => cache_instruction(),

// We also allow deduplicating MakeArray instructions that we have tracked which haven't been mutated.
_ if is_make_array => cache_instruction(),
// We disallow this for arrays containing references however, since this can impact
// reference aliasing.
_ if is_make_array
&& !dfg.type_of_value(first_result.unwrap()).contains_reference() =>
{
cache_instruction();
}

CanBeDeduplicated::UnderSamePredicate | CanBeDeduplicated::Never => {}
}
Expand Down
5 changes: 3 additions & 2 deletions compiler/noirc_evaluator/src/ssa/opt/die.rs
Original file line number Diff line number Diff line change
Expand Up @@ -526,8 +526,9 @@ fn can_be_eliminated_if_unused(
/// pass and after the last mem2reg pass. This is currently the case for the DIE
/// pass where this check is done, but does mean that we cannot perform mem2reg
/// after the DIE pass.
fn should_remove_store(func: &Function, flattened: bool) -> bool {
flattened && func.runtime().is_acir() && func.reachable_blocks().len() == 1
fn should_remove_store(_func: &Function, _flattened: bool) -> bool {
    // Store removal is currently disabled: removing stores breaks mem2reg_simple.
    // The previous condition, kept here for reference until this is re-enabled, was:
    //   flattened && func.runtime().is_acir() && func.reachable_blocks().len() == 1
    false
}

/// Check pre-execution properties:
Expand Down
10 changes: 9 additions & 1 deletion compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@
//! performed before loop unrolling to try to allow for mutable variables used for loop indices.
mod alias_set;
mod block;
mod cfg;

use std::collections::{BTreeMap, BTreeSet};

Expand Down Expand Up @@ -790,7 +791,14 @@ impl<'f> PerFunctionContext<'f> {
TerminatorInstruction::JmpIf { .. } | TerminatorInstruction::Unreachable { .. } => (), // Nothing to do
TerminatorInstruction::Jmp { destination, arguments, .. } => {
let destination_parameters = self.inserter.function.dfg[*destination].parameters();
assert_eq!(destination_parameters.len(), arguments.len());
assert_eq!(
destination_parameters.len(),
arguments.len(),
"Block {block} has {} argument(s) for destination {destination} with {} parameter(s) in function:\n{}",
arguments.len(),
destination_parameters.len(),
self.inserter.function,
);

// If we have multiple parameters that alias that same argument value,
// then those parameters also alias each other.
Expand Down
130 changes: 130 additions & 0 deletions compiler/noirc_evaluator/src/ssa/opt/mem2reg/cfg.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
//! Process the cfg for mem2reg_simple to ensure that for each node with multiple predecessors,
//! all of those predecessors have only a single child node. This ensures all of these predecessors
//! will end in [TerminatorInstruction::Jmp] rather than [TerminatorInstruction::JmpIf], allowing
//! arguments to be passed to the merge node.
use crate::ssa::ir::function::Function;
use crate::ssa::ir::{cfg::ControlFlowGraph, instruction::TerminatorInstruction};
use crate::ssa::ssa_gen::Ssa;

impl Ssa {
pub(crate) fn process_cfg_for_mem2reg_simple(mut self) -> Self {
// For each function, ensure that any block with multiple predecessors has those
// predecessors end in a single-successor jump. If a predecessor currently has
// multiple successors (e.g. due to a JmpIf), create a new forwarding block that
// contains only a Jmp terminator to the original target and retarget the

Check warning on line 14 in compiler/noirc_evaluator/src/ssa/opt/mem2reg/cfg.rs

View workflow job for this annotation

GitHub Actions / Code

Unknown word (retarget)
// predecessor to jump to the new forwarding block. This guarantees that the
// predecessor's terminator can become a single-successor Jmp, allowing later
// passes to append arguments to the jump.
for function in self.functions.values_mut() {
function.process_cfg();
}
self
}
}

impl Function {
    /// Goal: For every block with multiple predecessors, each predecessor should only have a single successor.
    /// To accomplish this, when we find such a predecessor with multiple successors, we create a new block in-between
    /// such that the predecessor now targets this new block and the new block targets only the original block.
    fn process_cfg(&mut self) {
        // We don't need to update this cfg as we create new blocks since we only process
        // each block once and the newly created blocks will not need to be processed either.
        let cfg = ControlFlowGraph::with_function(self);

        for block in self.reachable_blocks() {
            let predecessors = cfg.predecessors(block);
            // A block with zero or one predecessor needs no edge splitting.
            if predecessors.len() <= 1 {
                continue;
            }

            for predecessor in predecessors {
                // A predecessor with a single successor already ends in a Jmp;
                // nothing to change for it.
                if cfg.successors(predecessor).len() <= 1 {
                    continue;
                }

                // We have `predecessor -> block`, create a new block such that `predecessor -> new_block -> block`.
                // This block doesn't need any arguments since `predecessor` is a jmpif and can't
                // pass arguments to `block` anyway.
                let new_block = self.dfg.make_block();
                // Reuse the predecessor terminator's call stack for the forwarding jump.
                let call_stack = self.dfg[predecessor].call_stack();
                self.dfg[new_block].set_terminator(TerminatorInstruction::Jmp {
                    destination: block,
                    arguments: Vec::new(),
                    call_stack,
                });

                // And change the predecessor to jump to `new_block` instead of `block`
                self.dfg[predecessor].unwrap_terminator_mut().mutate_blocks(|destination| {
                    if destination == block { new_block } else { destination }
                });
            }
        }
    }
}

#[cfg(test)]
mod test {
    use crate::{assert_ssa_snapshot, ssa::ssa_gen::Ssa};

    // Regression test for critical-edge splitting: every predecessor of a
    // multi-predecessor block must end up with exactly one successor.
    #[test]
    fn add_arg_to_jmpif_block_regression() {
        // b0 -> b1 -> b2 -> b4 -> b6
        // | ^ | \ /
        // V \ | \ /
        // b3 \ V VV
        // b5 <------b7
        //
        // b5 has multiple predecessors: b2 and b7, but b2 has multiple successors.
        // we need to create a new block b8 such that `b2 -> b8 -> b5` so that each predecessor of b5
        // only has one successor.
        // Similarly, b7 also has multiple predecessors, but b4 has multiple successors.
        let src = "
acir(inline) fn to_le_bits f19 {
b0(v0: Field):
jmp b1(u32 0)
b1(v12: u32):
jmpif u1 0 then: b2, else: b3
b2():
jmpif u1 0 then: b4, else: b5
b3():
return v0
b4():
jmpif u1 0 then: b6, else: b7
b5():
jmp b1(u32 0)
b6():
jmp b7()
b7():
jmp b5()
}";

        let ssa = Ssa::from_str(src).unwrap();
        let ssa = ssa.process_cfg_for_mem2reg_simple();

        // The addition of b8 and b9 are the only changes
        assert_ssa_snapshot!(ssa, @r"
acir(inline) fn to_le_bits f0 {
b0(v0: Field):
jmp b1(u32 0)
b1(v1: u32):
jmpif u1 0 then: b2, else: b3
b2():
jmpif u1 0 then: b4, else: b8
b3():
return v0
b4():
jmpif u1 0 then: b6, else: b9
b5():
jmp b1(u32 0)
b6():
jmp b7()
b7():
jmp b5()
b8():
jmp b5()
b9():
jmp b7()
}
");
    }
}
Loading
Loading