diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs
index 171ca30f5f4..5806e62bf95 100644
--- a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs
+++ b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs
@@ -10,7 +10,7 @@ use fxhash::FxHasher64;
 use iter_extended::vecmap;
 use noirc_frontend::hir_def::types::Type as HirType;
 
-use crate::ssa::{ir::function::RuntimeType, opt::flatten_cfg::value_merger::ValueMerger};
+use crate::ssa::opt::flatten_cfg::value_merger::ValueMerger;
 
 use super::{
     basic_block::BasicBlockId,
@@ -506,7 +506,7 @@ impl Instruction {
         }
     }
 
-    pub(crate) fn can_eliminate_if_unused(&self, function: &Function) -> bool {
+    pub(crate) fn can_eliminate_if_unused(&self, function: &Function, flattened: bool) -> bool {
         use Instruction::*;
         match self {
             Binary(binary) => {
@@ -539,8 +539,7 @@ impl Instruction {
             // pass where this check is done, but does mean that we cannot perform mem2reg
            // after the DIE pass.
             Store { .. } => {
-                matches!(function.runtime(), RuntimeType::Acir(_))
-                    && function.reachable_blocks().len() == 1
+                flattened && function.runtime().is_acir() && function.reachable_blocks().len() == 1
             }
 
             Constrain(..)
diff --git a/compiler/noirc_evaluator/src/ssa/opt/die.rs b/compiler/noirc_evaluator/src/ssa/opt/die.rs
index eed1af8251b..48e55bc49e5 100644
--- a/compiler/noirc_evaluator/src/ssa/opt/die.rs
+++ b/compiler/noirc_evaluator/src/ssa/opt/die.rs
@@ -22,9 +22,17 @@ use super::rc::{pop_rc_for, RcInstruction};
 impl Ssa {
     /// Performs Dead Instruction Elimination (DIE) to remove any instructions with
     /// unused results.
+    ///
+    /// This step should come after the flattening of the CFG and mem2reg.
     #[tracing::instrument(level = "trace", skip(self))]
-    pub(crate) fn dead_instruction_elimination(mut self) -> Ssa {
-        self.functions.par_iter_mut().for_each(|(_, func)| func.dead_instruction_elimination(true));
+    pub(crate) fn dead_instruction_elimination(self) -> Ssa {
+        self.dead_instruction_elimination_inner(true)
+    }
+
+    fn dead_instruction_elimination_inner(mut self, flattened: bool) -> Ssa {
+        self.functions
+            .par_iter_mut()
+            .for_each(|(_, func)| func.dead_instruction_elimination(true, flattened));
         self
     }
 }
@@ -37,8 +45,12 @@ impl Function {
     /// instructions that reference results from an instruction in another block are evaluated first.
     /// If we did not iterate blocks in this order we could not safely say whether or not the results
     /// of its instructions are needed elsewhere.
-    pub(crate) fn dead_instruction_elimination(&mut self, insert_out_of_bounds_checks: bool) {
-        let mut context = Context::default();
+    pub(crate) fn dead_instruction_elimination(
+        &mut self,
+        insert_out_of_bounds_checks: bool,
+        flattened: bool,
+    ) {
+        let mut context = Context { flattened, ..Default::default() };
         for call_data in &self.dfg.data_bus.call_data {
             context.mark_used_instruction_results(&self.dfg, call_data.array_id);
         }
@@ -58,7 +70,7 @@ impl Function {
         // instructions (we don't want to remove those checks, or instructions that are
         // dependencies of those checks)
         if inserted_out_of_bounds_checks {
-            self.dead_instruction_elimination(false);
+            self.dead_instruction_elimination(false, flattened);
             return;
         }
 
@@ -76,6 +88,11 @@ struct Context {
     /// they technically contain side-effects but we still want to remove them if their
     /// `value` parameter is not used elsewhere.
     rc_instructions: Vec<(InstructionId, BasicBlockId)>,
+
+    /// The elimination of certain unused instructions assumes that the DIE pass runs after
+    /// the flattening of the CFG, but if that's not the case then we should not eliminate
+    /// them just yet.
+    flattened: bool,
 }
 
 impl Context {
@@ -172,7 +189,7 @@ impl Context {
     fn is_unused(&self, instruction_id: InstructionId, function: &Function) -> bool {
         let instruction = &function.dfg[instruction_id];
 
-        if instruction.can_eliminate_if_unused(function) {
+        if instruction.can_eliminate_if_unused(function, self.flattened) {
             let results = function.dfg.instruction_results(instruction_id);
             results.iter().all(|result| !self.used_values.contains(result))
         } else if let Instruction::Call { func, arguments } = instruction {
@@ -941,4 +958,53 @@ mod test {
         let ssa = ssa.dead_instruction_elimination();
         assert_normalized_ssa_equals(ssa, src);
     }
+
+    #[test]
+    fn do_not_remove_mutable_reference_params() {
+        let src = "
+            acir(inline) fn main f0 {
+              b0(v0: Field, v1: Field):
+                v2 = allocate -> &mut Field
+                store v0 at v2
+                call f1(v2)
+                v4 = load v2 -> Field
+                v5 = eq v4, v1
+                constrain v4 == v1
+                return
+            }
+            acir(inline) fn Add10 f1 {
+              b0(v0: &mut Field):
+                v1 = load v0 -> Field
+                v2 = load v0 -> Field
+                v4 = add v2, Field 10
+                store v4 at v0
+                return
+            }
+            ";
+
+        let ssa = Ssa::from_str(src).unwrap();
+
+        // Even though these ACIR functions only have 1 block, we have not inlined and flattened anything yet.
+        let ssa = ssa.dead_instruction_elimination_inner(false);
+
+        let expected = "
+            acir(inline) fn main f0 {
+              b0(v0: Field, v1: Field):
+                v2 = allocate -> &mut Field
+                store v0 at v2
+                call f1(v2)
+                v4 = load v2 -> Field
+                constrain v4 == v1
+                return
+            }
+            acir(inline) fn Add10 f1 {
+              b0(v0: &mut Field):
+                v1 = load v0 -> Field
+                v3 = add v1, Field 10
+                store v3 at v0
+                return
+            }
+            ";
+        assert_normalized_ssa_equals(ssa, expected);
+    }
 }
diff --git a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs
index 79181b7e74e..37718ac0904 100644
--- a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs
+++ b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs
@@ -990,7 +990,7 @@ fn brillig_bytecode_size(function: &Function) -> usize {
     simplify_between_unrolls(&mut temp);
 
     // This is to try to prevent hitting ICE.
-    temp.dead_instruction_elimination(false);
+    temp.dead_instruction_elimination(false, true);
 
     convert_ssa_function(&temp, false).byte_code.len()
 }
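
For reviewers, a standalone Rust sketch of the invariant the new `flattened` flag guards in the `Store { .. }` arm of `can_eliminate_if_unused`: an unused store is only removable once the CFG has been flattened, the function is ACIR, and it has a single reachable block. The `Runtime`, `FunctionInfo`, and `can_eliminate_unused_store` names are illustrative stand-ins, not the compiler's real SSA types.

// Sketch of the rule this patch enforces: without `flattened`, a store through a
// mutable reference parameter could be removed while another function still reads it.

#[allow(dead_code)]
#[derive(Clone, Copy, PartialEq, Eq)]
enum Runtime {
    Acir,
    Brillig,
}

struct FunctionInfo {
    runtime: Runtime,
    reachable_blocks: usize,
}

/// Mirrors the `Store { .. }` arm of the patched `can_eliminate_if_unused`.
fn can_eliminate_unused_store(function: &FunctionInfo, flattened: bool) -> bool {
    flattened && function.runtime == Runtime::Acir && function.reachable_blocks == 1
}

fn main() {
    let single_block_acir = FunctionInfo { runtime: Runtime::Acir, reachable_blocks: 1 };

    // DIE run before inlining/flattening (as in the new test): the store must be kept.
    assert!(!can_eliminate_unused_store(&single_block_acir, false));

    // The same single-block ACIR function after flattening: the store is safe to remove.
    assert!(can_eliminate_unused_store(&single_block_acir, true));
}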