diff --git a/TODO.md b/TODO.md index 1ffe3d5b..1077c1a4 100644 --- a/TODO.md +++ b/TODO.md @@ -19,7 +19,7 @@ - Lev's trick to skip some low-level modular reduction - Sumcheck, case z = 0, no need to fold, only keep first half of the values (done in PR 33 by Lambda) (and also in WHIR?) - Custom AVX2 / AVX512 / Neon implem in Plonky3 for all of the finite field operations (done for degree 4 extension, but not degree 5) -- Many times, we evaluate different multilinear polynomials (diferent columns of the same table etc) at a common point. OPTI = compute the eq(.) once, and then dot_product with everything +- Many times, we evaluate different multilinear polynomials (different columns of the same table etc) at a common point. OPTI = compute the eq(.) once, and then dot_product with everything - To commit to multiple AIR table using 1 single pcs, the most general form our "packed pcs" api should accept is: a list of n (n not a power of 2) columns, each ending with m repeated values (in this manner we can reduce proof size when they are a lot of columns (poseidons ...)) - in the runner of leanISA program, if we call 2 times the same function with the same arguments, we can reuse the same memory frame @@ -95,7 +95,7 @@ But we reduce proof size a lot using instead (TODO): - About range checks, that can currently be done in 3 cycles (see 2.5.3 of the zkVM pdf), in the instruction encoding of DEREF, if we replaced (1 - AUX) by a dedicated column, we could allow DEREFS that 'does not do anything with the resulting value', which is exactly what we want for range check: we only want to ensure that m[m[fp + x]] (resp m[(t-1) - m[fp + x]]) is a valid memory access (i.e. the index is < M the memory size), but currently the DEREF instruction forces us to 'store' the result, in m[fp + i] (resp m[fp + k]). - TLDR: adding a new encoding field for DEREF would save 2 memory cells / range check. 
+ TLDR: adding a new encoding field for DEREF would save 2 memory cells / range check. If this can also increase perf in alternative scenarios (other instructions, for instance), potentially we should consider it.
p16_indexe_statements.into_iter().enumerate() { + for (i, stmt) in p16_index_statements.into_iter().enumerate() { stmt.push(Evaluation::new( idx_point_right_p16.clone(), inner_values[i], )); } - for (i, stmt) in p24_indexe_statements.into_iter().enumerate() { + for (i, stmt) in p24_index_statements.into_iter().enumerate() { stmt.push(Evaluation::new( idx_point_right_p24.clone(), inner_values[i + 4], diff --git a/crates/lean_prover/src/prove_execution.rs b/crates/lean_prover/src/prove_execution.rs index 494f3415..9c7a34cc 100644 --- a/crates/lean_prover/src/prove_execution.rs +++ b/crates/lean_prover/src/prove_execution.rs @@ -421,7 +421,7 @@ pub fn prove_execution( .iter() .map(|c| c.as_slice()) .collect::>(), - ), // we do not use packing here beacause it's slower in practice (this sumcheck is small) + ), // we do not use packing here because it's slower in practice (this sumcheck is small) &dot_product_footprint_computation, &dot_product_footprint_computation, &[], @@ -937,7 +937,7 @@ pub fn prove_execution( } let (initial_pc_statement, final_pc_statement) = - intitial_and_final_pc_conditions(bytecode, log_n_cycles); + initial_and_final_pc_conditions(bytecode, log_n_cycles); let dot_product_computation_column_evals = dot_product_computations_base .par_iter() @@ -1073,17 +1073,17 @@ pub fn prove_execution( dot_product_air_statement(2), dot_product_logup_star_indexes_statement_a, grand_product_dot_product_table_indexes_statement_index_a, - ], // dot product: indexe a + ], // dot product: index a vec![ dot_product_air_statement(3), dot_product_logup_star_indexes_statement_b, grand_product_dot_product_table_indexes_statement_index_b, - ], // dot product: indexe b + ], // dot product: index b vec![ dot_product_air_statement(4), dot_product_logup_star_indexes_statement_res, grand_product_dot_product_table_indexes_statement_index_res, - ], // dot product: indexe res + ], // dot product: index res ], dot_product_computation_column_statements, ] diff --git 
a/crates/lean_prover/src/verify_execution.rs b/crates/lean_prover/src/verify_execution.rs index 47d703a5..cf12948c 100644 --- a/crates/lean_prover/src/verify_execution.rs +++ b/crates/lean_prover/src/verify_execution.rs @@ -610,7 +610,7 @@ pub fn verify_execution( } let (initial_pc_statement, final_pc_statement) = - intitial_and_final_pc_conditions(bytecode, log_n_cycles); + initial_and_final_pc_conditions(bytecode, log_n_cycles); let dot_product_computation_column_evals = verifier_state.next_extension_scalars_const::()?; @@ -780,17 +780,17 @@ pub fn verify_execution( dot_product_air_statement(2), dot_product_logup_star_indexes_statement_a, grand_product_dot_product_table_indexes_statement_index_a, - ], // dot product: indexe a + ], // dot product: index a vec![ dot_product_air_statement(3), dot_product_logup_star_indexes_statement_b, grand_product_dot_product_table_indexes_statement_index_b, - ], // dot product: indexe b + ], // dot product: index b vec![ dot_product_air_statement(4), dot_product_logup_star_indexes_statement_res, grand_product_dot_product_table_indexes_statement_index_res, - ], // dot product: indexe res + ], // dot product: index res ], dot_product_computation_column_statements, ]