diff --git a/barretenberg/cpp/scripts/audit/audit_scopes/dsl_audit_scope.md b/barretenberg/cpp/scripts/audit/audit_scopes/dsl_audit_scope.md index a405a5abfc84..b2f642f494fd 100644 --- a/barretenberg/cpp/scripts/audit/audit_scopes/dsl_audit_scope.md +++ b/barretenberg/cpp/scripts/audit/audit_scopes/dsl_audit_scope.md @@ -12,8 +12,6 @@ Commit hash: 2094fd1467dd9a94803b2c5007cf60ac357aa7d2 (22.12.2025) 4. `dsl/acir_format/acir_format.cpp` 5. `dsl/acir_format/arithmetic_constraints.hpp` 6. `dsl/acir_format/arithmetic_constraints.cpp` -7. `dsl/acir_format/round.hpp` -8. `dsl/acir_format/round.cpp` 9. `dsl/acir_format/utils.hpp` 10. `dsl/acir_format/utils.cpp` @@ -27,15 +25,13 @@ All constraints except arithmetic constraints work as follows: 1. Bytes deserialised into Barretenberg's internal representation (`acir_to_constraint_buf`) 2. Iterate through all instances of the given constraint and add the constraint to the circuit (`acir_format`) -Arithmetic constraints work slighly differently because we leverage the UltraHonk arithmetisation to efficiently encode expressions of the following form: +Arithmetic constraints work slightly differently because we leverage the UltraHonk arithmetization to efficiently encode expressions of the following form: $ \sum_{i, j} c_{i,j} w_i w_j + \sum_i c_i w_i + c = 0 -$ where $w_i$ are witnesses, $c_{i,j}, c_i$ are the coefficients of the equation, and $c$ is the constant term. For a detailed explanation of how we leverage the UltraHonk arithmetisation to encode arithmetic constraints see the documentation for the functions `acir_to_constraint_buf::split_into_mul_quad_gates` and `arithmetic_constraints::create_big_quad_constraint`. +$ where $w_i$ are witnesses, $c_{i,j}, c_i$ are the coefficients of the equation, and $c$ is the constant term. 
For a detailed explanation of how we leverage the UltraHonk arithmetization to encode arithmetic constraints see the documentation for the functions `acir_to_constraint_buf::split_into_mul_quad_gates` and `arithmetic_constraints::create_big_quad_constraint`. The difference in how arithmetic constraints are handled is the reason why they are the only constraints that are part of this audit scope: to be sure that no bugs are present, the entire flow (from bytes to to constraints) has to be audited for arithmetic constraints. - - ## Test Files 1. `dsl/acir_format/acir_format.test.cpp` 2. `dsl/acir_format/arithmetic_constraints.test.cpp` diff --git a/barretenberg/cpp/scripts/audit/audit_scopes/honk_proving_system_audit_scope.md b/barretenberg/cpp/scripts/audit/audit_scopes/honk_proving_system_audit_scope.md index f7a993184975..bfd41baf381b 100644 --- a/barretenberg/cpp/scripts/audit/audit_scopes/honk_proving_system_audit_scope.md +++ b/barretenberg/cpp/scripts/audit/audit_scopes/honk_proving_system_audit_scope.md @@ -71,6 +71,8 @@ Note: Paths relative to `aztec-packages/barretenberg/cpp/src/barretenberg` 50. `dsl/acir_format/recursion_constraint.cpp` 51. `dsl/acir_format/recursion_constraint_output.hpp` 52. `dsl/acir_format/recursion_constraint_output.cpp` +53. `dsl/acir_format/honk_recursion_constraint.hpp` +54. `dsl/acir_format/honk_recursion_constraint.cpp` 53. `dsl/acir_format/mock_verifier_inputs.hpp` ## Summary of Module @@ -93,6 +95,7 @@ The Honk proving system is Barretenberg's core SNARK proving system implementing 13. `ultra_honk/lookup.test.cpp` 14. `ultra_honk/rom_ram.test.cpp` 15. `ultra_honk/range_constraint.test.cpp` +16. 
`dsl/acir_format/honk_recursion_constraint.test.cpp` ## Security Mechanisms diff --git a/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh b/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh index 69ee05e999fc..71e8fc14fe96 100755 --- a/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh +++ b/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_changed.sh @@ -13,7 +13,7 @@ cd .. # - Generate a hash for versioning: sha256sum bb-chonk-inputs.tar.gz # - Upload the compressed results: aws s3 cp bb-chonk-inputs.tar.gz s3://aztec-ci-artifacts/protocol/bb-chonk-inputs-[hash(0:8)].tar.gz # Note: In case of the "Test suite failed to run ... Unexpected token 'with' " error, need to run: docker pull aztecprotocol/build:3.0 -pinned_short_hash="c288d94d" +pinned_short_hash="04d835ae" pinned_chonk_inputs_url="https://aztec-ci-artifacts.s3.us-east-2.amazonaws.com/protocol/bb-chonk-inputs-${pinned_short_hash}.tar.gz" function compress_and_upload { diff --git a/barretenberg/cpp/src/barretenberg/api/api_chonk.cpp b/barretenberg/cpp/src/barretenberg/api/api_chonk.cpp index bb0d8080c168..9c290315fff1 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_chonk.cpp +++ b/barretenberg/cpp/src/barretenberg/api/api_chonk.cpp @@ -123,7 +123,7 @@ bool ChonkAPI::verify([[maybe_unused]] const Flags& flags, auto proof_fields = many_from_buffer(read_file(proof_path)); auto proof = ChonkProof::from_field_elements(proof_fields); - auto vk_buffer = read_file(vk_path); + auto vk_buffer = read_vk_file(vk_path); auto response = bbapi::ChonkVerify{ .proof = std::move(proof), .vk = std::move(vk_buffer) }.execute(); return response.valid; diff --git a/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.cpp b/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.cpp index b7e335b6237c..0d7f08e947a6 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.cpp +++ b/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.cpp @@ -101,7 
+101,7 @@ bool UltraHonkAPI::verify(const Flags& flags, // Read input files auto public_inputs = many_from_buffer(read_file(public_inputs_path)); auto proof = many_from_buffer(read_file(proof_path)); - auto vk_bytes = read_file(vk_path); + auto vk_bytes = read_vk_file(vk_path); // Convert flags to ProofSystemSettings bbapi::ProofSystemSettings settings{ .ipa_accumulation = flags.ipa_accumulation, @@ -209,7 +209,7 @@ void UltraHonkAPI::write_solidity_verifier(const Flags& flags, { BB_BENCH_NAME("UltraHonkAPI::write_solidity_verifier"); // Read VK file - auto vk_bytes = read_file(vk_path); + auto vk_bytes = read_vk_file(vk_path); // Convert flags to ProofSystemSettings bbapi::ProofSystemSettings settings{ .ipa_accumulation = flags.ipa_accumulation, diff --git a/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.test.cpp b/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.test.cpp index b84d75c2efbc..93e9424154ba 100644 --- a/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.test.cpp +++ b/barretenberg/cpp/src/barretenberg/api/api_ultra_honk.test.cpp @@ -233,3 +233,35 @@ TEST_F(ApiUltraHonkTest, GatesWithOpcodesSmokeTest) // Check that output contains per-opcode information EXPECT_TRUE(output.find("gates_per_opcode") != std::string::npos); } + +TEST_F(ApiUltraHonkTest, VerifyWithMissingVkGivesActionableError) +{ + auto [bytecode_path, witness_path] = create_test_circuit_files(test_dir); + + API::Flags flags; + flags.oracle_hash_type = "poseidon2"; + flags.write_vk = true; + + UltraHonkAPI api; + + // Generate proof with vk + auto proof_output_dir = test_dir / "proof"; + std::filesystem::create_directories(proof_output_dir); + api.prove(flags, bytecode_path, witness_path, "", proof_output_dir); + + // Try to verify with a non-existent vk path + auto nonexistent_vk_path = test_dir / "nonexistent_vk"; + try { + api.verify(flags, proof_output_dir / "public_inputs", proof_output_dir / "proof", nonexistent_vk_path); + FAIL() << "Expected an exception to be thrown"; 
+ } catch (const std::runtime_error& e) { + std::string error_msg = e.what(); + // Check that the error message contains actionable guidance + EXPECT_TRUE(error_msg.find("--write_vk") != std::string::npos) + << "Error message should mention --write_vk flag. Got: " << error_msg; + EXPECT_TRUE(error_msg.find("bb write_vk") != std::string::npos) + << "Error message should mention bb write_vk command. Got: " << error_msg; + EXPECT_TRUE(error_msg.find("--vk_path") != std::string::npos) + << "Error message should mention --vk_path option. Got: " << error_msg; + } +} diff --git a/barretenberg/cpp/src/barretenberg/api/aztec_process.cpp b/barretenberg/cpp/src/barretenberg/api/aztec_process.cpp index c0e4521e9304..3e551d2f8e9c 100644 --- a/barretenberg/cpp/src/barretenberg/api/aztec_process.cpp +++ b/barretenberg/cpp/src/barretenberg/api/aztec_process.cpp @@ -324,7 +324,7 @@ bool process_all_artifacts(const std::string& search_path, bool force) auto artifacts = find_contract_artifacts(search_path); if (artifacts.empty()) { - info("No contract artifacts found. Please compile your contracts first with 'nargo compile'."); + info("No contract artifacts found in '", search_path, "'."); return false; } diff --git a/barretenberg/cpp/src/barretenberg/api/file_io.hpp b/barretenberg/cpp/src/barretenberg/api/file_io.hpp index c5436170d0ca..fe9cf30adf5f 100644 --- a/barretenberg/cpp/src/barretenberg/api/file_io.hpp +++ b/barretenberg/cpp/src/barretenberg/api/file_io.hpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -100,4 +101,23 @@ template inline std::string field_elements_to_json(const std::vect ss << "]"; return ss.str(); } + +/** + * @brief Read a verification key file with an actionable error message if not found. 
+ * + * @param vk_path Path to the verification key file + * @return std::vector The verification key bytes + * @throws std::runtime_error with actionable message if vk file not found + */ +inline std::vector read_vk_file(const std::filesystem::path& vk_path) +{ + try { + return read_file(vk_path); + } catch (const std::runtime_error&) { + THROW std::runtime_error("Unable to open file: " + vk_path.string() + + "\nGenerate a vk during proving by running `bb prove` with an additional `--write_vk` " + "flag, or run `bb write_vk` to generate a standalone vk." + "\nIf you already have a vk file, specify its path with `--vk_path `."); + } +} } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp index ed72e09780cd..1c79489b5f9f 100644 --- a/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp +++ b/barretenberg/cpp/src/barretenberg/boomerang_value_detection/graph.cpp @@ -1241,9 +1241,6 @@ inline void StaticAnalyzer_::remove_unnecessary_sha256_plook BasicTableId::SHA256_WITNESS_SLICE_14_ROTATE_1, BasicTableId::SHA256_BASE16, BasicTableId::SHA256_BASE16_ROTATE2, - BasicTableId::SHA256_BASE16_ROTATE6, - BasicTableId::SHA256_BASE16_ROTATE7, - BasicTableId::SHA256_BASE16_ROTATE8, BasicTableId::SHA256_BASE28, BasicTableId::SHA256_BASE28_ROTATE3, BasicTableId::SHA256_BASE28_ROTATE6 }; diff --git a/barretenberg/cpp/src/barretenberg/chonk/README.md b/barretenberg/cpp/src/barretenberg/chonk/README.md index fbc1418df5ce..debe345dfa9f 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/README.md +++ b/barretenberg/cpp/src/barretenberg/chonk/README.md @@ -726,7 +726,7 @@ This chain ensures the op queue history is maintained correctly. 
The Merge proto ```cpp // In OinkVerifier::verify() (called by HypernovaFoldingVerifier for each instance) -FF vk_hash = vk->hash_with_origin_tagging(domain_separator, *transcript); +FF vk_hash = vk->hash_with_origin_tagging(*transcript); transcript->add_to_hash_buffer(domain_separator + "vk_hash", vk_hash); // All subsequent challenges now depend on this hash ``` diff --git a/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp b/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp index 445345081333..c69c1b89cca1 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/chonk.cpp @@ -117,7 +117,7 @@ Chonk::perform_recursive_verification_and_databus_consistency_checks( // Update previous accumulator hash so that we can check it against the one extracted from the public inputs if (verifier_inputs.is_kernel) { - prev_accum_hash = input_verifier_accumulator->hash_with_origin_tagging("", *accumulation_recursive_transcript); + prev_accum_hash = input_verifier_accumulator->hash_with_origin_tagging(*accumulation_recursive_transcript); } RecursiveFoldingVerifier folding_verifier(accumulation_recursive_transcript); @@ -340,7 +340,7 @@ void Chonk::complete_kernel_circuit_logic(ClientCircuit& circuit) kernel_output.ecc_op_tables = T_prev_commitments; RecursiveTranscript hash_transcript; kernel_output.output_hn_accum_hash = - current_stdlib_verifier_accumulator->hash_with_origin_tagging("", hash_transcript); + current_stdlib_verifier_accumulator->hash_with_origin_tagging(hash_transcript); info("Kernel output accumulator hash: ", kernel_output.output_hn_accum_hash); #ifndef NDEBUG info("Chonk recursive verification: accumulator hash set in the public inputs matches the one " @@ -602,7 +602,7 @@ void Chonk::update_native_verifier_accumulator(const VerifierInputs& queue_entry } if (!queue_entry.is_kernel) { - native_verifier_accum_hash = native_verifier_accum.hash_with_origin_tagging("", *verifier_transcript); + 
native_verifier_accum_hash = native_verifier_accum.hash_with_origin_tagging(*verifier_transcript); } info("Chonk accumulate: prover and verifier accumulators match: ", @@ -628,7 +628,7 @@ void Chonk::debug_incoming_circuit(ClientCircuit& circuit, // Compare precomputed VK with the one generated during accumulation auto vk = std::make_shared(prover_instance->get_precomputed()); info("Does the precomputed vk match with the one generated during accumulation? ", - vk->compare(*precomputed_vk) ? "true" : "false"); + vk->compare(*precomputed_vk, MegaFlavor::CommitmentLabels().get_precomputed()) ? "true" : "false"); info("======= END OF DEBUGGING INFO FOR INCOMING CIRCUIT ======="); } diff --git a/barretenberg/cpp/src/barretenberg/chonk/chonk_transcript_invariants.test.cpp b/barretenberg/cpp/src/barretenberg/chonk/chonk_transcript_invariants.test.cpp index fcab933c975b..5a5b50c4aaa3 100644 --- a/barretenberg/cpp/src/barretenberg/chonk/chonk_transcript_invariants.test.cpp +++ b/barretenberg/cpp/src/barretenberg/chonk/chonk_transcript_invariants.test.cpp @@ -157,9 +157,8 @@ TEST_F(ChonkTranscriptInvariantTests, RecursiveVerificationTranscriptCount) UltraCircuitBuilder builder; size_t index_before_verify = bb::unique_transcript_index.load(); - // Create stdlib VK from native VK and wrap it in VKAndHash - auto stdlib_vk = std::make_shared(&builder, vk_and_hash->vk); - auto stdlib_vk_and_hash = std::make_shared(stdlib_vk); + // Create stdlib VK and hash from native VK + auto stdlib_vk_and_hash = std::make_shared(builder, vk_and_hash->vk); RecursiveVerifier verifier(stdlib_vk_and_hash); ChonkStdlibProof stdlib_proof(builder, proof); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake2s_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake2s_constraint.cpp index bc0bcef35c84..82d8457848d0 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake2s_constraint.cpp +++ 
b/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake2s_constraint.cpp @@ -8,7 +8,6 @@ #include "barretenberg/common/assert.hpp" #include "barretenberg/stdlib/hash/blake2s/blake2s.hpp" #include "barretenberg/stdlib/primitives/byte_array/byte_array.hpp" -#include "round.hpp" namespace acir_format { @@ -24,16 +23,12 @@ template void create_blake2s_constraints(Builder& builder, co for (const auto& witness_index_num_bits : constraint.inputs) { auto witness_index = witness_index_num_bits.blackbox_input; - auto num_bits = witness_index_num_bits.num_bits; - - // XXX: The implementation requires us to truncate the element to the nearest byte and not bit - auto num_bytes = round_to_nearest_byte(num_bits); - BB_ASSERT_LTE(num_bytes, 32U, "Input num_bytes exceeds 32 per element in blake2s"); - field_ct element = to_field_ct(witness_index, builder); - // byte_array_ct(field, num_bytes) constructor adds range constraints for each byte - byte_array_ct element_bytes(element, num_bytes); + // byte_array_ct(field, num_bytes) constructor adds range constraints for each byte. Note that num_bytes = + // ceil(witness_index_num_bits.num_bits/8). Here, num_bits is set to 8 when constructing the vector of inputs in + // the Blake2s constraint. Hence, we set num_bytes = 1. 
+ byte_array_ct element_bytes(element, 1); // Safe write: both arr and element_bytes are constrained arr.write(element_bytes); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake3_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake3_constraint.cpp index b36189451ac3..14b0b0f3c321 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake3_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/blake3_constraint.cpp @@ -8,7 +8,6 @@ #include "barretenberg/common/assert.hpp" #include "barretenberg/stdlib/hash/blake3s/blake3s.hpp" #include "barretenberg/stdlib/primitives/byte_array/byte_array.hpp" -#include "round.hpp" namespace acir_format { @@ -22,15 +21,12 @@ template void create_blake3_constraints(Builder& builder, con for (const auto& witness_index_num_bits : constraint.inputs) { auto witness_index = witness_index_num_bits.blackbox_input; - auto num_bits = witness_index_num_bits.num_bits; - - // XXX: The implementation requires us to truncate the element to the nearest byte and not bit - auto num_bytes = round_to_nearest_byte(num_bits); - BB_ASSERT_LTE(num_bytes, 32U, "Input num_bytes exceeds 32 per element in blake3s"); field_ct element = to_field_ct(witness_index, builder); - // byte_array_ct(field, num_bytes) constructor adds range constraints for each byte - byte_array_ct element_bytes(element, num_bytes); + // byte_array_ct(field, num_bytes) constructor adds range constraints for each byte. Note that num_bytes = + // ceil(witness_index_num_bits.num_bits/8). Here, num_bits is set to 8 when constructing the vector of inputs in + // the Blake3 constraint. Hence, we set num_bytes = 1. 
+ byte_array_ct element_bytes(element, 1); // Safe write: both arr and element_bytes are constrained arr.write(element_bytes); diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/gate_count_constants.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/gate_count_constants.hpp index d6f71781772c..e625cf109237 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/gate_count_constants.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/gate_count_constants.hpp @@ -38,8 +38,8 @@ template inline constexpr size_t ECDSA_SECP256K1 = 41994 + ZE template inline constexpr size_t ECDSA_SECP256R1 = 72209 + ZERO_GATE + (IsMegaBuilder ? 2 : 0); -template inline constexpr size_t BLAKE2S = 2959 + ZERO_GATE + MEGA_OFFSET; -template inline constexpr size_t BLAKE3 = 2165 + ZERO_GATE + MEGA_OFFSET; +template inline constexpr size_t BLAKE2S = 2952 + ZERO_GATE + MEGA_OFFSET; +template inline constexpr size_t BLAKE3 = 2158 + ZERO_GATE + MEGA_OFFSET; template inline constexpr size_t KECCAK_PERMUTATION = 17387 + ZERO_GATE + MEGA_OFFSET; template inline constexpr size_t POSEIDON2_PERMUTATION = 73 + ZERO_GATE + MEGA_OFFSET; template inline constexpr size_t MULTI_SCALAR_MUL = 3550 + ZERO_GATE; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.cpp index 76c6bee92c5a..d4fe63643cb4 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.cpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: completed, auditors: [Federico], commit: } +// internal: { status: completed, auditors: [Federico], commit: 8b4e1279ef130eeb18bce9ce2a9f0fa39a243697} // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git 
a/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.hpp index 5a188356bcfe..cff81f465980 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/honk_recursion_constraint.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: completed, auditors: [Federico], commit: } +// internal: { status: completed, auditors: [Federico], commit: 8b4e1279ef130eeb18bce9ce2a9f0fa39a243697} // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/keccak_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/keccak_constraint.cpp index 41f66a3b6b6e..8d46642d2bca 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/keccak_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/keccak_constraint.cpp @@ -7,7 +7,6 @@ #include "keccak_constraint.hpp" #include "barretenberg/stdlib/hash/keccak/keccak.hpp" #include "barretenberg/stdlib/primitives/circuit_builders/circuit_builders_fwd.hpp" -#include "round.hpp" namespace acir_format { diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/opcode_gate_count.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/opcode_gate_count.test.cpp index c589795de80a..2b6e937d5980 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/opcode_gate_count.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/opcode_gate_count.test.cpp @@ -436,7 +436,7 @@ TYPED_TEST(OpcodeGateCountTests, Blake2s) blake2s_constraint.inputs.push_back(Blake2sInput{ .blackbox_input = WitnessOrConstant::from_index(0), - .num_bits = 32, + .num_bits = 8, }); for (size_t i = 0; i < 32; ++i) { @@ -466,7 +466,7 @@ TYPED_TEST(OpcodeGateCountTests, Blake3) 
blake3_constraint.inputs.push_back(Blake3Input{ .blackbox_input = WitnessOrConstant::from_index(0), - .num_bits = 32, + .num_bits = 8, }); for (size_t i = 0; i < 32; ++i) { diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/round.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/round.cpp deleted file mode 100644 index 6e9645229981..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/round.cpp +++ /dev/null @@ -1,29 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } -// external_1: { status: not started, auditors: [], commit: } -// external_2: { status: not started, auditors: [], commit: } -// ===================== - -#include "round.hpp" - -namespace acir_format { - -// Rounds a number to the nearest multiple of 8 -uint32_t round_to_nearest_mul_8(uint32_t num_bits) -{ - BB_ASSERT(num_bits <= UINT32_MAX - 8, "round_to_nearest_mul_8: num_bits too large, calculation would overflow."); - uint32_t remainder = num_bits % 8; - if (remainder == 0) { - return num_bits; - } - - return num_bits + 8 - remainder; -} - -// Rounds the number of bits to the nearest byte -uint32_t round_to_nearest_byte(uint32_t num_bits) -{ - return round_to_nearest_mul_8(num_bits) / 8; -} - -} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/round.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/round.hpp deleted file mode 100644 index 4fc17a08211b..000000000000 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/round.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } -// external_1: { status: not started, auditors: [], commit: } -// external_2: { status: not started, auditors: [], commit: } -// ===================== - -#include "barretenberg/common/assert.hpp" -#include - -namespace acir_format { - -// Rounds a number to the nearest multiple of 8 -uint32_t round_to_nearest_mul_8(uint32_t num_bits); - -// 
Rounds the number of bits to the nearest byte -uint32_t round_to_nearest_byte(uint32_t num_bits); - -} // namespace acir_format diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.cpp index d4ec8342235b..3ffffb1bc4a1 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.cpp @@ -5,9 +5,8 @@ // ===================== #include "sha256_constraint.hpp" -#include "barretenberg/serialize/msgpack_impl.hpp" +#include "barretenberg/common/zip_view.hpp" #include "barretenberg/stdlib/hash/sha256/sha256.hpp" -#include "round.hpp" namespace acir_format { @@ -16,30 +15,30 @@ void create_sha256_compression_constraints(Builder& builder, const Sha256Compres { using field_ct = bb::stdlib::field_t; - std::array inputs; - std::array hash_inputs; + std::array hash_inputs; // previous (or initial) hash state + std::array inputs; // message block to compress // Get the witness assignment for each witness index - // Note that we do not range-check the inputs, which should be 32 bits, - // because of the lookup-tables. - size_t i = 0; - for (const auto& witness_index_num_bits : constraint.inputs) { - inputs[i] = to_field_ct(witness_index_num_bits, builder); - ++i; + // AUDITTODO: We do not range-check the inputs here, assuming lookup tables in sha256_block + // provide implicit 32-bit constraints. However, analysis shows this assumption is incomplete: + // - inputs[0] is NEVER lookup-constrained + // - hash_values[3] and hash_values[7] are used in arithmetic before being lookup-constrained + // These values are only weakly bounded (~35 bits) by add_normalize overflow constraints. + // See AUDITTODO in stdlib/hash/sha256/sha256.cpp for details and recommended fix. 
+ for (auto [input, witness_or_constant] : zip_view(inputs, constraint.inputs)) { + input = to_field_ct(witness_or_constant, builder); } - i = 0; - for (const auto& witness_index_num_bits : constraint.hash_values) { - hash_inputs[i] = to_field_ct(witness_index_num_bits, builder); - ++i; + for (auto [hash_input, witness_or_constant] : zip_view(hash_inputs, constraint.hash_values)) { + hash_input = to_field_ct(witness_or_constant, builder); } // Compute sha256 compression - auto output_bytes = bb::stdlib::SHA256::sha256_block(hash_inputs, inputs); + std::array output_state = bb::stdlib::SHA256::sha256_block(hash_inputs, inputs); // Constrain outputs to match expected witness indices - for (size_t i = 0; i < 8; ++i) { - field_ct result_witness = field_ct::from_witness_index(&builder, constraint.result[i]); - output_bytes[i].assert_equal(result_witness); + for (auto [output, result_idx] : zip_view(output_state, constraint.result)) { + field_ct result_witness = field_ct::from_witness_index(&builder, result_idx); + output.assert_equal(result_witness); } } diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.hpp index c16849323526..0a9f84d045b1 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.hpp @@ -8,17 +8,9 @@ #include "barretenberg/dsl/acir_format/witness_constant.hpp" #include #include -#include namespace acir_format { -struct Sha256Input { - uint32_t witness; - uint32_t num_bits; - - friend bool operator==(Sha256Input const& lhs, Sha256Input const& rhs) = default; -}; - struct Sha256Compression { std::array, 16> inputs; std::array, 8> hash_values; diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.test.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.test.cpp index 129a7b8619bd..5123f16ed2bc 100644 --- 
a/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.test.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/sha256_constraint.test.cpp @@ -1,55 +1,154 @@ #include "sha256_constraint.hpp" #include "acir_format.hpp" -#include "barretenberg/ultra_honk/ultra_prover.hpp" +#include "barretenberg/crypto/sha256/sha256.hpp" +#include "barretenberg/dsl/acir_format/test_class.hpp" +#include "barretenberg/dsl/acir_format/utils.hpp" +#include "barretenberg/dsl/acir_format/witness_constant.hpp" #include -#include -namespace acir_format::tests { +using namespace bb; +using namespace acir_format; -class Sha256Tests : public ::testing::Test { +template class Sha256TestingFunctions { + public: + using Builder = BuilderType; + using AcirConstraint = Sha256Compression; + using FF = Builder::FF; + + struct InvalidWitness { + public: + enum class Target : uint8_t { + None, + Input, // Tamper with an input value + HashValue, // Tamper with a previous hash state value + Output, // Tamper with an output value + }; + + static std::vector get_all() + { + return { Target::None, Target::Input, Target::HashValue, Target::Output }; + } + + static std::vector get_labels() { return { "None", "Input", "HashValue", "Output" }; } + }; + + static ProgramMetadata generate_metadata() { return ProgramMetadata{}; } + + static std::pair invalidate_witness( + AcirConstraint constraint, WitnessVector witness_values, const InvalidWitness::Target& invalid_witness_target) + { + switch (invalid_witness_target) { + case InvalidWitness::Target::Input: { + if constexpr (IsInputConstant) { + constraint.inputs[0] = WitnessOrConstant::from_constant(constraint.inputs[0].value + FF(1)); + } else { + witness_values[constraint.inputs[0].index] += FF(1); + } + break; + } + case InvalidWitness::Target::HashValue: { + if constexpr (IsInputConstant) { + constraint.hash_values[0] = + WitnessOrConstant::from_constant(constraint.hash_values[0].value + FF(1)); + } else { + 
witness_values[constraint.hash_values[0].index] += FF(1); + } + break; + } + case InvalidWitness::Target::Output: { + witness_values[constraint.result[0]] += FF(1); + break; + } + case InvalidWitness::Target::None: + break; + } + + return { constraint, witness_values }; + } + + /** + * @brief Generate a valid Sha256Compression constraint with correct witness values + */ + static void generate_constraints(Sha256Compression& sha256_constraint, WitnessVector& witness_values) + { + // Helper to create WitnessOrConstant from a value + auto make_witness_or_constant = [&](uint32_t value) -> WitnessOrConstant { + if constexpr (IsInputConstant) { + return WitnessOrConstant::from_constant(FF(value)); + } else { + uint32_t idx = static_cast(witness_values.size()); + witness_values.emplace_back(FF(value)); + return WitnessOrConstant::from_index(idx); + } + }; + + // Input: 16 words of zeros (512-bit message block) + std::array input_block = { 0 }; + + // Initial hash state (SHA-256 IV) + std::array hash_values = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, + 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; + + // Compute expected output using native SHA-256 compression + std::array result = crypto::sha256_block(hash_values, input_block); + + // Build the constraint + for (size_t i = 0; i < 16; ++i) { + sha256_constraint.inputs[i] = make_witness_or_constant(input_block[i]); + } + for (size_t i = 0; i < 8; ++i) { + sha256_constraint.hash_values[i] = make_witness_or_constant(hash_values[i]); + } + + // Add output values to witness and set result indices + for (size_t i = 0; i < 8; ++i) { + sha256_constraint.result[i] = static_cast(witness_values.size()); + witness_values.emplace_back(FF(result[i])); + } + } +}; + +// Test with constant inputs +template +class Sha256ConstraintsTestInputConstant : public ::testing::Test, + public TestClass> { protected: - static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } + static void SetUpTestSuite() { 
srs::init_file_crs_factory(srs::bb_crs_path()); } }; -TEST_F(Sha256Tests, TestSha256Compression) +using BuilderTypes = testing::Types; + +TYPED_TEST_SUITE(Sha256ConstraintsTestInputConstant, BuilderTypes); + +TYPED_TEST(Sha256ConstraintsTestInputConstant, GenerateVKFromConstraints) { - Sha256Compression sha256_compression; + using Flavor = std::conditional_t, UltraFlavor, MegaFlavor>; + TestFixture::template test_vk_independence(); +} - for (size_t i = 0; i < 16; ++i) { - sha256_compression.inputs[i] = WitnessOrConstant::from_index(static_cast(i)); - } - for (size_t i = 0; i < 8; ++i) { - sha256_compression.hash_values[i] = WitnessOrConstant::from_index(static_cast(i + 16)); - } - for (size_t i = 0; i < 8; ++i) { - sha256_compression.result[i] = static_cast(i + 24); - } +TYPED_TEST(Sha256ConstraintsTestInputConstant, Tampering) +{ + [[maybe_unused]] std::vector _ = TestFixture::test_tampering(); +} - std::array input_block = { 0 }; - std::array hash_values = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, - 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; - std::array result = bb::crypto::sha256_block(hash_values, input_block); +// Test with witness inputs +template +class Sha256ConstraintsTestInputWitness : public ::testing::Test, + public TestClass> { + protected: + static void SetUpTestSuite() { srs::init_file_crs_factory(srs::bb_crs_path()); } +}; - WitnessVector witness(32, 0); - for (size_t idx = 16; idx < 24; idx++) { - witness[idx] = hash_values[idx - 16]; - } - for (size_t idx = 0; idx < 8; idx++) { - witness[24 + idx] = result[idx]; - } +TYPED_TEST_SUITE(Sha256ConstraintsTestInputWitness, BuilderTypes); - AcirFormat constraint_system{ - .max_witness_index = static_cast(witness.size()) - 1, - .num_acir_opcodes = 1, - .public_inputs = {}, - .sha256_compression = { sha256_compression }, - .original_opcode_indices = AcirFormatOriginalOpcodeIndices{ .sha256_compression = { 0 } }, - }; +TYPED_TEST(Sha256ConstraintsTestInputWitness, GenerateVKFromConstraints) 
+{ + using Flavor = std::conditional_t, UltraFlavor, MegaFlavor>; + TestFixture::template test_vk_independence(); +} - AcirProgram program{ constraint_system, witness }; - auto builder = create_circuit(program); - EXPECT_TRUE(CircuitChecker::check(builder)); - EXPECT_FALSE(builder.failed()); +TYPED_TEST(Sha256ConstraintsTestInputWitness, Tampering) +{ + [[maybe_unused]] std::vector _ = TestFixture::test_tampering(); } -} // namespace acir_format::tests diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.cpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.cpp index 86eacbfd0af6..63618723d899 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.cpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.cpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Completed, auditors: [Federico], commit: d4aff8893338c31425565db5a5a560048c33f27a} // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== diff --git a/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.hpp b/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.hpp index 02d16e7770f3..b808a495436f 100644 --- a/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.hpp +++ b/barretenberg/cpp/src/barretenberg/dsl/acir_format/utils.hpp @@ -1,5 +1,5 @@ // === AUDIT STATUS === -// internal: { status: Planned, auditors: [], commit: } +// internal: { status: Completed, auditors: [Federico], commit: d4aff8893338c31425565db5a5a560048c33f27a} // external_1: { status: not started, auditors: [], commit: } // external_2: { status: not started, auditors: [], commit: } // ===================== @@ -33,7 +33,7 @@ template std::vector> fields_from_witnesses(Builder& builder, std::span witness_indices); /** - * @brief Convert a vector of field_t elements to a byte_array enforcing each element to be a boolean + * 
@brief Convert a vector of field_t elements to a byte_array enforcing each element to fit in one byte * * @tparam Builder * @param builder @@ -62,7 +62,7 @@ std::vector add_public_inputs_to_proof(const std::vector& pr /// The functions below are helpers for handling witnesses in testing situations /** - * @brief Given recursion data (proof, key, key hash, predicate and the number of public inputs) and a proof type, + * @brief Given recursion data (proof, key, key hash, predicate, and the number of public inputs) and a proof type, * populate a witness vector with these values and return the associated recursion constraint. * * @details The proof is assumed to be barretenberg-style: containing all the public inputs at its start. The variable @@ -98,14 +98,19 @@ std::vector add_to_witness_and_track_indices(std::vector& witn witness.emplace_back(input.y); indices.emplace_back(witness.size()); witness.emplace_back(input.is_point_at_infinity() ? bb::fr(1) : bb::fr(0)); - } else { - // If no other type is matched, we assume T is a span of values + } else if constexpr (requires { + input.data(); + input.size(); + }) { + // T is a span or span-like container of values indices.reserve(input.size()); auto witness_idx = static_cast(witness.size()); for (const auto& value : input) { witness.push_back(bb::fr(value)); indices.push_back(witness_idx++); } + } else { + bb::assert_failure("Unsupported type for add_to_witness_and_track_indices"); } return indices; @@ -123,7 +128,7 @@ inline uint32_t add_to_witness_and_track_indices(std::vector& witness, c } /** - * @brief Add a span of values to the witness and track their indices, returning them as a fixed-size array. + * @brief Add values to the witness and track their indices, returning them as a fixed-size array. 
*/ template std::array add_to_witness_and_track_indices(std::vector& witness, const T& input) diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp index 9cd22f2b3d77..45828405605c 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp @@ -46,7 +46,9 @@ class ECCVMFlavor { using CommitmentKey = bb::CommitmentKey; using VerifierCommitmentKey = bb::VerifierCommitmentKey; using MSM = bb::eccvm::MSM; - using Transcript = NativeTranscript; + using Codec = FrCodec; + using HashFunction = crypto::Poseidon2; + using Transcript = BaseTranscript; using Proof = HonkProof; // indicates when evaluating sumcheck, edges must be extended to be MAX_PARTIAL_RELATION_LENGTH @@ -814,7 +816,9 @@ class ECCVMFlavor { * resolve that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for * portability of our circuits. */ - class VerificationKey : public NativeVerificationKey_, Transcript> { + class VerificationKey : public NativeVerificationKey_, Codec, HashFunction> { + using Base = NativeVerificationKey_, Codec, HashFunction>; + public: bool operator==(const VerificationKey&) const = default; @@ -826,7 +830,7 @@ class ECCVMFlavor { // Default construct the fixed VK that results from ECCVM_FIXED_SIZE VerificationKey() - : NativeVerificationKey_(ECCVM_FIXED_SIZE, /*num_public_inputs=*/0) + : Base(ECCVM_FIXED_SIZE, /*num_public_inputs=*/0) { this->pub_inputs_offset = 0; @@ -838,7 +842,7 @@ class ECCVMFlavor { } VerificationKey(const size_t circuit_size, const size_t num_public_inputs) - : NativeVerificationKey_(circuit_size, num_public_inputs) + : Base(circuit_size, num_public_inputs) {} VerificationKey(const std::shared_ptr& proving_key) @@ -857,11 +861,10 @@ class ECCVMFlavor { * @brief Unused function because vk is hardcoded in recursive verifier, so no transcript hashing is needed. 
* * @param domain_separator - * @param transcript + * @param tag * @returns The hash of the verification key */ - fr hash_with_origin_tagging([[maybe_unused]] const std::string& domain_separator, - [[maybe_unused]] Transcript& transcript) const override + typename Base::DataType hash_with_origin_tagging([[maybe_unused]] const OriginTag& tag) const override { throw_or_abort("Not intended to be used because vk is hardcoded in circuit."); } diff --git a/barretenberg/cpp/src/barretenberg/ext/starknet/flavor/ultra_starknet_zk_flavor.hpp b/barretenberg/cpp/src/barretenberg/ext/starknet/flavor/ultra_starknet_zk_flavor.hpp index 31ed20ad5d17..daef84c0c270 100644 --- a/barretenberg/cpp/src/barretenberg/ext/starknet/flavor/ultra_starknet_zk_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/ext/starknet/flavor/ultra_starknet_zk_flavor.hpp @@ -8,149 +8,7 @@ namespace bb { class UltraStarknetZKFlavor : public UltraKeccakZKFlavor { public: - /** - * @brief Derived class that defines proof structure for Ultra zero knowledge proofs, as well as supporting - * functions. - * TODO(https://github.com/AztecProtocol/barretenberg/issues/1355): Deduplicate zk flavor transcripts. 
- */ - - class Transcript : public Transcript_ { - public: - using Base = Transcript_; - // Note: we have a different vector of univariates because the degree for ZK flavors differs - std::vector> zk_sumcheck_univariates; - Commitment libra_concatenation_commitment; - FF libra_sum; - FF libra_claimed_evaluation; - Commitment libra_grand_sum_commitment; - Commitment libra_quotient_commitment; - FF libra_concatenation_eval; - FF libra_shifted_grand_sum_eval; - FF libra_grand_sum_eval; - FF libra_quotient_eval; - Commitment hiding_polynomial_commitment; - FF hiding_polynomial_eval; - - Transcript() = default; - - static std::shared_ptr prover_init_empty() - { - auto transcript = Base::prover_init_empty(); - return std::static_pointer_cast(transcript); - }; - - static std::shared_ptr verifier_init_empty(const std::shared_ptr& transcript) - { - auto verifier_transcript = Base::verifier_init_empty(transcript); - return std::static_pointer_cast(verifier_transcript); - }; - - /** - * @brief Takes a FULL Ultra proof and deserializes it into the public member variables - * that compose the structure. Must be called in order to access the structure of the - * proof. 
- * - */ - void deserialize_full_transcript(size_t public_input_size) - { - // take current proof and put them into the struct - size_t num_frs_read = 0; - auto& proof_data = this->proof_data; - for (size_t i = 0; i < public_input_size; ++i) { - this->public_inputs.push_back(Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - this->w_l_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->w_r_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->w_o_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->lookup_read_counts_comm = - Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->lookup_read_tags_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->w_4_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->lookup_inverses_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->z_perm_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_concatenation_commitment = - Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_sum = Base::template deserialize_from_buffer(proof_data, num_frs_read); - - for (size_t i = 0; i < CONST_PROOF_SIZE_LOG_N; ++i) { - zk_sumcheck_univariates.push_back( - Base::template deserialize_from_buffer>( - proof_data, num_frs_read)); - } - libra_claimed_evaluation = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->sumcheck_evaluations = - Base::template deserialize_from_buffer>(proof_data, num_frs_read); - libra_grand_sum_commitment = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_quotient_commitment = Base::template deserialize_from_buffer(proof_data, num_frs_read); - hiding_polynomial_commitment = Base::template deserialize_from_buffer(proof_data, num_frs_read); - hiding_polynomial_eval = Base::template deserialize_from_buffer(proof_data, 
num_frs_read); - for (size_t i = 0; i < CONST_PROOF_SIZE_LOG_N - 1; ++i) { - this->gemini_fold_comms.push_back( - Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - for (size_t i = 0; i < CONST_PROOF_SIZE_LOG_N; ++i) { - this->gemini_fold_evals.push_back(Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - libra_concatenation_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_shifted_grand_sum_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_grand_sum_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_quotient_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->shplonk_q_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - - this->kzg_w_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - } - - /** - * @brief Serializes the structure variables into a FULL Ultra proof. Should be called - * only if deserialize_full_transcript() was called and some transcript variable was - * modified. 
- * - */ - void serialize_full_transcript() - { - auto& proof_data = this->proof_data; - size_t old_proof_length = proof_data.size(); - proof_data.clear(); // clear proof_data so the rest of the function can replace it - for (const auto& public_input : this->public_inputs) { - Base::template serialize_to_buffer(public_input, proof_data); - } - Base::template serialize_to_buffer(this->w_l_comm, proof_data); - Base::template serialize_to_buffer(this->w_r_comm, proof_data); - Base::template serialize_to_buffer(this->w_o_comm, proof_data); - Base::template serialize_to_buffer(this->lookup_read_counts_comm, proof_data); - Base::template serialize_to_buffer(this->lookup_read_tags_comm, proof_data); - Base::template serialize_to_buffer(this->w_4_comm, proof_data); - Base::template serialize_to_buffer(this->lookup_inverses_comm, proof_data); - Base::template serialize_to_buffer(this->z_perm_comm, proof_data); - Base::template serialize_to_buffer(libra_concatenation_commitment, proof_data); - Base::template serialize_to_buffer(libra_sum, proof_data); - - for (size_t i = 0; i < CONST_PROOF_SIZE_LOG_N; ++i) { - Base::template serialize_to_buffer(zk_sumcheck_univariates[i], proof_data); - } - Base::template serialize_to_buffer(libra_claimed_evaluation, proof_data); - - Base::template serialize_to_buffer(this->sumcheck_evaluations, proof_data); - Base::template serialize_to_buffer(libra_grand_sum_commitment, proof_data); - Base::template serialize_to_buffer(libra_quotient_commitment, proof_data); - Base::template serialize_to_buffer(hiding_polynomial_commitment, proof_data); - Base::template serialize_to_buffer(hiding_polynomial_eval, proof_data); - for (size_t i = 0; i < CONST_PROOF_SIZE_LOG_N - 1; ++i) { - Base::template serialize_to_buffer(this->gemini_fold_comms[i], proof_data); - } - for (size_t i = 0; i < CONST_PROOF_SIZE_LOG_N; ++i) { - Base::template serialize_to_buffer(this->gemini_fold_evals[i], proof_data); - } - Base::template 
serialize_to_buffer(libra_concatenation_eval, proof_data); - Base::template serialize_to_buffer(libra_shifted_grand_sum_eval, proof_data); - Base::template serialize_to_buffer(libra_grand_sum_eval, proof_data); - Base::template serialize_to_buffer(libra_quotient_eval, proof_data); - Base::template serialize_to_buffer(this->shplonk_q_comm, proof_data); - Base::template serialize_to_buffer(this->kzg_w_comm, proof_data); - - BB_ASSERT_EQ(proof_data.size(), old_proof_length); - } - }; + using Transcript = starknet::StarknetTranscript; }; } // namespace bb #endif diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp index e43eef87508b..350e4a3c854b 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp @@ -130,13 +130,18 @@ template struct Precomput * have a native equivalent, and Builder also doesn't have a native equivalent. * * @tparam PrecomputedEntities An instance of PrecomputedEntities_ with affine_element data type and handle type. 
+ * @tparam Codec The codec used for serialization (e.g., FrCodec, U256Codec) + * @tparam HashFunction The hash function used for VK hashing (e.g., Poseidon2, Keccak) */ template class NativeVerificationKey_ : public PrecomputedCommitments { public: using Commitment = typename PrecomputedCommitments::DataType; + using DataType = typename Codec::DataType; uint64_t log_circuit_size = 0; uint64_t num_public_inputs = 0; uint64_t pub_inputs_offset = 0; @@ -177,10 +182,25 @@ class NativeVerificationKey_ : public PrecomputedCommitments { virtual ~NativeVerificationKey_() = default; NativeVerificationKey_() = default; NativeVerificationKey_(const size_t circuit_size, const size_t num_public_inputs) + : log_circuit_size(numeric::get_msb(circuit_size)) + , num_public_inputs(num_public_inputs) {}; + + /** + * @brief Construct VK from precomputed data by committing to polynomials + * @details Only available when CommitmentKeyType is specified (not void) + */ + template + requires(!std::is_void_v) + explicit NativeVerificationKey_(const PrecomputedData& precomputed) + : log_circuit_size(numeric::get_msb(precomputed.metadata.dyadic_size)) + , num_public_inputs(precomputed.metadata.num_public_inputs) + , pub_inputs_offset(precomputed.metadata.pub_inputs_offset) { - this->log_circuit_size = numeric::get_msb(circuit_size); - this->num_public_inputs = num_public_inputs; - }; + CommitmentKey commitment_key{ precomputed.metadata.dyadic_size }; + for (auto [polynomial, commitment] : zip_view(precomputed.polynomials, this->get_all())) { + commitment = commitment_key.commit(polynomial); + } + } /** * @brief Calculate the number of field elements needed for serialization @@ -189,12 +209,11 @@ class NativeVerificationKey_ : public PrecomputedCommitments { static size_t calc_num_data_types() { // Create a temporary instance to get the number of precomputed entities - size_t commitments_size = - PrecomputedCommitments::size() * Transcript::Codec::template calc_num_fields(); + size_t 
commitments_size = PrecomputedCommitments::size() * Codec::template calc_num_fields(); size_t metadata_size = 0; if constexpr (SerializeMetadata == VKSerializationMode::FULL) { // 3 metadata fields + commitments - metadata_size = 3 * Transcript::Codec::template calc_num_fields(); + metadata_size = 3 * Codec::template calc_num_fields(); } // else NO_METADATA: metadata_size remains 0 return metadata_size + commitments_size; @@ -205,15 +224,15 @@ class NativeVerificationKey_ : public PrecomputedCommitments { * * @return std::vector */ - virtual std::vector to_field_elements() const + virtual std::vector to_field_elements() const { - auto serialize = [](const auto& input, std::vector& buffer) { - std::vector input_fields = Transcript::serialize(input); + auto serialize = [](const auto& input, std::vector& buffer) { + std::vector input_fields = Codec::serialize_to_fields(input); buffer.insert(buffer.end(), input_fields.begin(), input_fields.end()); }; - std::vector elements; + std::vector elements; if constexpr (SerializeMetadata == VKSerializationMode::FULL) { serialize(this->log_circuit_size, elements); @@ -235,13 +254,13 @@ class NativeVerificationKey_ : public PrecomputedCommitments { * @brief Populate verification key from field elements * @param elements Field elements to deserialize from */ - size_t from_field_elements(const std::span& elements) + size_t from_field_elements(const std::span& elements) { size_t idx = 0; auto deserialize = [&idx, &elements](T& target) { - size_t size = Transcript::Codec::template calc_num_fields(); - target = Transcript::template deserialize(elements.subspan(idx, size)); + size_t size = Codec::template calc_num_fields(); + target = Codec::template deserialize_from_fields(elements.subspan(idx, size)); idx += size; }; @@ -264,7 +283,7 @@ class NativeVerificationKey_ : public PrecomputedCommitments { */ fr hash() const { - fr vk_hash = Transcript::HashFunction::hash(this->to_field_elements()); + fr vk_hash = 
HashFunction::hash(this->to_field_elements()); return vk_hash; } @@ -275,21 +294,17 @@ class NativeVerificationKey_ : public PrecomputedCommitments { * properly. By tagging the VK components directly, we ensure all VK witnesses have proper origin tags. * * @param domain_separator (currently unused, kept for API compatibility) - * @param transcript Used to extract tag context (transcript_index, round_index) + * @param tag The origin tag extracted from the transcript * @returns The hash of the verification key */ - virtual typename Transcript::DataType hash_with_origin_tagging([[maybe_unused]] const std::string& domain_separator, - Transcript& transcript) const + virtual DataType hash_with_origin_tagging(const OriginTag& tag) const { - using DataType = typename Transcript::DataType; - using Codec = typename Transcript::Codec; + static constexpr bool in_circuit = InCircuit; std::vector vk_elements; - const OriginTag tag = bb::extract_transcript_tag(transcript); - // Tag, serialize, and append to vk_elements auto tag_and_append = [&](const T& component) { - auto frs = bb::tag_and_serialize(component, tag); + auto frs = bb::tag_and_serialize(component, tag); vk_elements.insert(vk_elements.end(), frs.begin(), frs.end()); }; @@ -304,23 +319,36 @@ class NativeVerificationKey_ : public PrecomputedCommitments { } // Sanitize free witness tags before hashing - bb::unset_free_witness_tags(vk_elements); + bb::unset_free_witness_tags(vk_elements); // Hash the tagged elements directly - return Transcript::HashFunction::hash(vk_elements); + return HashFunction::hash(vk_elements); + } + + /** + * @brief An overload that accepts a transcript and extracts the tag internally + * @tparam TranscriptType The transcript type (Codec and HashFunction deduced automatically) + * @param transcript The transcript to extract the origin tag from + * @returns The hash of the verification key + */ + template DataType hash_with_origin_tagging(const Transcript& transcript) const + { + const OriginTag 
tag = bb::extract_transcript_tag(transcript); + return hash_with_origin_tagging(tag); } }; /** * @brief Base Stdlib verification key class. * - * @tparam Builder - * @tparam FF - * @tparam PrecomputedCommitments + * @tparam Builder_ The circuit builder type + * @tparam PrecomputedCommitments The precomputed entities type + * @tparam NativeVerificationKey_ The native VK type (optional, enables native<->stdlib conversion) * @tparam SerializeMetadata Controls how metadata is serialized (FULL, NO_METADATA) */ template class StdlibVerificationKey_ : public PrecomputedCommitments { public: @@ -328,6 +356,7 @@ class StdlibVerificationKey_ : public PrecomputedCommitments { using FF = stdlib::field_t; using Commitment = typename PrecomputedCommitments::DataType; using Transcript = StdlibTranscript; + using NativeVerificationKey = NativeVerificationKey_; FF log_circuit_size; FF num_public_inputs; FF pub_inputs_offset = 0; @@ -335,50 +364,88 @@ class StdlibVerificationKey_ : public PrecomputedCommitments { bool operator==(const StdlibVerificationKey_&) const = default; virtual ~StdlibVerificationKey_() = default; StdlibVerificationKey_() = default; - StdlibVerificationKey_(const size_t circuit_size, const size_t num_public_inputs) + + /** + * @brief Construct a new Verification Key with stdlib types from a provided native verification key + * @details Only available when NativeVerificationKey is specified (not void) + */ + template + requires(!std::is_void_v) + StdlibVerificationKey_(Builder* builder, const std::shared_ptr& native_key) + : log_circuit_size(FF::from_witness(builder, typename FF::native(native_key->log_circuit_size))) + , num_public_inputs(FF::from_witness(builder, typename FF::native(native_key->num_public_inputs))) + , pub_inputs_offset(FF::from_witness(builder, typename FF::native(native_key->pub_inputs_offset))) { - this->log_circuit_size = numeric::get_msb(circuit_size); - this->num_public_inputs = num_public_inputs; - }; + + for (auto [commitment, 
native_commitment] : zip_view(this->get_all(), native_key->get_all())) { + commitment = Commitment::from_witness(builder, native_commitment); + } + } /** - * @brief Serialize verification key to field elements. - * - * @return std::vector + * @brief Deserialize a verification key from a vector of field elements */ - virtual std::vector to_field_elements() const + explicit StdlibVerificationKey_(std::span elements) { using Codec = stdlib::StdlibCodec; - auto serialize_to_field_buffer = [](const T& input, std::vector& buffer) { - std::vector input_fields = Codec::template serialize_to_fields(input); - buffer.insert(buffer.end(), input_fields.begin(), input_fields.end()); - }; + size_t num_frs_read = 0; - std::vector elements; + this->log_circuit_size = Codec::template deserialize_from_frs(elements, num_frs_read); + this->num_public_inputs = Codec::template deserialize_from_frs(elements, num_frs_read); + this->pub_inputs_offset = Codec::template deserialize_from_frs(elements, num_frs_read); - serialize_to_field_buffer(this->log_circuit_size, elements); - serialize_to_field_buffer(this->num_public_inputs, elements); - serialize_to_field_buffer(this->pub_inputs_offset, elements); + for (Commitment& commitment : this->get_all()) { + commitment = Codec::template deserialize_from_frs(elements, num_frs_read); + } + } - for (const Commitment& commitment : this->get_all()) { - serialize_to_field_buffer(commitment, elements); + /** + * @brief Construct a VerificationKey from a set of corresponding witness indices + */ + static StdlibVerificationKey_ from_witness_indices(Builder& builder, + const std::span& witness_indices) + { + std::vector vk_fields; + vk_fields.reserve(witness_indices.size()); + for (const auto& idx : witness_indices) { + vk_fields.emplace_back(FF::from_witness_index(&builder, idx)); } + return StdlibVerificationKey_(vk_fields); + } - return elements; - }; + /** + * @brief Fixes witnesses of VK to be constants. 
+ */ + void fix_witness() + { + this->log_circuit_size.fix_witness(); + this->num_public_inputs.fix_witness(); + this->pub_inputs_offset.fix_witness(); + for (Commitment& commitment : this->get_all()) { + commitment.fix_witness(); + } + } +#ifndef NDEBUG /** - * @brief A model function to show how to compute the VK hash (without the Transcript abstracting things away). - * @details Currently only used in testing. - * @param builder - * @return FF + * @brief Get the native verification key corresponding to this stdlib verification key + * @details Only available when NativeVerificationKey is specified (not void) */ - FF hash() + template + requires(!std::is_void_v) + T get_value() const { - FF vk_hash = stdlib::poseidon2::hash(to_field_elements()); - return vk_hash; + T native_vk; + native_vk.log_circuit_size = static_cast(this->log_circuit_size.get_value()); + native_vk.num_public_inputs = static_cast(this->num_public_inputs.get_value()); + native_vk.pub_inputs_offset = static_cast(this->pub_inputs_offset.get_value()); + for (auto [commitment, native_commitment] : zip_view(this->get_all(), native_vk.get_all())) { + native_commitment = commitment.get_value(); + } + return native_vk; } +#endif /** * @brief Tag VK components and hash. @@ -387,20 +454,18 @@ class StdlibVerificationKey_ : public PrecomputedCommitments { * properly. By tagging the VK components directly, we ensure all VK witnesses have proper origin tags. 
* * @param domain_separator (currently unused, kept for API compatibility) - * @param transcript Used to extract tag context (transcript_index, round_index) + * @param tag The origin tag extracted from the transcript * @returns The hash of the verification key */ - virtual FF hash_with_origin_tagging([[maybe_unused]] const std::string& domain_separator, - Transcript& transcript) const + virtual FF hash_with_origin_tagging(const OriginTag& tag) const { using Codec = stdlib::StdlibCodec; + static constexpr bool in_circuit = true; // StdlibVerificationKey_ is always in-circuit std::vector vk_elements; - const OriginTag tag = bb::extract_transcript_tag(transcript); - // Tag, serialize, and append to vk_elements auto append_tagged = [&](const T& component) { - auto frs = bb::tag_and_serialize(component, tag); + auto frs = bb::tag_and_serialize(component, tag); vk_elements.insert(vk_elements.end(), frs.begin(), frs.end()); }; @@ -415,10 +480,22 @@ class StdlibVerificationKey_ : public PrecomputedCommitments { } // Sanitize free witness tags before hashing - bb::unset_free_witness_tags(vk_elements); + bb::unset_free_witness_tags(vk_elements); // Hash the tagged elements directly - return Transcript::HashFunction::hash(vk_elements); + return stdlib::poseidon2::hash(vk_elements); + } + + /** + * @brief An overload that accepts a transcript and extracts the tag internally + * @tparam TranscriptType The transcript type (Codec and HashFunction deduced automatically) + * @param transcript The transcript to extract the origin tag from + * @returns The hash of the verification key + */ + template FF hash_with_origin_tagging(const Transcript& transcript) const + { + const OriginTag tag = bb::extract_transcript_tag(transcript); + return hash_with_origin_tagging(tag); } }; @@ -482,12 +559,6 @@ template class VKAndHash_ { FF hash; }; -// Because of how Gemini is written, it is important to put the polynomials out in this order. 
-auto get_unshifted_then_shifted(const auto& all_entities) -{ - return concatenate(all_entities.get_unshifted(), all_entities.get_shifted()); -}; - /** * @brief Utility function to find max PARTIAL_RELATION_LENGTH tuples of Relations. * @details The "partial length" of a relation is 1 + the degree of the relation, where any challenges used in the @@ -583,17 +654,23 @@ template class MegaAvmRecursiveFlavor_; // Serialization methods for NativeVerificationKey_. // These should cover all base classes that do not need additional members, as long as the appropriate SerializeMetadata // is set in the template parameters. -template -inline void read(uint8_t const*& it, NativeVerificationKey_& vk) +template +inline void read( + uint8_t const*& it, + NativeVerificationKey_& vk) { using serialize::read; + using VK = NativeVerificationKey_; // Get the size directly from the static method - size_t num_frs = - NativeVerificationKey_::calc_num_data_types(); + size_t num_frs = VK::calc_num_data_types(); // Read exactly num_frs field elements from the buffer - std::vector field_elements(num_frs); + std::vector field_elements(num_frs); for (auto& element : field_elements) { read(it, element); } @@ -601,11 +678,18 @@ inline void read(uint8_t const*& it, NativeVerificationKey_ -inline void write(std::vector& buf, - NativeVerificationKey_ const& vk) +template +inline void write( + std::vector& buf, + NativeVerificationKey_ const& vk) { using serialize::write; + using VK = NativeVerificationKey_; + size_t before = buf.size(); // Convert to field elements and write them directly without length prefix auto field_elements = vk.to_field_elements(); @@ -613,8 +697,7 @@ inline void write(std::vector& buf, write(buf, element); } size_t after = buf.size(); - size_t num_frs = - NativeVerificationKey_::calc_num_data_types(); + size_t num_frs = VK::calc_num_data_types(); BB_ASSERT_EQ(after - before, num_frs * sizeof(bb::fr), "VK serialization mismatch"); } diff --git 
a/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp index ddd24c3331f9..488d37d3e6f6 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/mega_flavor.hpp @@ -42,7 +42,9 @@ class MegaFlavor { using CommitmentKey = bb::CommitmentKey; using VerifierCommitmentKey = bb::VerifierCommitmentKey; using TraceBlocks = MegaExecutionTraceBlocks; - using Transcript = NativeTranscript; + using Codec = FrCodec; + using HashFunction = crypto::Poseidon2; + using Transcript = BaseTranscript; // An upper bound on the size of the Mega-circuits. `CONST_FOLDING_LOG_N` bounds the log circuit sizes in the Chonk // context. @@ -451,40 +453,7 @@ class MegaFlavor { * circuits. * @todo TODO(https://github.com/AztecProtocol/barretenberg/issues/876) */ - class VerificationKey : public NativeVerificationKey_, Transcript> { - public: - VerificationKey() = default; - VerificationKey(const size_t circuit_size, const size_t num_public_inputs) - : NativeVerificationKey_(circuit_size, num_public_inputs) - {} - - VerificationKey(const VerificationKey& vk) = default; - - void set_metadata(const MetaData& metadata) - { - this->log_circuit_size = numeric::get_msb(metadata.dyadic_size); - this->num_public_inputs = metadata.num_public_inputs; - this->pub_inputs_offset = metadata.pub_inputs_offset; - } - - VerificationKey(const PrecomputedData& precomputed) - { - set_metadata(precomputed.metadata); - - CommitmentKey commitment_key{ precomputed.metadata.dyadic_size }; - for (auto [polynomial, commitment] : zip_view(precomputed.polynomials, this->get_all())) { - commitment = commitment_key.commit(polynomial); - } - } - -#ifndef NDEBUG - bool compare(const VerificationKey& other) - { - return NativeVerificationKey_, Transcript>::compare< - NUM_PRECOMPUTED_ENTITIES>(other, CommitmentLabels().get_precomputed()); - } -#endif - }; + using VerificationKey = NativeVerificationKey_, Codec, 
HashFunction, CommitmentKey>; using VKAndHash = VKAndHash_; diff --git a/barretenberg/cpp/src/barretenberg/flavor/mega_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/mega_recursive_flavor.hpp index b1aca7610cf7..09c4bcd47807 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/mega_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/mega_recursive_flavor.hpp @@ -7,7 +7,6 @@ #pragma once #include "barretenberg/commitment_schemes/commitment_key.hpp" #include "barretenberg/commitment_schemes/kzg/kzg.hpp" -#include "barretenberg/common/throw_or_abort.hpp" #include "barretenberg/ecc/curves/bn254/g1.hpp" #include "barretenberg/flavor/flavor.hpp" #include "barretenberg/flavor/flavor_macros.hpp" @@ -100,115 +99,9 @@ template class MegaRecursiveFlavor_ { using Base::Base; }; - /** - * @brief The verification key is responsible for storing the commitments to the precomputed (non-witnessk) - * polynomials used by the verifier. - * - * @note Note the discrepancy with what sort of data is stored here vs in the proving key. We may want to resolve - * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our - * circuits. - * This differs from Mega in how we construct the commitments. 
- */ - class VerificationKey : public StdlibVerificationKey_> { - - public: - using NativeVerificationKey = NativeFlavor::VerificationKey; - - /** - * @brief Construct a new Verification Key with stdlib types from a provided native verification - * key - * - * @param builder - * @param native_key Native verification key from which to extract the precomputed commitments - */ - VerificationKey(CircuitBuilder* builder, const std::shared_ptr& native_key) - { - this->log_circuit_size = FF::from_witness(builder, typename FF::native(native_key->log_circuit_size)); - this->num_public_inputs = FF::from_witness(builder, typename FF::native(native_key->num_public_inputs)); - this->pub_inputs_offset = FF::from_witness(builder, typename FF::native(native_key->pub_inputs_offset)); - - // Generate stdlib commitments (biggroup) from the native counterparts - for (auto [commitment, native_commitment] : zip_view(this->get_all(), native_key->get_all())) { - commitment = Commitment::from_witness(builder, native_commitment); - } - }; - - /** - * @brief Deserialize a verification key from a vector of field elements - * - * @param builder - * @param elements - */ - VerificationKey(std::span elements) - { - using Codec = stdlib::StdlibCodec; - - size_t num_frs_read = 0; - - this->log_circuit_size = Codec::template deserialize_from_frs(elements, num_frs_read); - this->num_public_inputs = Codec::template deserialize_from_frs(elements, num_frs_read); - this->pub_inputs_offset = Codec::template deserialize_from_frs(elements, num_frs_read); - - for (Commitment& commitment : this->get_all()) { - commitment = Codec::template deserialize_from_frs(elements, num_frs_read); - } - - if (num_frs_read != elements.size()) { - throw_or_abort("Invalid buffer length in VerificationKey constuctor from fields!"); - } - } - - /** - * @brief Construct a VerificationKey from a set of corresponding witness indices - * - * @param builder - * @param witness_indices - * @return VerificationKey - */ - static 
VerificationKey from_witness_indices(CircuitBuilder& builder, - const std::span& witness_indices) - { - std::vector vk_fields; - vk_fields.reserve(witness_indices.size()); - for (const auto& idx : witness_indices) { - vk_fields.emplace_back(FF::from_witness_index(&builder, idx)); - } - return VerificationKey(vk_fields); - } - - /** - * @brief Fixes witnesses of VK to be constants. - * - */ - void fix_witness() - { - this->log_circuit_size.fix_witness(); - this->num_public_inputs.fix_witness(); - this->pub_inputs_offset.fix_witness(); - for (Commitment& commitment : this->get_all()) { - commitment.fix_witness(); - } - } - -#ifndef NDEBUG - /** - * @brief Get the native verification key corresponding to this stdlib verification key - * - * @return NativeVerificationKey - */ - NativeVerificationKey get_value() const - { - NativeVerificationKey native_vk; - native_vk.log_circuit_size = static_cast(this->log_circuit_size.get_value()); - native_vk.num_public_inputs = static_cast(this->num_public_inputs.get_value()); - native_vk.pub_inputs_offset = static_cast(this->pub_inputs_offset.get_value()); - for (auto [commitment, native_commitment] : zip_view(this->get_all(), native_vk.get_all())) { - native_commitment = commitment.get_value(); - } - return native_vk; - } -#endif - }; + using VerificationKey = StdlibVerificationKey_, + NativeFlavor::VerificationKey>; /** * @brief A container for the witness commitments. 
diff --git a/barretenberg/cpp/src/barretenberg/flavor/native_verification_key.test.cpp b/barretenberg/cpp/src/barretenberg/flavor/native_verification_key.test.cpp index 29824b1b55e5..3011fb94ab46 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/native_verification_key.test.cpp +++ b/barretenberg/cpp/src/barretenberg/flavor/native_verification_key.test.cpp @@ -71,7 +71,7 @@ TYPED_TEST(NativeVerificationKeyTests, VKHashingConsistency) // (ECCVM and Translator flavors don't support hash_with_origin_tagging as their VKs are hardcoded) if constexpr (!IsAnyOf) { typename Flavor::Transcript transcript; - fr vk_hash_2 = vk.hash_with_origin_tagging("", transcript); + fr vk_hash_2 = vk.hash_with_origin_tagging(transcript); EXPECT_EQ(vk_hash_1, vk_hash_2); } } diff --git a/barretenberg/cpp/src/barretenberg/flavor/stdlib_verification_key.test.cpp b/barretenberg/cpp/src/barretenberg/flavor/stdlib_verification_key.test.cpp index d75451389b85..dfe2dcb4bb75 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/stdlib_verification_key.test.cpp +++ b/barretenberg/cpp/src/barretenberg/flavor/stdlib_verification_key.test.cpp @@ -4,6 +4,8 @@ #include "barretenberg/flavor/ultra_rollup_recursive_flavor.hpp" #include "barretenberg/srs/global_crs.hpp" #include "barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp" +#include "barretenberg/stdlib/hash/poseidon2/poseidon2.hpp" +#include "barretenberg/stdlib/primitives/field/field_conversion.hpp" #include "barretenberg/stdlib/primitives/pairing_points.hpp" #include "barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp" #include "barretenberg/stdlib/translator_vm_verifier/translator_recursive_flavor.hpp" @@ -14,6 +16,35 @@ using namespace bb; +namespace { +/** + * @brief Test helper: Serialize stdlib VK to field elements and hash with poseidon2. + * @details This replicates the removed StdlibVerificationKey_::hash() method for testing purposes. 
+ */ +template auto compute_stdlib_vk_hash(const VK& vk) +{ + using FF = typename VK::FF; + using Builder = typename VK::Builder; + using Codec = stdlib::StdlibCodec; + + auto serialize_to_field_buffer = [](const T& input, std::vector& buffer) { + std::vector input_fields = Codec::template serialize_to_fields(input); + buffer.insert(buffer.end(), input_fields.begin(), input_fields.end()); + }; + + std::vector elements; + serialize_to_field_buffer(vk.log_circuit_size, elements); + serialize_to_field_buffer(vk.num_public_inputs, elements); + serialize_to_field_buffer(vk.pub_inputs_offset, elements); + + for (const auto& commitment : vk.get_all()) { + serialize_to_field_buffer(commitment, elements); + } + + return stdlib::poseidon2::hash(elements); +} +} // namespace + template class StdlibVerificationKeyTests : public ::testing::Test { public: using NativeFlavor = typename Flavor::NativeFlavor; @@ -65,14 +96,14 @@ TYPED_TEST(StdlibVerificationKeyTests, VKHashingConsistency) OuterBuilder outer_builder; StdlibVerificationKey vk(&outer_builder, native_vk); - // First method of hashing: using hash(). - FF vk_hash_1 = vk.hash(); + // First method of hashing: serialize to fields and hash with poseidon2. + FF vk_hash_1 = compute_stdlib_vk_hash(vk); // Second method of hashing: using hash_with_origin_tagging. 
// (ECCVM and Translator recursive flavors don't support hash_with_origin_tagging as their VKs are hardcoded) if constexpr (!IsAnyOf) { StdlibTranscript transcript; - FF vk_hash_2 = vk.hash_with_origin_tagging("", transcript); + FF vk_hash_2 = vk.hash_with_origin_tagging(transcript); EXPECT_EQ(vk_hash_1.get_value(), vk_hash_2.get_value()); } } diff --git a/barretenberg/cpp/src/barretenberg/flavor/test_utils/proof_structures.hpp b/barretenberg/cpp/src/barretenberg/flavor/test_utils/proof_structures.hpp new file mode 100644 index 000000000000..908fb3d88ee0 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/flavor/test_utils/proof_structures.hpp @@ -0,0 +1,607 @@ +// === AUDIT STATUS === +// internal: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_1: { status: not started, auditors: [], date: YYYY-MM-DD } +// external_2: { status: not started, auditors: [], date: YYYY-MM-DD } +// ===================== + +#pragma once + +#include "barretenberg/flavor/mega_flavor.hpp" +#include "barretenberg/flavor/mega_zk_flavor.hpp" +#include "barretenberg/flavor/ultra_flavor.hpp" +#include "barretenberg/flavor/ultra_keccak_flavor.hpp" +#include "barretenberg/flavor/ultra_keccak_zk_flavor.hpp" +#include "barretenberg/flavor/ultra_rollup_flavor.hpp" +#include "barretenberg/flavor/ultra_zk_flavor.hpp" +#include "barretenberg/transcript/transcript.hpp" + +namespace bb { + +/** + * @brief Test utility for deserializing/serializing proof data into typed structures. + * @details This allows tests to inspect and modify specific proof elements. + * Each flavor has its own specialization due to different proof structures. 
+ * + * @tparam Flavor The proving system flavor + */ +template struct StructuredProof; + +// ============================================================================ +// Common base with type definitions and helper methods +// ============================================================================ +template struct StructuredProofHelper { + using FF = typename Flavor::FF; + using Commitment = typename Flavor::Commitment; + using Transcript = typename Flavor::Transcript; + using Codec = typename Transcript::Codec; + using ProofData = typename Transcript::Proof; + static constexpr size_t BATCHED_RELATION_PARTIAL_LENGTH = Flavor::BATCHED_RELATION_PARTIAL_LENGTH; + static constexpr size_t NUM_ALL_ENTITIES = Flavor::NUM_ALL_ENTITIES; + + protected: + template static T deserialize_from_buffer(const ProofData& proof_data, size_t& offset) + { + constexpr size_t element_size = Codec::template calc_num_fields(); + BB_ASSERT_LTE(offset + element_size, proof_data.size()); + auto element_span = std::span{ proof_data }.subspan(offset, element_size); + offset += element_size; + return Codec::template deserialize_from_fields(element_span); + } + + template static void serialize_to_buffer(const T& element, ProofData& proof_data) + { + auto element_fields = Codec::serialize_to_fields(element); + proof_data.insert(proof_data.end(), element_fields.begin(), element_fields.end()); + } +}; + +// ============================================================================ +// Ultra proof structure base with common fields and helper methods +// ============================================================================ +template struct UltraStructuredProofBase : StructuredProofHelper { + using Base = StructuredProofHelper; + using Base::BATCHED_RELATION_PARTIAL_LENGTH; + using Base::NUM_ALL_ENTITIES; + using typename Base::Commitment; + using typename Base::FF; + using typename Base::ProofData; + + // Common fields shared between ZK and non-ZK + std::vector public_inputs; + 
Commitment w_l_comm; + Commitment w_r_comm; + Commitment w_o_comm; + Commitment lookup_read_counts_comm; + Commitment lookup_read_tags_comm; + Commitment w_4_comm; + Commitment z_perm_comm; + Commitment lookup_inverses_comm; + std::vector> sumcheck_univariates; + std::array sumcheck_evaluations; + std::vector gemini_fold_comms; + std::vector gemini_fold_evals; + Commitment shplonk_q_comm; + Commitment kzg_w_comm; + + protected: + void clear_vectors() + { + public_inputs.clear(); + sumcheck_univariates.clear(); + gemini_fold_comms.clear(); + gemini_fold_evals.clear(); + } + + // Helper: deserialize Ultra witness commitments + void deserialize_ultra_witness_comms(const ProofData& proof_data, size_t& offset) + { + w_l_comm = this->template deserialize_from_buffer(proof_data, offset); + w_r_comm = this->template deserialize_from_buffer(proof_data, offset); + w_o_comm = this->template deserialize_from_buffer(proof_data, offset); + lookup_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); + lookup_read_tags_comm = this->template deserialize_from_buffer(proof_data, offset); + w_4_comm = this->template deserialize_from_buffer(proof_data, offset); + lookup_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); + z_perm_comm = this->template deserialize_from_buffer(proof_data, offset); + } + + // Helper: serialize Ultra witness commitments + void serialize_ultra_witness_comms(ProofData& proof_data) const + { + Base::serialize_to_buffer(w_l_comm, proof_data); + Base::serialize_to_buffer(w_r_comm, proof_data); + Base::serialize_to_buffer(w_o_comm, proof_data); + Base::serialize_to_buffer(lookup_read_counts_comm, proof_data); + Base::serialize_to_buffer(lookup_read_tags_comm, proof_data); + Base::serialize_to_buffer(w_4_comm, proof_data); + Base::serialize_to_buffer(lookup_inverses_comm, proof_data); + Base::serialize_to_buffer(z_perm_comm, proof_data); + } + + // Helper: deserialize sumcheck data + void 
deserialize_sumcheck(const ProofData& proof_data, size_t& offset, size_t log_n) + { + for (size_t i = 0; i < log_n; ++i) { + sumcheck_univariates.push_back( + this->template deserialize_from_buffer>(proof_data, + offset)); + } + sumcheck_evaluations = + this->template deserialize_from_buffer>(proof_data, offset); + } + + // Helper: serialize sumcheck data + void serialize_sumcheck(ProofData& proof_data, size_t log_n) const + { + for (size_t i = 0; i < log_n; ++i) { + Base::serialize_to_buffer(sumcheck_univariates[i], proof_data); + } + Base::serialize_to_buffer(sumcheck_evaluations, proof_data); + } + + // Helper: deserialize Gemini/Shplonk/KZG data + void deserialize_pcs(const ProofData& proof_data, size_t& offset, size_t log_n) + { + for (size_t i = 0; i < log_n - 1; ++i) { + gemini_fold_comms.push_back(this->template deserialize_from_buffer(proof_data, offset)); + } + for (size_t i = 0; i < log_n; ++i) { + gemini_fold_evals.push_back(this->template deserialize_from_buffer(proof_data, offset)); + } + shplonk_q_comm = this->template deserialize_from_buffer(proof_data, offset); + kzg_w_comm = this->template deserialize_from_buffer(proof_data, offset); + } + + // Helper: serialize Gemini/Shplonk/KZG data + void serialize_pcs(ProofData& proof_data, size_t log_n) const + { + for (size_t i = 0; i < log_n - 1; ++i) { + Base::serialize_to_buffer(gemini_fold_comms[i], proof_data); + } + for (size_t i = 0; i < log_n; ++i) { + Base::serialize_to_buffer(gemini_fold_evals[i], proof_data); + } + Base::serialize_to_buffer(shplonk_q_comm, proof_data); + Base::serialize_to_buffer(kzg_w_comm, proof_data); + } + + public: + void deserialize(ProofData& proof_data, size_t num_public_inputs, size_t log_n) + { + size_t offset = 0; + clear_vectors(); + + for (size_t i = 0; i < num_public_inputs; ++i) { + public_inputs.push_back(this->template deserialize_from_buffer(proof_data, offset)); + } + deserialize_ultra_witness_comms(proof_data, offset); + deserialize_sumcheck(proof_data, 
offset, log_n); + deserialize_pcs(proof_data, offset, log_n); + } + + void serialize(ProofData& proof_data, size_t log_n) const + { + size_t old_size = proof_data.size(); + proof_data.clear(); + + for (const auto& pi : public_inputs) { + Base::serialize_to_buffer(pi, proof_data); + } + serialize_ultra_witness_comms(proof_data); + serialize_sumcheck(proof_data, log_n); + serialize_pcs(proof_data, log_n); + + BB_ASSERT_EQ(proof_data.size(), old_size); + } +}; + +// ============================================================================ +// Ultra ZK proof structure - extends Ultra with ZK-specific fields +// ============================================================================ +template struct UltraZKStructuredProofBase : UltraStructuredProofBase { + using Base = UltraStructuredProofBase; + using typename Base::Commitment; + using typename Base::FF; + using typename Base::ProofData; + + // ZK-specific fields + Commitment hiding_polynomial_commitment; + Commitment libra_concatenation_commitment; + FF libra_sum; + FF libra_claimed_evaluation; + Commitment libra_grand_sum_commitment; + Commitment libra_quotient_commitment; + FF libra_concatenation_eval; + FF libra_shifted_grand_sum_eval; + FF libra_grand_sum_eval; + FF libra_quotient_eval; + + void deserialize(ProofData& proof_data, size_t num_public_inputs, size_t log_n) + { + size_t offset = 0; + this->clear_vectors(); + + for (size_t i = 0; i < num_public_inputs; ++i) { + this->public_inputs.push_back(this->template deserialize_from_buffer(proof_data, offset)); + } + hiding_polynomial_commitment = this->template deserialize_from_buffer(proof_data, offset); + this->deserialize_ultra_witness_comms(proof_data, offset); + libra_concatenation_commitment = this->template deserialize_from_buffer(proof_data, offset); + libra_sum = this->template deserialize_from_buffer(proof_data, offset); + + // Sumcheck univariates + for (size_t i = 0; i < log_n; ++i) { + this->sumcheck_univariates.push_back( + this->template 
deserialize_from_buffer>( + proof_data, offset)); + } + libra_claimed_evaluation = this->template deserialize_from_buffer(proof_data, offset); + this->sumcheck_evaluations = + this->template deserialize_from_buffer>(proof_data, offset); + libra_grand_sum_commitment = this->template deserialize_from_buffer(proof_data, offset); + libra_quotient_commitment = this->template deserialize_from_buffer(proof_data, offset); + + // Gemini + for (size_t i = 0; i < log_n - 1; ++i) { + this->gemini_fold_comms.push_back(this->template deserialize_from_buffer(proof_data, offset)); + } + for (size_t i = 0; i < log_n; ++i) { + this->gemini_fold_evals.push_back(this->template deserialize_from_buffer(proof_data, offset)); + } + libra_concatenation_eval = this->template deserialize_from_buffer(proof_data, offset); + libra_shifted_grand_sum_eval = this->template deserialize_from_buffer(proof_data, offset); + libra_grand_sum_eval = this->template deserialize_from_buffer(proof_data, offset); + libra_quotient_eval = this->template deserialize_from_buffer(proof_data, offset); + this->shplonk_q_comm = this->template deserialize_from_buffer(proof_data, offset); + this->kzg_w_comm = this->template deserialize_from_buffer(proof_data, offset); + } + + void serialize(ProofData& proof_data, size_t log_n) const + { + size_t old_size = proof_data.size(); + proof_data.clear(); + + for (const auto& pi : this->public_inputs) { + Base::serialize_to_buffer(pi, proof_data); + } + Base::serialize_to_buffer(hiding_polynomial_commitment, proof_data); + this->serialize_ultra_witness_comms(proof_data); + Base::serialize_to_buffer(libra_concatenation_commitment, proof_data); + Base::serialize_to_buffer(libra_sum, proof_data); + + // Sumcheck univariates + for (size_t i = 0; i < log_n; ++i) { + Base::serialize_to_buffer(this->sumcheck_univariates[i], proof_data); + } + Base::serialize_to_buffer(libra_claimed_evaluation, proof_data); + Base::serialize_to_buffer(this->sumcheck_evaluations, proof_data); + 
Base::serialize_to_buffer(libra_grand_sum_commitment, proof_data); + Base::serialize_to_buffer(libra_quotient_commitment, proof_data); + + // Gemini + for (size_t i = 0; i < log_n - 1; ++i) { + Base::serialize_to_buffer(this->gemini_fold_comms[i], proof_data); + } + for (size_t i = 0; i < log_n; ++i) { + Base::serialize_to_buffer(this->gemini_fold_evals[i], proof_data); + } + Base::serialize_to_buffer(libra_concatenation_eval, proof_data); + Base::serialize_to_buffer(libra_shifted_grand_sum_eval, proof_data); + Base::serialize_to_buffer(libra_grand_sum_eval, proof_data); + Base::serialize_to_buffer(libra_quotient_eval, proof_data); + Base::serialize_to_buffer(this->shplonk_q_comm, proof_data); + Base::serialize_to_buffer(this->kzg_w_comm, proof_data); + + BB_ASSERT_EQ(proof_data.size(), old_size); + } +}; + +// ============================================================================ +// Mega proof structure base with common fields and helper methods +// ============================================================================ +template struct MegaStructuredProofBase : StructuredProofHelper { + using Base = StructuredProofHelper; + using Base::BATCHED_RELATION_PARTIAL_LENGTH; + using Base::NUM_ALL_ENTITIES; + using typename Base::Commitment; + using typename Base::FF; + using typename Base::ProofData; + + // Common fields shared between ZK and non-ZK + std::vector public_inputs; + Commitment w_l_comm; + Commitment w_r_comm; + Commitment w_o_comm; + Commitment ecc_op_wire_1_comm; + Commitment ecc_op_wire_2_comm; + Commitment ecc_op_wire_3_comm; + Commitment ecc_op_wire_4_comm; + Commitment calldata_comm; + Commitment calldata_read_counts_comm; + Commitment calldata_read_tags_comm; + Commitment secondary_calldata_comm; + Commitment secondary_calldata_read_counts_comm; + Commitment secondary_calldata_read_tags_comm; + Commitment return_data_comm; + Commitment return_data_read_counts_comm; + Commitment return_data_read_tags_comm; + Commitment 
lookup_read_counts_comm; + Commitment lookup_read_tags_comm; + Commitment w_4_comm; + Commitment lookup_inverses_comm; + Commitment calldata_inverses_comm; + Commitment secondary_calldata_inverses_comm; + Commitment return_data_inverses_comm; + Commitment z_perm_comm; + std::vector> sumcheck_univariates; + std::array sumcheck_evaluations; + std::vector gemini_fold_comms; + std::vector gemini_fold_evals; + Commitment shplonk_q_comm; + Commitment kzg_w_comm; + + protected: + void clear_vectors() + { + public_inputs.clear(); + sumcheck_univariates.clear(); + gemini_fold_comms.clear(); + gemini_fold_evals.clear(); + } + + // Helper: deserialize Mega witness commitments + void deserialize_mega_witness_comms(const ProofData& proof_data, size_t& offset) + { + w_l_comm = this->template deserialize_from_buffer(proof_data, offset); + w_r_comm = this->template deserialize_from_buffer(proof_data, offset); + w_o_comm = this->template deserialize_from_buffer(proof_data, offset); + ecc_op_wire_1_comm = this->template deserialize_from_buffer(proof_data, offset); + ecc_op_wire_2_comm = this->template deserialize_from_buffer(proof_data, offset); + ecc_op_wire_3_comm = this->template deserialize_from_buffer(proof_data, offset); + ecc_op_wire_4_comm = this->template deserialize_from_buffer(proof_data, offset); + calldata_comm = this->template deserialize_from_buffer(proof_data, offset); + calldata_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); + calldata_read_tags_comm = this->template deserialize_from_buffer(proof_data, offset); + secondary_calldata_comm = this->template deserialize_from_buffer(proof_data, offset); + secondary_calldata_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); + secondary_calldata_read_tags_comm = this->template deserialize_from_buffer(proof_data, offset); + return_data_comm = this->template deserialize_from_buffer(proof_data, offset); + return_data_read_counts_comm = this->template 
deserialize_from_buffer(proof_data, offset); + return_data_read_tags_comm = this->template deserialize_from_buffer(proof_data, offset); + lookup_read_counts_comm = this->template deserialize_from_buffer(proof_data, offset); + lookup_read_tags_comm = this->template deserialize_from_buffer(proof_data, offset); + w_4_comm = this->template deserialize_from_buffer(proof_data, offset); + lookup_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); + calldata_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); + secondary_calldata_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); + return_data_inverses_comm = this->template deserialize_from_buffer(proof_data, offset); + z_perm_comm = this->template deserialize_from_buffer(proof_data, offset); + } + + // Helper: serialize Mega witness commitments + void serialize_mega_witness_comms(ProofData& proof_data) const + { + Base::serialize_to_buffer(w_l_comm, proof_data); + Base::serialize_to_buffer(w_r_comm, proof_data); + Base::serialize_to_buffer(w_o_comm, proof_data); + Base::serialize_to_buffer(ecc_op_wire_1_comm, proof_data); + Base::serialize_to_buffer(ecc_op_wire_2_comm, proof_data); + Base::serialize_to_buffer(ecc_op_wire_3_comm, proof_data); + Base::serialize_to_buffer(ecc_op_wire_4_comm, proof_data); + Base::serialize_to_buffer(calldata_comm, proof_data); + Base::serialize_to_buffer(calldata_read_counts_comm, proof_data); + Base::serialize_to_buffer(calldata_read_tags_comm, proof_data); + Base::serialize_to_buffer(secondary_calldata_comm, proof_data); + Base::serialize_to_buffer(secondary_calldata_read_counts_comm, proof_data); + Base::serialize_to_buffer(secondary_calldata_read_tags_comm, proof_data); + Base::serialize_to_buffer(return_data_comm, proof_data); + Base::serialize_to_buffer(return_data_read_counts_comm, proof_data); + Base::serialize_to_buffer(return_data_read_tags_comm, proof_data); + Base::serialize_to_buffer(lookup_read_counts_comm, 
proof_data); + Base::serialize_to_buffer(lookup_read_tags_comm, proof_data); + Base::serialize_to_buffer(w_4_comm, proof_data); + Base::serialize_to_buffer(lookup_inverses_comm, proof_data); + Base::serialize_to_buffer(calldata_inverses_comm, proof_data); + Base::serialize_to_buffer(secondary_calldata_inverses_comm, proof_data); + Base::serialize_to_buffer(return_data_inverses_comm, proof_data); + Base::serialize_to_buffer(z_perm_comm, proof_data); + } + + // Helper: deserialize sumcheck data + void deserialize_sumcheck(const ProofData& proof_data, size_t& offset, size_t log_n) + { + for (size_t i = 0; i < log_n; ++i) { + sumcheck_univariates.push_back( + this->template deserialize_from_buffer>(proof_data, + offset)); + } + sumcheck_evaluations = + this->template deserialize_from_buffer>(proof_data, offset); + } + + // Helper: serialize sumcheck data + void serialize_sumcheck(ProofData& proof_data, size_t log_n) const + { + for (size_t i = 0; i < log_n; ++i) { + Base::serialize_to_buffer(sumcheck_univariates[i], proof_data); + } + Base::serialize_to_buffer(sumcheck_evaluations, proof_data); + } + + // Helper: deserialize Gemini/Shplonk/KZG data + void deserialize_pcs(const ProofData& proof_data, size_t& offset, size_t log_n) + { + for (size_t i = 0; i < log_n - 1; ++i) { + gemini_fold_comms.push_back(this->template deserialize_from_buffer(proof_data, offset)); + } + for (size_t i = 0; i < log_n; ++i) { + gemini_fold_evals.push_back(this->template deserialize_from_buffer(proof_data, offset)); + } + shplonk_q_comm = this->template deserialize_from_buffer(proof_data, offset); + kzg_w_comm = this->template deserialize_from_buffer(proof_data, offset); + } + + // Helper: serialize Gemini/Shplonk/KZG data + void serialize_pcs(ProofData& proof_data, size_t log_n) const + { + for (size_t i = 0; i < log_n - 1; ++i) { + Base::serialize_to_buffer(gemini_fold_comms[i], proof_data); + } + for (size_t i = 0; i < log_n; ++i) { + Base::serialize_to_buffer(gemini_fold_evals[i], 
proof_data); + } + Base::serialize_to_buffer(shplonk_q_comm, proof_data); + Base::serialize_to_buffer(kzg_w_comm, proof_data); + } + + public: + void deserialize(ProofData& proof_data, size_t num_public_inputs, size_t log_n) + { + size_t offset = 0; + clear_vectors(); + + for (size_t i = 0; i < num_public_inputs; ++i) { + public_inputs.push_back(this->template deserialize_from_buffer(proof_data, offset)); + } + deserialize_mega_witness_comms(proof_data, offset); + deserialize_sumcheck(proof_data, offset, log_n); + deserialize_pcs(proof_data, offset, log_n); + } + + void serialize(ProofData& proof_data, size_t log_n) const + { + size_t old_size = proof_data.size(); + proof_data.clear(); + + for (const auto& pi : public_inputs) { + Base::serialize_to_buffer(pi, proof_data); + } + serialize_mega_witness_comms(proof_data); + serialize_sumcheck(proof_data, log_n); + serialize_pcs(proof_data, log_n); + + BB_ASSERT_EQ(proof_data.size(), old_size); + } +}; + +// ============================================================================ +// Mega ZK proof structure - extends Mega with ZK-specific fields +// ============================================================================ +template struct MegaZKStructuredProofBase : MegaStructuredProofBase { + using Base = MegaStructuredProofBase; + using typename Base::Commitment; + using typename Base::FF; + using typename Base::ProofData; + + // ZK-specific fields + Commitment hiding_polynomial_commitment; + Commitment libra_concatenation_commitment; + FF libra_sum; + FF libra_claimed_evaluation; + Commitment libra_grand_sum_commitment; + Commitment libra_quotient_commitment; + FF libra_concatenation_eval; + FF libra_shifted_grand_sum_eval; + FF libra_grand_sum_eval; + FF libra_quotient_eval; + + void deserialize(ProofData& proof_data, size_t num_public_inputs, size_t log_n) + { + size_t offset = 0; + this->clear_vectors(); + + for (size_t i = 0; i < num_public_inputs; ++i) { + this->public_inputs.push_back(this->template 
deserialize_from_buffer(proof_data, offset)); + } + hiding_polynomial_commitment = this->template deserialize_from_buffer(proof_data, offset); + this->deserialize_mega_witness_comms(proof_data, offset); + libra_concatenation_commitment = this->template deserialize_from_buffer(proof_data, offset); + libra_sum = this->template deserialize_from_buffer(proof_data, offset); + + // Sumcheck univariates + for (size_t i = 0; i < log_n; ++i) { + this->sumcheck_univariates.push_back( + this->template deserialize_from_buffer>( + proof_data, offset)); + } + libra_claimed_evaluation = this->template deserialize_from_buffer(proof_data, offset); + this->sumcheck_evaluations = + this->template deserialize_from_buffer>(proof_data, offset); + libra_grand_sum_commitment = this->template deserialize_from_buffer(proof_data, offset); + libra_quotient_commitment = this->template deserialize_from_buffer(proof_data, offset); + + // Gemini + for (size_t i = 0; i < log_n - 1; ++i) { + this->gemini_fold_comms.push_back(this->template deserialize_from_buffer(proof_data, offset)); + } + for (size_t i = 0; i < log_n; ++i) { + this->gemini_fold_evals.push_back(this->template deserialize_from_buffer(proof_data, offset)); + } + libra_concatenation_eval = this->template deserialize_from_buffer(proof_data, offset); + libra_shifted_grand_sum_eval = this->template deserialize_from_buffer(proof_data, offset); + libra_grand_sum_eval = this->template deserialize_from_buffer(proof_data, offset); + libra_quotient_eval = this->template deserialize_from_buffer(proof_data, offset); + this->shplonk_q_comm = this->template deserialize_from_buffer(proof_data, offset); + this->kzg_w_comm = this->template deserialize_from_buffer(proof_data, offset); + } + + void serialize(ProofData& proof_data, size_t log_n) const + { + size_t old_size = proof_data.size(); + proof_data.clear(); + + for (const auto& pi : this->public_inputs) { + Base::serialize_to_buffer(pi, proof_data); + } + 
Base::serialize_to_buffer(hiding_polynomial_commitment, proof_data); + this->serialize_mega_witness_comms(proof_data); + Base::serialize_to_buffer(libra_concatenation_commitment, proof_data); + Base::serialize_to_buffer(libra_sum, proof_data); + + // Sumcheck univariates + for (size_t i = 0; i < log_n; ++i) { + Base::serialize_to_buffer(this->sumcheck_univariates[i], proof_data); + } + Base::serialize_to_buffer(libra_claimed_evaluation, proof_data); + Base::serialize_to_buffer(this->sumcheck_evaluations, proof_data); + Base::serialize_to_buffer(libra_grand_sum_commitment, proof_data); + Base::serialize_to_buffer(libra_quotient_commitment, proof_data); + + // Gemini + for (size_t i = 0; i < log_n - 1; ++i) { + Base::serialize_to_buffer(this->gemini_fold_comms[i], proof_data); + } + for (size_t i = 0; i < log_n; ++i) { + Base::serialize_to_buffer(this->gemini_fold_evals[i], proof_data); + } + Base::serialize_to_buffer(libra_concatenation_eval, proof_data); + Base::serialize_to_buffer(libra_shifted_grand_sum_eval, proof_data); + Base::serialize_to_buffer(libra_grand_sum_eval, proof_data); + Base::serialize_to_buffer(libra_quotient_eval, proof_data); + Base::serialize_to_buffer(this->shplonk_q_comm, proof_data); + Base::serialize_to_buffer(this->kzg_w_comm, proof_data); + + BB_ASSERT_EQ(proof_data.size(), old_size); + } +}; + +// ============================================================================ +// Flavor Specializations +// ============================================================================ + +// Ultra flavors (non-ZK) +// Note: UltraRollupFlavor's IPA proof is handled separately by prover_instance->ipa_proof, +// so StructuredProof only needs to handle the Ultra portion from the transcript. 
+template <> struct StructuredProof : UltraStructuredProofBase {}; +template <> struct StructuredProof : UltraStructuredProofBase {}; +template <> struct StructuredProof : UltraStructuredProofBase {}; + +// Ultra ZK flavors +template <> struct StructuredProof : UltraZKStructuredProofBase {}; +template <> struct StructuredProof : UltraZKStructuredProofBase {}; + +// Mega flavors +template <> struct StructuredProof : MegaStructuredProofBase {}; +template <> struct StructuredProof : MegaZKStructuredProofBase {}; + +} // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_flavor.hpp index a2e93d4a4025..f7c8a55f6cb8 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_flavor.hpp @@ -43,6 +43,9 @@ class UltraFlavor { using Polynomial = bb::Polynomial; using CommitmentKey = bb::CommitmentKey; using VerifierCommitmentKey = bb::VerifierCommitmentKey; + using Codec = FrCodec; + using HashFunction = crypto::Poseidon2; + using Transcript = BaseTranscript; static constexpr size_t VIRTUAL_LOG_N = CONST_PROOF_SIZE_LOG_N; // indicates when evaluating sumcheck, edges can be left as degree-1 monomials @@ -364,129 +367,6 @@ class UltraFlavor { using PrecomputedData = PrecomputedData_; - /** - * @brief Derived class that defines proof structure for Ultra proofs, as well as supporting functions. 
- * - */ - template class Transcript_ : public BaseTranscript { - public: - using Base = BaseTranscript; - - using Base::Base; // Inherit base class constructors - - // Transcript objects defined as public member variables for easy access and modification - std::vector public_inputs; - Commitment w_l_comm; - Commitment w_r_comm; - Commitment w_o_comm; - Commitment lookup_read_counts_comm; - Commitment lookup_read_tags_comm; - Commitment w_4_comm; - Commitment z_perm_comm; - Commitment lookup_inverses_comm; - std::vector> sumcheck_univariates; - std::array sumcheck_evaluations; - std::vector gemini_fold_comms; - std::vector gemini_fold_evals; - Commitment shplonk_q_comm; - Commitment kzg_w_comm; - Transcript_() = default; - - static std::shared_ptr prover_init_empty() - { - auto transcript = Base::prover_init_empty(); - return std::static_pointer_cast(transcript); - }; - - static std::shared_ptr verifier_init_empty(const std::shared_ptr& transcript) - { - auto verifier_transcript = Base::verifier_init_empty(transcript); - return std::static_pointer_cast(verifier_transcript); - }; - - /** - * @brief Takes a FULL Ultra proof and deserializes it into the public member variables - * that compose the structure. Must be called in order to access the structure of the - * proof. 
- * - */ - void deserialize_full_transcript(size_t public_input_size, size_t virtual_log_n = VIRTUAL_LOG_N) - { - // take current proof and put them into the struct - auto& proof_data = this->proof_data; - size_t num_frs_read = 0; - for (size_t i = 0; i < public_input_size; ++i) { - public_inputs.push_back(Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - w_l_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - w_r_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - w_o_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - lookup_read_counts_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - lookup_read_tags_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - w_4_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - lookup_inverses_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - z_perm_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - for (size_t i = 0; i < virtual_log_n; ++i) { - sumcheck_univariates.push_back( - Base::template deserialize_from_buffer>( - proof_data, num_frs_read)); - } - sumcheck_evaluations = - Base::template deserialize_from_buffer>(proof_data, num_frs_read); - for (size_t i = 0; i < virtual_log_n - 1; ++i) { - gemini_fold_comms.push_back( - Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - for (size_t i = 0; i < virtual_log_n; ++i) { - gemini_fold_evals.push_back(Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - shplonk_q_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - - kzg_w_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - } - - /** - * @brief Serializes the structure variables into a FULL Ultra proof. Should be called - * only if deserialize_full_transcript() was called and some transcript variable was - * modified. 
- * - */ - void serialize_full_transcript(size_t virtual_log_n = VIRTUAL_LOG_N) - { - auto& proof_data = this->proof_data; - size_t old_proof_length = proof_data.size(); - proof_data.clear(); // clear proof_data so the rest of the function can replace it - for (const auto& public_input : public_inputs) { - Base::serialize_to_buffer(public_input, proof_data); - } - Base::serialize_to_buffer(w_l_comm, proof_data); - Base::serialize_to_buffer(w_r_comm, proof_data); - Base::serialize_to_buffer(w_o_comm, proof_data); - Base::serialize_to_buffer(lookup_read_counts_comm, proof_data); - Base::serialize_to_buffer(lookup_read_tags_comm, proof_data); - Base::serialize_to_buffer(w_4_comm, proof_data); - Base::serialize_to_buffer(lookup_inverses_comm, proof_data); - Base::serialize_to_buffer(z_perm_comm, proof_data); - for (size_t i = 0; i < virtual_log_n; ++i) { - Base::serialize_to_buffer(sumcheck_univariates[i], proof_data); - } - Base::serialize_to_buffer(sumcheck_evaluations, proof_data); - for (size_t i = 0; i < virtual_log_n - 1; ++i) { - Base::serialize_to_buffer(gemini_fold_comms[i], proof_data); - } - for (size_t i = 0; i < virtual_log_n; ++i) { - Base::serialize_to_buffer(gemini_fold_evals[i], proof_data); - } - Base::serialize_to_buffer(shplonk_q_comm, proof_data); - Base::serialize_to_buffer(kzg_w_comm, proof_data); - - // sanity check to make sure we generate the same length of proof as before. - BB_ASSERT_EQ(proof_data.size(), old_proof_length); - } - }; - - using Transcript = Transcript_>; - /** * @brief The verification key is responsible for storing the commitments to the precomputed (non-witnessk) * polynomials used by the verifier. @@ -495,34 +375,7 @@ class UltraFlavor { * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our * circuits. 
*/ - class VerificationKey : public NativeVerificationKey_, Transcript> { - public: - bool operator==(const VerificationKey&) const = default; - VerificationKey() = default; - VerificationKey(const size_t circuit_size, const size_t num_public_inputs) - : NativeVerificationKey_(circuit_size, num_public_inputs) - {} - - VerificationKey(const PrecomputedData& precomputed) - { - this->log_circuit_size = numeric::get_msb(precomputed.metadata.dyadic_size); - this->num_public_inputs = precomputed.metadata.num_public_inputs; - this->pub_inputs_offset = precomputed.metadata.pub_inputs_offset; - - CommitmentKey commitment_key{ precomputed.metadata.dyadic_size }; - for (auto [polynomial, commitment] : zip_view(precomputed.polynomials, this->get_all())) { - commitment = commitment_key.commit(polynomial); - } - } - -#ifndef NDEBUG - bool compare(const VerificationKey& other) - { - return NativeVerificationKey_, Transcript>::compare< - NUM_PRECOMPUTED_ENTITIES>(other, CommitmentLabels().get_precomputed()); - } -#endif - }; + using VerificationKey = NativeVerificationKey_, Codec, HashFunction, CommitmentKey>; using VKAndHash = VKAndHash_; diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_flavor.hpp index 2e4574af4f10..51a989cf9ae8 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_flavor.hpp @@ -12,7 +12,9 @@ namespace bb { class UltraKeccakFlavor : public bb::UltraFlavor { public: - using Transcript = UltraKeccakFlavor::Transcript_; + using Codec = U256Codec; + using HashFunction = bb::crypto::Keccak; + using Transcript = BaseTranscript; static constexpr bool USE_PADDING = false; @@ -40,46 +42,7 @@ class UltraKeccakFlavor : public bb::UltraFlavor { return OINK_PROOF_LENGTH_WITHOUT_PUB_INPUTS + DECIDER_PROOF_LENGTH(virtual_log_n); } - /** - * @brief The verification key is responsible for storing the commitments 
to the precomputed (non-witnessk) - * polynomials used by the verifier. - * - * @note Note the discrepancy with what sort of data is stored here vs in the proving key. We may want to resolve - * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our - * circuits. - */ - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1094): Add aggregation to the verifier contract so the - // VerificationKey from UltraFlavor can be used - class VerificationKey : public NativeVerificationKey_, Transcript> { - public: - static constexpr size_t VERIFICATION_KEY_LENGTH = - /* 1. Metadata (log_circuit_size, num_public_inputs, pub_inputs_offset) */ (3 * num_elements_fr) + - /* 2. NUM_PRECOMPUTED_ENTITIES commitments */ (NUM_PRECOMPUTED_ENTITIES * num_elements_comm); - VerificationKey() = default; - VerificationKey(const size_t circuit_size, const size_t num_public_inputs) - : NativeVerificationKey_(circuit_size, num_public_inputs) - {} - - VerificationKey(const PrecomputedData& precomputed) - { - this->log_circuit_size = numeric::get_msb(precomputed.metadata.dyadic_size); - this->num_public_inputs = precomputed.metadata.num_public_inputs; - this->pub_inputs_offset = precomputed.metadata.pub_inputs_offset; - - CommitmentKey commitment_key{ precomputed.metadata.dyadic_size }; - for (auto [polynomial, commitment] : zip_view(precomputed.polynomials, this->get_all())) { - commitment = commitment_key.commit(polynomial); - } - } - -#ifndef NDEBUG - bool compare(const VerificationKey& other) - { - return NativeVerificationKey_, Transcript>::compare< - NUM_PRECOMPUTED_ENTITIES>(other, CommitmentLabels().get_precomputed()); - } -#endif - }; + using VerificationKey = NativeVerificationKey_, Codec, HashFunction, CommitmentKey>; // Specialize for Ultra (general case used in UltraRecursive). 
using VerifierCommitments = VerifierCommitments_; diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_zk_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_zk_flavor.hpp index 7cd4020c9c4e..de3b9504d92e 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_zk_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_keccak_zk_flavor.hpp @@ -78,149 +78,7 @@ class UltraKeccakZKFlavor : public UltraKeccakFlavor { /* 13. KZG W commitment */ (num_elements_comm); } - /** - * @brief Derived class that defines proof structure for Ultra zero knowledge proofs, as well as supporting - * functions. - * - */ - class Transcript : public UltraKeccakFlavor::Transcript { - public: - using Base = UltraKeccakFlavor::Transcript::Base; - // Override sumcheck_evaluations to use the correct size for ZK flavor - std::array sumcheck_evaluations; - // Note: we have a different vector of univariates because the degree for ZK flavors differs - std::vector> zk_sumcheck_univariates; - Commitment libra_concatenation_commitment; - FF libra_sum; - FF libra_claimed_evaluation; - Commitment libra_grand_sum_commitment; - Commitment libra_quotient_commitment; - FF libra_concatenation_eval; - FF libra_shifted_grand_sum_eval; - FF libra_grand_sum_eval; - FF libra_quotient_eval; - Commitment hiding_polynomial_commitment; - FF hiding_polynomial_eval; - - Transcript() = default; - - static std::shared_ptr prover_init_empty() - { - auto transcript = Base::prover_init_empty(); - return std::static_pointer_cast(transcript); - }; - - static std::shared_ptr verifier_init_empty(const std::shared_ptr& transcript) - { - auto verifier_transcript = Base::verifier_init_empty(transcript); - return std::static_pointer_cast(verifier_transcript); - }; - - /** - * @brief Takes a FULL Ultra proof and deserializes it into the public member variables - * that compose the structure. Must be called in order to access the structure of the - * proof. 
- * - */ - void deserialize_full_transcript(size_t public_input_size, size_t virtual_log_n = VIRTUAL_LOG_N) - { - // take current proof and put them into the struct - size_t num_frs_read = 0; - auto& proof_data = this->proof_data; - for (size_t i = 0; i < public_input_size; ++i) { - this->public_inputs.push_back(Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - hiding_polynomial_commitment = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->w_l_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->w_r_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->w_o_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->lookup_read_counts_comm = - Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->lookup_read_tags_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->w_4_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->lookup_inverses_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->z_perm_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_concatenation_commitment = - Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_sum = Base::template deserialize_from_buffer(proof_data, num_frs_read); - - for (size_t i = 0; i < virtual_log_n; ++i) { - zk_sumcheck_univariates.push_back( - Base::template deserialize_from_buffer>( - proof_data, num_frs_read)); - } - libra_claimed_evaluation = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->sumcheck_evaluations = - Base::template deserialize_from_buffer>(proof_data, num_frs_read); - libra_grand_sum_commitment = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_quotient_commitment = Base::template deserialize_from_buffer(proof_data, num_frs_read); - for (size_t i = 0; i < virtual_log_n - 1; ++i) { - 
this->gemini_fold_comms.push_back( - Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - for (size_t i = 0; i < virtual_log_n; ++i) { - this->gemini_fold_evals.push_back(Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - libra_concatenation_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_shifted_grand_sum_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_grand_sum_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_quotient_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->shplonk_q_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - - this->kzg_w_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - } - - /** - * @brief Serializes the structure variables into a FULL Ultra proof. Should be called - * only if deserialize_full_transcript() was called and some transcript variable was - * modified. 
- * - */ - void serialize_full_transcript(size_t virtual_log_n = VIRTUAL_LOG_N) - { - auto& proof_data = this->proof_data; - size_t old_proof_length = proof_data.size(); - proof_data.clear(); // clear proof_data so the rest of the function can replace it - for (const auto& public_input : this->public_inputs) { - Base::serialize_to_buffer(public_input, proof_data); - } - Base::serialize_to_buffer(hiding_polynomial_commitment, proof_data); - Base::serialize_to_buffer(this->w_l_comm, proof_data); - Base::serialize_to_buffer(this->w_r_comm, proof_data); - Base::serialize_to_buffer(this->w_o_comm, proof_data); - Base::serialize_to_buffer(this->lookup_read_counts_comm, proof_data); - Base::serialize_to_buffer(this->lookup_read_tags_comm, proof_data); - Base::serialize_to_buffer(this->w_4_comm, proof_data); - Base::serialize_to_buffer(this->lookup_inverses_comm, proof_data); - Base::serialize_to_buffer(this->z_perm_comm, proof_data); - Base::serialize_to_buffer(libra_concatenation_commitment, proof_data); - Base::serialize_to_buffer(libra_sum, proof_data); - - for (size_t i = 0; i < virtual_log_n; ++i) { - Base::serialize_to_buffer(zk_sumcheck_univariates[i], proof_data); - } - Base::serialize_to_buffer(libra_claimed_evaluation, proof_data); - - Base::serialize_to_buffer(this->sumcheck_evaluations, proof_data); - Base::serialize_to_buffer(libra_grand_sum_commitment, proof_data); - Base::serialize_to_buffer(libra_quotient_commitment, proof_data); - for (size_t i = 0; i < virtual_log_n - 1; ++i) { - Base::serialize_to_buffer(this->gemini_fold_comms[i], proof_data); - } - for (size_t i = 0; i < virtual_log_n; ++i) { - Base::serialize_to_buffer(this->gemini_fold_evals[i], proof_data); - } - Base::serialize_to_buffer(libra_concatenation_eval, proof_data); - Base::serialize_to_buffer(libra_shifted_grand_sum_eval, proof_data); - Base::serialize_to_buffer(libra_grand_sum_eval, proof_data); - Base::serialize_to_buffer(libra_quotient_eval, proof_data); - 
Base::serialize_to_buffer(this->shplonk_q_comm, proof_data); - Base::serialize_to_buffer(this->kzg_w_comm, proof_data); - - BB_ASSERT_EQ(proof_data.size(), old_proof_length); - } - }; - + using Transcript = UltraKeccakFlavor::Transcript; using VKAndHash = UltraKeccakFlavor::VKAndHash; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive_flavor.hpp index b98113632b78..cdf488f410e7 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive_flavor.hpp @@ -90,94 +90,9 @@ template class UltraRecursiveFlavor_ { static constexpr size_t NUM_SUBRELATIONS = NativeFlavor::NUM_SUBRELATIONS; using SubrelationSeparator = FF; - /** - * @brief The verification key is responsible for storing the commitments to the precomputed (non-witnessk) - * polynomials used by the verifier. - * - * @note Note the discrepancy with what sort of data is stored here vs in the proving key. We may want to resolve - * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our - * circuits. 
- */ - class VerificationKey : public StdlibVerificationKey_> { - public: - using NativeVerificationKey = NativeFlavor::VerificationKey; - - /** - * @brief Construct a new Verification Key with stdlib types from a provided native verification key - * - * @param builder - * @param native_key Native verification key from which to extract the precomputed commitments - */ - VerificationKey(CircuitBuilder* builder, const std::shared_ptr& native_key) - { - this->log_circuit_size = FF::from_witness(builder, typename FF::native(native_key->log_circuit_size)); - this->num_public_inputs = FF::from_witness(builder, typename FF::native(native_key->num_public_inputs)); - this->pub_inputs_offset = FF::from_witness(builder, typename FF::native(native_key->pub_inputs_offset)); - - // Generate stdlib commitments (biggroup) from the native counterparts - for (auto [commitment, native_commitment] : zip_view(this->get_all(), native_key->get_all())) { - commitment = Commitment::from_witness(builder, native_commitment); - } - }; - - /** - * @brief Deserialize a verification key from a vector of field elements - * - * @param builder - * @param elements - */ - VerificationKey(std::span elements) - { - using Codec = stdlib::StdlibCodec; - - size_t num_frs_read = 0; - - this->log_circuit_size = Codec::template deserialize_from_frs(elements, num_frs_read); - this->num_public_inputs = Codec::template deserialize_from_frs(elements, num_frs_read); - this->pub_inputs_offset = Codec::template deserialize_from_frs(elements, num_frs_read); - - for (Commitment& commitment : this->get_all()) { - commitment = Codec::template deserialize_from_frs(elements, num_frs_read); - } - } - - /** - * @brief Construct a VerificationKey from a set of corresponding witness indices - * - * @param builder - * @param witness_indices - * @return VerificationKey - */ - static VerificationKey from_witness_indices(CircuitBuilder& builder, - const std::span& witness_indices) - { - std::vector vk_fields; - 
vk_fields.reserve(witness_indices.size()); - for (const auto& idx : witness_indices) { - vk_fields.emplace_back(FF::from_witness_index(&builder, idx)); - } - return VerificationKey(vk_fields); - } - -#ifndef NDEBUG - /** - * @brief Get the native verification key corresponding to this stdlib verification key - * - * @return NativeVerificationKey - */ - NativeVerificationKey get_value() const - { - NativeVerificationKey native_vk; - native_vk.log_circuit_size = static_cast(this->log_circuit_size.get_value()); - native_vk.num_public_inputs = static_cast(this->num_public_inputs.get_value()); - native_vk.pub_inputs_offset = static_cast(this->pub_inputs_offset.get_value()); - for (auto [commitment, native_commitment] : zip_view(this->get_all(), native_vk.get_all())) { - native_commitment = commitment.get_value(); - } - return native_vk; - } -#endif - }; + using VerificationKey = StdlibVerificationKey_, + typename NativeFlavor::VerificationKey>; /** * @brief A field element for each entity of the flavor. These entities represent the prover polynomials diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_flavor.hpp index 68c1c1d7252b..f98d014ca025 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_flavor.hpp @@ -7,63 +7,19 @@ #pragma once #include "barretenberg/commitment_schemes/ipa/ipa.hpp" #include "barretenberg/flavor/ultra_flavor.hpp" -#include "barretenberg/special_public_inputs/special_public_inputs.hpp" namespace bb { +/** + * @brief UltraRollupFlavor extends UltraFlavor with IPA proof support. + * @details The only difference from UltraFlavor is that PROOF_LENGTH_WITHOUT_PUB_INPUTS includes IPA_PROOF_LENGTH. 
+ */ class UltraRollupFlavor : public bb::UltraFlavor { public: - static constexpr size_t num_frs_comm = FrCodec::calc_num_fields(); - static constexpr size_t num_frs_fr = FrCodec::calc_num_fields(); static constexpr size_t PROOF_LENGTH_WITHOUT_PUB_INPUTS(size_t virtual_log_n = VIRTUAL_LOG_N) { return UltraFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS(virtual_log_n) + IPA_PROOF_LENGTH; } - static constexpr size_t BACKEND_PUB_INPUTS_SIZE = RollupIO::PUBLIC_INPUTS_SIZE; - - using UltraFlavor::UltraFlavor; - - /** - * @brief The verification key is responsible for storing the commitments to the precomputed (non-witnessk) - * polynomials used by the verifier. - * - * @note Note the discrepancy with what sort of data is stored here vs in the proving key. We may want to resolve - * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our - * circuits. - */ - class VerificationKey : public NativeVerificationKey_, Transcript> { - public: - virtual ~VerificationKey() = default; - - bool operator==(const VerificationKey&) const = default; - VerificationKey() = default; - VerificationKey(const size_t circuit_size, const size_t num_public_inputs) - : NativeVerificationKey_(circuit_size, num_public_inputs) - {} - - VerificationKey(const PrecomputedData& precomputed) - { - this->log_circuit_size = numeric::get_msb(precomputed.metadata.dyadic_size); - this->num_public_inputs = precomputed.metadata.num_public_inputs; - this->pub_inputs_offset = precomputed.metadata.pub_inputs_offset; - - CommitmentKey commitment_key{ precomputed.metadata.dyadic_size }; - for (auto [polynomial, commitment] : zip_view(precomputed.polynomials, this->get_all())) { - commitment = commitment_key.commit(polynomial); - } - } - -#ifndef NDEBUG - bool compare(const VerificationKey& other) - { - return NativeVerificationKey_, Transcript>::compare< - NUM_PRECOMPUTED_ENTITIES>(other, CommitmentLabels().get_precomputed()); - } -#endif - }; - - using 
VerifierCommitments = VerifierCommitments_; - using VKAndHash = VKAndHash_; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_recursive_flavor.hpp index 498dcc88aa71..16fbd61dae1e 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_rollup_recursive_flavor.hpp @@ -5,142 +5,23 @@ // ===================== #pragma once -#include "barretenberg/commitment_schemes/commitment_key.hpp" -#include "barretenberg/commitment_schemes/kzg/kzg.hpp" -#include "barretenberg/ecc/curves/bn254/g1.hpp" -#include "barretenberg/flavor/flavor.hpp" -#include "barretenberg/flavor/flavor_macros.hpp" #include "barretenberg/flavor/ultra_recursive_flavor.hpp" #include "barretenberg/flavor/ultra_rollup_flavor.hpp" -#include "barretenberg/polynomials/barycentric.hpp" -#include "barretenberg/polynomials/evaluation_domain.hpp" -#include "barretenberg/polynomials/univariate.hpp" -#include "barretenberg/stdlib/primitives/curves/bn254.hpp" -#include "barretenberg/stdlib/primitives/field/field.hpp" -#include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" namespace bb { /** * @brief The recursive counterpart to the "native" UltraRollupFlavor. - * @details This flavor can be used to instantiate a recursive Mega Honk verifier for a proof created using the - * MegaZKFlavor. It is similar in structure to its native counterpart with two main differences: 1) the - * curve types are stdlib types (e.g. field_t instead of field) and 2) it does not specify any Prover related types - * (e.g. Polynomial, ExtendedEdges, etc.) since we do not emulate prover computation in circuits, i.e. it only makes - * sense to instantiate a Verifier with this flavor. - * - * @note Unlike conventional flavors, "recursive" flavors are templated by a builder (much like native vs stdlib types). 
- * This is because the flavor itself determines the details of the underlying verifier algorithm (i.e. the set of - * relations), while the Builder determines the arithmetization of that algorithm into a circuit. + * @details Nearly identical to UltraRecursiveFlavor_, but with NativeFlavor = UltraRollupFlavor. + * This distinction is needed for: + * 1. Concept checks (e.g., HasIPAAccumulator) that trigger different code paths + * 2. Access to UltraRollupFlavor-specific PROOF_LENGTH_WITHOUT_PUB_INPUTS * * @tparam BuilderType Determines the arithmetization of the verifier circuit defined based on this flavor. */ template class UltraRollupRecursiveFlavor_ : public UltraRecursiveFlavor_ { public: - using CircuitBuilder = BuilderType; // Determines arithmetization of circuit instantiated with this flavor using NativeFlavor = UltraRollupFlavor; - using Curve = UltraRecursiveFlavor_::Curve; - using PCS = KZG; - using GroupElement = typename Curve::Element; - using Commitment = typename Curve::Element; - using FF = typename Curve::ScalarField; - using VerifierCommitmentKey = bb::VerifierCommitmentKey; - using NativeVerificationKey = NativeFlavor::VerificationKey; - using Transcript = UltraRecursiveFlavor_::Transcript; - - /** - * @brief The verification key is responsible for storing the commitments to the precomputed (non-witnessk) - * polynomials used by the verifier. - * - * @note Note the discrepancy with what sort of data is stored here vs in the proving key. We may want to resolve - * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our - * circuits. 
- */ - class VerificationKey - : public StdlibVerificationKey_> { - public: - using NativeVerificationKey = NativeFlavor::VerificationKey; - - /** - * @brief Construct a new Verification Key with stdlib types from a provided native verification key - * - * @param builder - * @param native_key Native verification key from which to extract the precomputed commitments - */ - VerificationKey(CircuitBuilder* builder, const std::shared_ptr& native_key) - { - this->log_circuit_size = FF::from_witness(builder, typename FF::native(native_key->log_circuit_size)); - this->num_public_inputs = FF::from_witness(builder, typename FF::native(native_key->num_public_inputs)); - this->pub_inputs_offset = FF::from_witness(builder, typename FF::native(native_key->pub_inputs_offset)); - - // Generate stdlib commitments (biggroup) from the native counterparts - for (auto [commitment, native_commitment] : zip_view(this->get_all(), native_key->get_all())) { - commitment = Commitment::from_witness(builder, native_commitment); - } - }; - - /** - * @brief Deserialize a verification key from a vector of field elements - * - * @param builder - * @param elements - */ - VerificationKey(std::span elements) - { - using Codec = stdlib::StdlibCodec; - - size_t num_frs_read = 0; - - this->log_circuit_size = Codec::template deserialize_from_frs(elements, num_frs_read); - this->num_public_inputs = Codec::template deserialize_from_frs(elements, num_frs_read); - this->pub_inputs_offset = Codec::template deserialize_from_frs(elements, num_frs_read); - - for (Commitment& commitment : this->get_all()) { - commitment = Codec::template deserialize_from_frs(elements, num_frs_read); - } - } - - /** - * @brief Construct a VerificationKey from a set of corresponding witness indices - * - * @param builder - * @param witness_indices - * @return VerificationKey - */ - static VerificationKey from_witness_indices(CircuitBuilder& builder, - const std::span witness_indices) - { - std::vector vk_fields; - 
vk_fields.reserve(witness_indices.size()); - for (const auto& idx : witness_indices) { - vk_fields.emplace_back(FF::from_witness_index(&builder, idx)); - } - return VerificationKey(vk_fields); - } - -#ifndef NDEBUG - /** - * @brief Get the native verification key corresponding to this stdlib verification key - * - * @return NativeVerificationKey - */ - NativeVerificationKey get_value() const - { - NativeVerificationKey native_vk; - native_vk.log_circuit_size = static_cast(this->log_circuit_size.get_value()); - native_vk.num_public_inputs = static_cast(this->num_public_inputs.get_value()); - native_vk.pub_inputs_offset = static_cast(this->pub_inputs_offset.get_value()); - for (auto [commitment, native_commitment] : zip_view(this->get_all(), native_vk.get_all())) { - native_commitment = commitment.get_value(); - } - return native_vk; - } -#endif - }; - - // Reuse the VerifierCommitments from Ultra - using VerifierCommitments = UltraFlavor::VerifierCommitments_; - using VKAndHash = VKAndHash_; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_zk_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_zk_flavor.hpp index 802b5cdf42fd..925545c7f6df 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_zk_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_zk_flavor.hpp @@ -34,8 +34,6 @@ class UltraZKFlavor : public UltraFlavor { static constexpr size_t BATCHED_RELATION_PARTIAL_LENGTH = UltraFlavor::BATCHED_RELATION_PARTIAL_LENGTH + 1; static_assert(BATCHED_RELATION_PARTIAL_LENGTH == Curve::LIBRA_UNIVARIATES_LENGTH, "LIBRA_UNIVARIATES_LENGTH must be equal to UltraZKFlavor::BATCHED_RELATION_PARTIAL_LENGTH"); - static constexpr size_t num_frs_comm = FrCodec::calc_num_fields(); - static constexpr size_t num_frs_fr = FrCodec::calc_num_fields(); // Override AllEntities to use ZK version (includes gemini_masking_poly via MaskingEntities) template using AllEntities = UltraFlavor::AllEntities_; @@ -86,150 +84,5 @@ class 
UltraZKFlavor : public UltraFlavor { /* 12. Shplonk Q commitment */ (num_frs_comm) + /* 13. KZG W commitment */ (num_frs_comm); } - - /** - * @brief Derived class that defines proof structure for Ultra zero knowledge proofs, as well as supporting - * functions. - * TODO(https://github.com/AztecProtocol/barretenberg/issues/1355): Deduplicate zk flavor transcripts. - */ - class Transcript_ : public UltraFlavor::Transcript { - public: - using Base = UltraFlavor::Transcript::Base; - // Override sumcheck_evaluations to use the correct size for ZK flavor - std::array sumcheck_evaluations; - // Note: we have a different vector of univariates because the degree for ZK flavors differs - std::vector> zk_sumcheck_univariates; - Commitment libra_concatenation_commitment; - FF libra_sum; - FF libra_claimed_evaluation; - Commitment libra_grand_sum_commitment; - Commitment libra_quotient_commitment; - FF libra_concatenation_eval; - FF libra_shifted_grand_sum_eval; - FF libra_grand_sum_eval; - FF libra_quotient_eval; - Commitment hiding_polynomial_commitment; - FF hiding_polynomial_eval; - - Transcript_() = default; - - static std::shared_ptr prover_init_empty() - { - auto transcript = Base::prover_init_empty(); - return std::static_pointer_cast(transcript); - }; - - static std::shared_ptr verifier_init_empty(const std::shared_ptr& transcript) - { - auto verifier_transcript = Base::verifier_init_empty(transcript); - return std::static_pointer_cast(verifier_transcript); - }; - - /** - * @brief Takes a FULL Ultra proof and deserializes it into the public member variables - * that compose the structure. Must be called in order to access the structure of the - * proof. 
- * - */ - void deserialize_full_transcript(size_t num_public_inputs, size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) - { - // take current proof and put them into the struct - size_t num_frs_read = 0; - auto& proof_data = this->proof_data; - for (size_t i = 0; i < num_public_inputs; ++i) { - this->public_inputs.push_back(Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - hiding_polynomial_commitment = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->w_l_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->w_r_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->w_o_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->lookup_read_counts_comm = - Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->lookup_read_tags_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->w_4_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->lookup_inverses_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->z_perm_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_concatenation_commitment = - Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_sum = Base::template deserialize_from_buffer(proof_data, num_frs_read); - - for (size_t i = 0; i < virtual_log_n; ++i) { - zk_sumcheck_univariates.push_back( - Base::template deserialize_from_buffer>( - proof_data, num_frs_read)); - } - libra_claimed_evaluation = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->sumcheck_evaluations = - Base::template deserialize_from_buffer>(proof_data, num_frs_read); - libra_grand_sum_commitment = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_quotient_commitment = Base::template deserialize_from_buffer(proof_data, num_frs_read); - for (size_t i = 0; i < virtual_log_n - 1; 
++i) { - this->gemini_fold_comms.push_back( - Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - for (size_t i = 0; i < virtual_log_n; ++i) { - this->gemini_fold_evals.push_back(Base::template deserialize_from_buffer(proof_data, num_frs_read)); - } - libra_concatenation_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_shifted_grand_sum_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_grand_sum_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - libra_quotient_eval = Base::template deserialize_from_buffer(proof_data, num_frs_read); - this->shplonk_q_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - - this->kzg_w_comm = Base::template deserialize_from_buffer(proof_data, num_frs_read); - } - - /** - * @brief Serializes the structure variables into a FULL Ultra proof. Should be called - * only if deserialize_full_transcript() was called and some transcript variable was - * modified. 
- * - */ - void serialize_full_transcript(size_t virtual_log_n = CONST_PROOF_SIZE_LOG_N) - { - auto& proof_data = this->proof_data; - size_t old_proof_length = proof_data.size(); - proof_data.clear(); // clear proof_data so the rest of the function can replace it - for (const auto& input : this->public_inputs) { - Base::serialize_to_buffer(input, proof_data); - } - Base::serialize_to_buffer(hiding_polynomial_commitment, proof_data); - Base::serialize_to_buffer(this->w_l_comm, proof_data); - Base::serialize_to_buffer(this->w_r_comm, proof_data); - Base::serialize_to_buffer(this->w_o_comm, proof_data); - Base::serialize_to_buffer(this->lookup_read_counts_comm, proof_data); - Base::serialize_to_buffer(this->lookup_read_tags_comm, proof_data); - Base::serialize_to_buffer(this->w_4_comm, proof_data); - Base::serialize_to_buffer(this->lookup_inverses_comm, proof_data); - Base::serialize_to_buffer(this->z_perm_comm, proof_data); - Base::serialize_to_buffer(libra_concatenation_commitment, proof_data); - Base::serialize_to_buffer(libra_sum, proof_data); - - for (size_t i = 0; i < virtual_log_n; ++i) { - Base::serialize_to_buffer(zk_sumcheck_univariates[i], proof_data); - } - Base::serialize_to_buffer(libra_claimed_evaluation, proof_data); - - Base::serialize_to_buffer(this->sumcheck_evaluations, proof_data); - Base::serialize_to_buffer(libra_grand_sum_commitment, proof_data); - Base::serialize_to_buffer(libra_quotient_commitment, proof_data); - for (size_t i = 0; i < virtual_log_n - 1; ++i) { - Base::serialize_to_buffer(this->gemini_fold_comms[i], proof_data); - } - for (size_t i = 0; i < virtual_log_n; ++i) { - Base::serialize_to_buffer(this->gemini_fold_evals[i], proof_data); - } - Base::serialize_to_buffer(libra_concatenation_eval, proof_data); - Base::serialize_to_buffer(libra_shifted_grand_sum_eval, proof_data); - Base::serialize_to_buffer(libra_grand_sum_eval, proof_data); - Base::serialize_to_buffer(libra_quotient_eval, proof_data); - 
Base::serialize_to_buffer(this->shplonk_q_comm, proof_data); - Base::serialize_to_buffer(this->kzg_w_comm, proof_data); - - BB_ASSERT_EQ(proof_data.size(), old_proof_length); - } - }; - using Transcript = Transcript_; - using VKAndHash = UltraFlavor::VKAndHash; }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/multilinear_batching/multilinear_batching_claims.hpp b/barretenberg/cpp/src/barretenberg/multilinear_batching/multilinear_batching_claims.hpp index 2e05389173ce..376dc2a1ecb0 100644 --- a/barretenberg/cpp/src/barretenberg/multilinear_batching/multilinear_batching_claims.hpp +++ b/barretenberg/cpp/src/barretenberg/multilinear_batching/multilinear_batching_claims.hpp @@ -70,18 +70,17 @@ template struct MultilinearBatchingVerifierClaim { /** * @brief Tag claim components and hash. + * @tparam Codec The codec used for serde + * @tparam HashFn The hash function to use */ - template - FF hash_with_origin_tagging([[maybe_unused]] const std::string& domain_separator, T& transcript) const + template FF hash_with_origin_tagging(const OriginTag& tag) const { - using Codec = typename T::Codec; + constexpr bool in_circuit = Curve::is_stdlib_type; std::vector claim_elements; - const OriginTag tag = bb::extract_transcript_tag(transcript); - // Tag, serialize, and append auto append_tagged = [&](const U& component) { - auto frs = bb::tag_and_serialize(component, tag); + auto frs = bb::tag_and_serialize(component, tag); claim_elements.insert(claim_elements.end(), frs.begin(), frs.end()); }; @@ -97,10 +96,22 @@ template struct MultilinearBatchingVerifierClaim { append_tagged(shifted_commitment); // Sanitize free witness tags before hashing - bb::unset_free_witness_tags(claim_elements); + bb::unset_free_witness_tags(claim_elements); // Hash the tagged elements directly - return T::HashFunction::hash(claim_elements); + return HashFn::hash(claim_elements); + } + + /** + * @brief Convenience overload that accepts a transcript and extracts the tag internally + * 
@tparam TranscriptType The transcript type (Codec and HashFn deduced automatically) + * @param transcript The transcript to extract the origin tag from + * @returns The hash of the claim + */ + template FF hash_with_origin_tagging(const TranscriptType& transcript) const + { + const OriginTag tag = bb::extract_transcript_tag(transcript); + return hash_with_origin_tagging(tag); } }; diff --git a/barretenberg/cpp/src/barretenberg/numeric/bitop/sparse_form.hpp b/barretenberg/cpp/src/barretenberg/numeric/bitop/sparse_form.hpp index 4eefeef1cf8d..80ae180d0979 100644 --- a/barretenberg/cpp/src/barretenberg/numeric/bitop/sparse_form.hpp +++ b/barretenberg/cpp/src/barretenberg/numeric/bitop/sparse_form.hpp @@ -6,9 +6,9 @@ #pragma once #include "barretenberg/common/throw_or_abort.hpp" +#include #include #include -#include #include #include "../uint256/uint256.hpp" @@ -157,7 +157,6 @@ template class sparse_int { private: std::array limbs; uint64_t value; - uint64_t sparse_value; }; } // namespace bb::numeric diff --git a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp index f9ab4c62f29b..f4517d1b6681 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/eccvm_verifier/eccvm_recursive_flavor.hpp @@ -79,6 +79,7 @@ class ECCVMRecursiveFlavor { */ class VerificationKey : public StdlibVerificationKey_, + NativeVerificationKey, VKSerializationMode::NO_METADATA> { public: Commitment pcs_g1_identity; @@ -110,10 +111,9 @@ class ECCVMRecursiveFlavor { * @brief Unused function because vk is hardcoded in recursive verifier, so no transcript hashing is needed. 
* * @param domain_separator - * @param transcript + * @param tag */ - FF hash_with_origin_tagging([[maybe_unused]] const std::string& domain_separator, - [[maybe_unused]] Transcript& transcript) const override + FF hash_with_origin_tagging([[maybe_unused]] const OriginTag& tag) const override { throw_or_abort("Not intended to be used because vk is hardcoded in circuit."); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/README.md b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/README.md new file mode 100644 index 000000000000..fd60018e615e --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/README.md @@ -0,0 +1,245 @@ +# SHA-256 Circuit Implementation + +## Overview + +Circuit-friendly implementation of the SHA-256 compression function using lookup tables and sparse form arithmetic. + +## Contents + +1. [API](#api) +2. [Algorithm](#algorithm) - Standard SHA-256 compression function +3. [Implementation](#implementation) - Circuit-specific techniques +4. [Testing](#testing) - Verification approach +5. [Security Considerations](#security-considerations) + +## API + +### `sha256_block` + +```cpp +static std::array sha256_block( + const std::array& h_init, + const std::array& input +); +``` + +Applies the SHA-256 compression function to a single 512-bit message block. + +**Parameters:** +- `h_init`: The 8-word (256-bit) hash state. For the first block, the standard SHA-256 IV. For subsequent blocks, the output of the previous compression. +- `input`: The 16-word (512-bit) message block to compress. + +**Output:** The updated 8-word hash state. + +**Note:** Only the compression function is implemented since this is all that is needed to support `Sha256Compression` constraints in DSL. 
+ +## Algorithm + +Standard SHA-256 compression function (FIPS 180-4): + +### Message Schedule Extension + +Extends the 16 input words to 64 words: + +``` +W[i] = σ₁(W[i-2]) + W[i-7] + σ₀(W[i-15]) + W[i-16] (mod 2³²) for i = 16..63 + +σ₀(x) = ROTR⁷(x) ⊕ ROTR¹⁸(x) ⊕ SHR³(x) +σ₁(x) = ROTR¹⁷(x) ⊕ ROTR¹⁹(x) ⊕ SHR¹⁰(x) +``` + +### Compression Rounds + +64 rounds updating 8 working variables (a, b, c, d, e, f, g, h) initialized from h_init: + +``` +T1 = h + Σ₁(e) + Ch(e,f,g) + K[i] + W[i] +T2 = Σ₀(a) + Maj(a,b,c) +(a,b,c,d,e,f,g,h) = (T1+T2, a, b, c, d+T1, e, f, g) + +Σ₀(a) = ROTR²(a) ⊕ ROTR¹³(a) ⊕ ROTR²²(a) +Σ₁(e) = ROTR⁶(e) ⊕ ROTR¹¹(e) ⊕ ROTR²⁵(e) +Ch(e,f,g) = (e ∧ f) ⊕ (¬e ∧ g) +Maj(a,b,c) = (a ∧ b) ⊕ (a ∧ c) ⊕ (b ∧ c) + +K[i] are the 64 SHA-256 round constants (FIPS 180-4) +``` + +### Final Addition + +``` +H[i] = H[i] + (a,b,c,d,e,f,g,h)[i] (mod 2³²) +``` + +## Implementation + +### Sparse Form Encoding + +XOR and bitwise operations are expensive in circuits. We use "sparse form" to convert them to additions: + +- Each bit is stored in its own digit (base B, one digit per bit position) +- XOR of bits becomes addition of digits (mod B) + +Two sparse bases are used across three contexts: + +- **Base-28** for Choose + Σ₁: encodes `7*rotation_sum + (e + 2f + 3g)` per digit +- **Base-16** for Majority + Σ₀: encodes `4*rotation_sum + (a + b + c)` per digit +- **Base-16** for Message Extension (σ₀ + σ₁): encodes `4*σ₀_digit + σ₁_digit` per digit + +The multipliers (4 and 7) separate the two XOR results or the rotation result from the Boolean function encoding within each digit. + +### Handling Rotations + +In sparse form, rotating a value by `r` bits is equivalent to multiplying by `B^r` (where B is the base). 
However, 32-bit values are decomposed into 3-4 limbs by the input lookup tables, and this creates complications: + +**Simple case (contiguous after rotation):** When a limb's bits remain contiguous and don't land at bit position 0, the rotation is handled by multiplying that limb by the appropriate power of B. These are called "rotation multipliers" or "rotation coefficients" (e.g., `get_choose_rotation_multipliers()`). + +**Complex cases (handled by lookup tables):** +- **Bits split across limb boundary:** When rotation causes a limb's bits to wrap around (some bits go to high positions, others to low), a lookup table is used to compute this contribution. +- **Bits land at position 0:** Handled by existing lookup tables containing the raw limb values. + +The input lookup tables (C3 column) return the combined contribution from all complex cases, while the code applies rotation multipliers to handle the simple cases. Correction factors reconcile any coefficient mismatches between limbs. + +See the appendix "Detailed Example: Choose + Σ₁" for a worked example showing how Σ₁'s three rotations (6, 11, 25) are split between multipliers and lookup tables. + +### Lookup Tables + +Six lookup tables handle the sparse form conversions: + +| Operation | Input Table | Output Table | +|-----------|-------------|--------------| +| Message extension (σ₀ + σ₁) | `SHA256_WITNESS_INPUT` | `SHA256_WITNESS_OUTPUT` | +| Choose + Σ₁ | `SHA256_CH_INPUT` | `SHA256_CH_OUTPUT` | +| Majority + Σ₀ | `SHA256_MAJ_INPUT` | `SHA256_MAJ_OUTPUT` | + +Input tables decompose 32-bit values into sparse limbs. Output tables normalize the sparse sum back to a 32-bit result. + +See `plookup_tables/sha256.hpp` for detailed table documentation. 
+ +### Function Mapping + +| Algorithm | Implementation | +|-----------|----------------| +| σ₀ + σ₁ | `extend_witness()` | +| Σ₁ + Ch | `choose_with_sigma1()` | +| Σ₀ + Maj | `majority_with_sigma0()` | +| mod 2³² addition | `add_normalize()` | + +## Testing + +Correctness is verified through a layered approach: + +### Native Implementation (`crypto/sha256/`) + +The native (non-circuit) SHA-256 implementation (full hash algorithm with padding) is tested against official NIST test vectors, verifying the core algorithm is correct. + +### Circuit Implementation (`stdlib/hash/sha256/`) + +The circuit tests use NIST vectors with manual padding - `sha256.test.cpp` tests `sha256_block` directly against NIST vectors ("abc" and 56-byte messages), manually constructing padded blocks to verify the compression function produces correct output. + +### Differential Fuzzing (`sha256.fuzzer.cpp`) + +The fuzzer generates random inputs and compares circuit output against native output for the compression function. This provides broad coverage without requiring exhaustive test vectors. + +--- + +## Appendix: Detailed Example: Choose + Σ₁ + +This section walks through how `choose_with_sigma1()` (in `sha256.cpp`) computes `Σ₁(e) + Ch(e,f,g)` using sparse form. + +### What We're Computing + +Per bit position `i`: +- `Σ₁(e)_i = e[i-6] ⊕ e[i-11] ⊕ e[i-25]` (3-way XOR of rotated bits) +- `Ch(e,f,g)_i = (e_i ∧ f_i) ⊕ (¬e_i ∧ g_i)` (if e=1, take f; else take g) + +### Sparse Encoding for Choose + +The Choose function can be computed from the weighted sum `e + 2f + 3g`: + +| e | f | g | e + 2f + 3g | Ch(e,f,g) | +|---|---|---|-------------|-----------| +| 0 | 0 | 0 | 0 | 0 | +| 1 | 0 | 0 | 1 | 0 | +| 0 | 1 | 0 | 2 | 0 | +| 0 | 0 | 1 | 3 | 1 | +| 1 | 1 | 0 | 3 | 1 | +| 1 | 0 | 1 | 4 | 0 | +| 0 | 1 | 1 | 5 | 1 | +| 1 | 1 | 1 | 6 | 1 | + +The weighted sum uniquely determines Ch(e,f,g) (note: 3 maps to 1 in both cases where it occurs). 
+ +This encoding is implemented in `get_choose_output_table()` (`plookup_tables/sha256.hpp`). + +### Combined Encoding + +We encode both Σ₁ and Ch in a single base-28 digit: + +``` +digit = 7 * rotation_sum + (e + 2f + 3g) +``` + +Where: +- `rotation_sum` ∈ {0,1,2,3} (sum of 3 rotated bits) +- `e + 2f + 3g` ∈ {0,1,2,3,4,5,6} +- Max digit = 7*3 + 6 = 27, hence base-28 + +The constant 7 is `SPARSE_MULT` in `choose_with_sigma1()`. + +### Handling Rotations + +Computing Σ₁(e) requires rotating e by 6, 11, and 25 bits. In sparse form, rotation by `r` bits is multiplication by `B^r` (where B=28). But there's a complication: the 32-bit value is split into three limbs by the input table: + +- L0: bits 0-10 (11 bits) +- L1: bits 11-21 (11 bits) +- L2: bits 22-31 (10 bits) + +See `get_choose_input_table()` in `plookup_tables/sha256.hpp` for limb structure. + +When a rotation is applied, each limb's bits either: +- **Stay contiguous (not at bit 0)**: Handled by multiplying by `B^(new_position)`. +- **Split across boundary OR land at bit 0**: Handled by the lookup table (C3). + +For Σ₁'s three rotations (6, 11, 25), each limb behaves differently: + +| Limb | Bits | Rot 6 | Rot 11 | Rot 25 | +|------|------|-------|--------|--------| +| L0 | 0-10 | splits | contiguous | contiguous | +| L1 | 11-21 | contiguous | lands at 0 | contiguous | +| L2 | 22-31 | contiguous | contiguous | splits | + +The **rotation coefficients** `c0, c1, c2` (from `get_choose_rotation_multipliers()`) combine the contiguous contributions for each limb. The **lookup table (C3)** handles the splits and lands-at-0 cases. + +### Correction Factors + +For efficiency, we want to compute `e_sparse + SPARSE_MULT * Σ₁(e_sparse)` in one operation via multiplication of the full value by a single coefficient. Recall `e_sparse = L0 + B^11*L1 + B^22*L2`. The coefficient `c0` is designed for L0, so we compute `e_sparse * c0`. 
But this gives incorrect coefficients for L1 and L2: + +- L0 gets coefficient `c0` (correct) +- L1 gets coefficient `B^11 * c0` (incorrect - should be `c1`) +- L2 gets coefficient `B^22 * c0` (incorrect - should be `c2`) + +Two correction factors fix this: +- **L1 correction**: `δ1 = c1 - B^11 * c0`, baked into C3 via `limb1_table_correction` in `get_choose_input_table()` +- **L2 correction**: `δ2 = c2 - B^22 * c0`, returned by `get_choose_rotation_multipliers()` and applied explicitly in `choose_with_sigma1()` + +### Computation Flow + +1. **Input table lookup** (`SHA256_CH_INPUT` via `get_choose_input_table()`): + - C1: Reconstructs normal form (for constraint) + - C2[0]: Full sparse form `e_sparse` + - C2[2]: Individual limb `S2` (for L2 correction) + - C3[0]: Split-boundary rotation contributions with L1 correction baked in + +2. **Build sparse sum** (in `choose_with_sigma1()`): + ``` + result = SPARSE_MULT * C3[0] // split-boundary rotations (with L1 correction) + + e_sparse * (c0 * SPARSE_MULT + 1) // contiguous rotations + e itself + + S2 * (δ2 * SPARSE_MULT) // L2 coefficient correction + + 2 * f_sparse + 3 * g_sparse // Choose encoding + ``` + The `c0 * SPARSE_MULT + 1` term computes both `SPARSE_MULT * Σ₁(e)` and `e` from the same multiplication. + +3. **Output table lookup** (`SHA256_CH_OUTPUT` via `get_choose_output_table()`): Each base-28 digit maps to `Σ₁_bit + Ch_bit` + +The lookup tables encode all the bit-level logic, so the circuit only performs field additions and table lookups. 
diff --git a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp index deedabba48d8..24d03511c697 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.cpp @@ -4,6 +4,23 @@ // external_2: { status: not started, auditors: [], commit: } // ===================== +/** + * @file sha256.cpp + * @brief Circuit implementation of SHA-256 compression function using lookup tables. + * + * This implementation uses "sparse form" representations to efficiently compute SHA-256 operations: + * - XOR operations become additions in sparse form (one digit per bit) + * - Rotations become coefficient multiplications or table lookups + * - Boolean functions (Choose, Majority) are computed via lookup tables + * + * Two sparse bases are used: + * - Base-28 for Choose + Σ₁: encodes 7*rotation + (e + 2f + 3g) + * - Base-16 for Majority + Σ₀: encodes 4*rotation + (a + b + c) + * - Base-16 with pre-rotated limbs for message schedule extension + * + * See plookup_tables/sha256.hpp for the details of the lookup tables used herein. + */ + #include "sha256.hpp" #include "barretenberg/stdlib/primitives/field/field.hpp" @@ -16,28 +33,21 @@ using namespace bb; namespace bb::stdlib { using namespace bb::plookup; -constexpr size_t get_num_blocks(const size_t num_bits) -{ - constexpr size_t extra_bits = 65UL; - - return ((num_bits + extra_bits) / 512UL) + ((num_bits + extra_bits) % 512UL > 0); -} - -template void SHA256::prepare_constants(std::array, 8>& input) -{ - for (size_t i = 0; i < 8; i++) { - input[i] = init_constants[i]; - } -} - +/** + * @brief Convert a 32-bit value to sparse limbs form for message schedule extension. + * + * Uses SHA256_WITNESS_INPUT lookup table to decompose input into sparse limbs and + * pre-rotated correction terms needed for σ₀/σ₁ computation. 
+ * + * See get_witness_extension_input_table() in plookup_tables/sha256.hpp for table structure. + */ template -SHA256::sparse_witness_limbs SHA256::convert_witness(const field_t& w) +SHA256::sparse_witness_limbs SHA256::convert_witness(const field_t& input) { - typedef field_t field_pt; - - sparse_witness_limbs result(w); + using field_pt = field_t; - const auto lookup = plookup_read::get_lookup_accumulators(MultiTableId::SHA256_WITNESS_INPUT, w); + sparse_witness_limbs result(input); + const auto lookup = plookup_read::get_lookup_accumulators(MultiTableId::SHA256_WITNESS_INPUT, input); result.sparse_limbs = std::array{ lookup[ColumnIdx::C2][0], @@ -45,7 +55,7 @@ SHA256::sparse_witness_limbs SHA256::convert_witness(const fie lookup[ColumnIdx::C2][2], lookup[ColumnIdx::C2][3], }; - result.rotated_limbs = std::array{ + result.rotated_limb_corrections = std::array{ lookup[ColumnIdx::C3][0], lookup[ColumnIdx::C3][1], lookup[ColumnIdx::C3][2], @@ -56,21 +66,37 @@ SHA256::sparse_witness_limbs SHA256::convert_witness(const fie return result; } +/** + * @brief Extend the 16-word message block to 64 words per SHA-256 specification. + * + * SHA-256 Spec (FIPS 180-4, Section 6.2.2): + * W[i] = σ₁(W[i-2]) + W[i-7] + σ₀(W[i-15]) + W[i-16] (mod 2³²) for i = 16..63 + * + * Uses base-16 sparse form to compute σ₀ + σ₁ efficiently via lookup tables, + * then adds W[i-7] and W[i-16] and reduces mod 2³². 
+ * + * @param w_in The 16 input message words (512 bits total) + * @return 64 extended message schedule words + */ template std::array, 64> SHA256::extend_witness(const std::array, 16>& w_in) { - typedef field_t field_pt; + using field_pt = field_t; Builder* ctx = w_in[0].get_context(); std::array::sparse_witness_limbs, 64> w_sparse; + + // Populate initial 16 words from input (sparse form computed lazily as needed) for (size_t i = 0; i < 16; ++i) { w_sparse[i] = SHA256::sparse_witness_limbs(w_in[i]); - if (!ctx && w_in[i].get_context()) { + // Extract builder context from inputs + if ((ctx == nullptr) && w_in[i].get_context()) { ctx = w_in[i].get_context(); } } + // Compute extended words W[16..63] for (size_t i = 16; i < 64; ++i) { auto& w_left = w_sparse[i - 15]; auto& w_right = w_sparse[i - 2]; @@ -82,6 +108,8 @@ std::array, 64> SHA256::extend_witness(const std::arra w_right = convert_witness(w_right.normal); } + // Compute the (partially) rotated sparse limbs for σ₀ + // Note: remaining contributions accounted for via w_left.rotated_limb_corrections std::array left{ w_left.sparse_limbs[0] * left_multipliers[0], w_left.sparse_limbs[1] * left_multipliers[1], @@ -89,6 +117,8 @@ std::array, 64> SHA256::extend_witness(const std::arra w_left.sparse_limbs[3] * left_multipliers[3], }; + // Compute the (partially) rotated sparse limbs for σ₁ + // Note: remaining contributions accounted for via w_right.rotated_limb_corrections std::array right{ w_right.sparse_limbs[0] * right_multipliers[0], w_right.sparse_limbs[1] * right_multipliers[1], @@ -96,33 +126,52 @@ std::array, 64> SHA256::extend_witness(const std::arra w_right.sparse_limbs[3] * right_multipliers[3], }; + // Compute σ₀(w[i-15]) in sparse form where σ₀(x) = (x >>> 7) ⊕ (x >>> 18) ⊕ (x >> 3). + // Each sparse digit holds the sum of contributions from the three rotation/shift operations (digit value in + // {0,1,2,3}). 
The fr(4) scaling positions σ₀'s contribution in the upper 2 bits of each 4-bit digit slot: when + // combined with σ₁ (unscaled, in lower 2 bits), each digit becomes 4*σ₀_digit + σ₁_digit ∈ [0,15]. const field_pt left_xor_sparse = - left[0].add_two(left[1], left[2]).add_two(left[3], w_left.rotated_limbs[1]) * fr(4); + left[0].add_two(left[1], left[2]).add_two(left[3], w_left.rotated_limb_corrections[1]) * fr(4); + // Compute σ₀(w[i-15]) + σ₁(w[i-2]) in sparse form where σ₁(x) = (x >>> 17) ⊕ (x >>> 19) ⊕ (x >> 10). const field_pt xor_result_sparse = right[0] .add_two(right[1], right[2]) - .add_two(right[3], w_right.rotated_limbs[2]) - .add_two(w_right.rotated_limbs[3], left_xor_sparse); + .add_two(right[3], w_right.rotated_limb_corrections[2]) + .add_two(w_right.rotated_limb_corrections[3], left_xor_sparse); + // Normalize the sparse representation via a lookup to obtain the genuine result σ₀ + σ₁ field_pt xor_result = plookup_read::read_from_1_to_2_table(SHA256_WITNESS_OUTPUT, xor_result_sparse); - // TODO NORMALIZE WITH RANGE CHECK - + // Compute W[i] = σ₁(W[i-2]) + W[i-7] + σ₀(W[i-15]) + W[i-16] field_pt w_out_raw = xor_result.add_two(w_sparse[i - 16].normal, w_sparse[i - 7].normal); + + // Natively compute value reduced to 32 bits per SHA-256 spec + const uint64_t w_out_modded = w_out_raw.get_value().from_montgomery_form().data[0] & 0xffffffffULL; + field_pt w_out; if (w_out_raw.is_constant()) { - w_out = field_pt(ctx, fr(w_out_raw.get_value().from_montgomery_form().data[0] & (uint64_t)0xffffffffULL)); - + w_out = field_pt(ctx, fr(w_out_modded)); } else { - w_out = witness_t( - ctx, fr(w_out_raw.get_value().from_montgomery_form().data[0] & (uint64_t)0xffffffffULL)); + // Establish w_out as the 32-bit reduction of w_out_raw via w_out_raw = w_out + divisor*2^32 + w_out = witness_t(ctx, fr(w_out_modded)); static constexpr fr inv_pow_two = fr(2).pow(32).invert(); - // If we multiply the field elements by constants separately and then subtract, then the divisor is - 
// going to be in a normalized state right after subtraction and the call to .normalize() won't add - // gates + field_pt w_out_raw_inv_pow_two = w_out_raw * inv_pow_two; field_pt w_out_inv_pow_two = w_out * inv_pow_two; field_pt divisor = w_out_raw_inv_pow_two - w_out_inv_pow_two; + // AUDITTODO: The 3-bit constraint is currently necessary due to unconstrained inputs. + // + // w_out_raw = xor_result + w[i-16] + w[i-7], where: + // - xor_result: 32-bit (from SHA256_WITNESS_OUTPUT lookup) + // - w[i-16]: At i=16, this is input[0] which is NEVER lookup-constrained + // - w[i-7]: At i=16..20, this is input[9..13] which are used BEFORE being converted + // + // If all three inputs were 32-bit constrained, max sum = 3*(2^32-1), so divisor <= 2 + // and a 2-bit constraint would suffice. However, with unconstrained inputs (~35 bits + // per the add_normalize overflow slack), divisor could exceed 7 and reject the proof. + // + // This constraint implicitly enforces input bounds - if we add explicit 32-bit input + // constraints (see AUDITTODO in sha256_block), this could be tightened to 2 bits. divisor.create_range_constraint(3); } @@ -130,104 +179,186 @@ std::array, 64> SHA256::extend_witness(const std::arra } std::array w_extended; - for (size_t i = 0; i < 64; ++i) { w_extended[i] = w_sparse[i].normal; } return w_extended; } +/** + * @brief Convert a field element to sparse form for use in the Choose function + * + * Performs a lookup to convert a normal 32-bit value to its base-28 sparse representation. + * Base 28 is required because the Choose lookup table index formula is `7 × rotation_sum + (e + 2f + 3g)`, + * where rotation_sum ranges 0-3 and the weighted sum (e + 2f + 3g) ranges 0-6, giving max index 27. 
+ * + * @param input The field element to convert (expected to be a 32-bit value) + * @return sparse_value containing both normal and sparse representations + */ template -SHA256::sparse_value SHA256::map_into_choose_sparse_form(const field_t& e) +SHA256::sparse_value SHA256::map_into_choose_sparse_form(const field_t& input) { sparse_value result; - result.normal = e; - result.sparse = plookup_read::read_from_1_to_2_table(SHA256_CH_INPUT, e); + result.normal = input; + result.sparse = plookup_read::read_from_1_to_2_table(SHA256_CH_INPUT, input); return result; } +/** + * @brief Convert a field element to sparse form for use in the Majority function + * + * Performs a lookup to convert a normal 32-bit value to its base-16 sparse representation. + * Base 16 is required because the Majority lookup table index formula is `4 × rotation_sum + (a + b + c)`, + * where rotation_sum ranges 0-3 and (a + b + c) ranges 0-3, giving max index 15. + * + * @param input The field element to convert (expected to be a 32-bit value) + * @return sparse_value containing both normal and sparse representations + */ template -SHA256::sparse_value SHA256::map_into_maj_sparse_form(const field_t& e) +SHA256::sparse_value SHA256::map_into_maj_sparse_form(const field_t& input) { sparse_value result; - result.normal = e; - result.sparse = plookup_read::read_from_1_to_2_table(SHA256_MAJ_INPUT, e); + result.normal = input; + result.sparse = plookup_read::read_from_1_to_2_table(SHA256_MAJ_INPUT, input); return result; } +/** + * @brief Compute Σ₁(e) + Ch(e,f,g) for SHA-256 compression rounds. + * + * Combines two operations efficiently using base-28 sparse form: + * - Σ₁(e) = (e >>> 6) ^ (e >>> 11) ^ (e >>> 25) + * - Ch(e,f,g) = (e & f) ^ (~e & g) + * + * Sparse encoding: 7*[rotations] + [e + 2f + 3g], where factor 7 separates rotation + * contributions (0-3) from Choose encoding (0-6) within each base-28 digit. 
+ * + * See get_choose_input_table() in plookup_tables/sha256.hpp for the mathematical derivation. + * + * @param e Input/output: e.normal read, e.sparse populated as side effect + * @param f Input: must have .sparse already populated + * @param g Input: must have .sparse already populated + * @return Σ₁(e) + Ch(e,f,g) as a constrained 32-bit field element + */ template -field_t SHA256::choose(sparse_value& e, const sparse_value& f, const sparse_value& g) +field_t SHA256::choose_with_sigma1(sparse_value& e, const sparse_value& f, const sparse_value& g) { - typedef field_t field_pt; + using field_pt = field_t; + // Separates rotation contributions (0-3) from Choose encoding (0-6) in each base-28 digit + constexpr fr SPARSE_MULT = fr(7); const auto lookup = plookup_read::get_lookup_accumulators(SHA256_CH_INPUT, e.normal); const auto rotation_coefficients = sha256_tables::get_choose_rotation_multipliers(); field_pt rotation_result = lookup[ColumnIdx::C3][0]; - e.sparse = lookup[ColumnIdx::C2][0]; + field_pt sparse_L2 = lookup[ColumnIdx::C2][2]; - field_pt sparse_limb_3 = lookup[ColumnIdx::C2][2]; - - // where is the middle limb used - field_pt xor_result = (rotation_result * fr(7)) - .add_two(e.sparse * (rotation_coefficients[0] * fr(7) + fr(1)), - sparse_limb_3 * (rotation_coefficients[2] * fr(7))); + // Compute e + 7*Σ₁(e) in sparse form + field_pt xor_result = (rotation_result * SPARSE_MULT) + .add_two(e.sparse * (rotation_coefficients[0] * SPARSE_MULT + fr(1)), + sparse_L2 * (rotation_coefficients[2] * SPARSE_MULT)); + // Add 2f + 3g to get e + 7*Σ₁(e) + 2f + 3g (each digit in 0..27) field_pt choose_result_sparse = xor_result.add_two(f.sparse + f.sparse, g.sparse + g.sparse + g.sparse); + // Normalize via lookup: each digit maps to Σ₁(e)_i + Ch(e,f,g)_i field_pt choose_result = plookup_read::read_from_1_to_2_table(SHA256_CH_OUTPUT, choose_result_sparse); return choose_result; } +/** + * @brief Compute Σ₀(a) + Maj(a,b,c) for SHA-256 compression rounds. 
+ * + * Combines two operations efficiently using base-16 sparse form: + * - Σ₀(a) = (a >>> 2) ^ (a >>> 13) ^ (a >>> 22) + * - Maj(a,b,c) = (a & b) ^ (a & c) ^ (b & c) + * + * Sparse encoding: 4*[rotations] + [a + b + c], where factor 4 separates rotation + * contributions (0-3) from Majority encoding (0-3) within each base-16 digit. + * + * See get_majority_input_table() in plookup_tables/sha256.hpp for the mathematical derivation. + * + * @param a Input/output: a.normal read, a.sparse populated as side effect + * @param b Input: must have .sparse already populated + * @param c Input: must have .sparse already populated + * @return Σ₀(a) + Maj(a,b,c) as a constrained 32-bit field element + */ template -field_t SHA256::majority(sparse_value& a, const sparse_value& b, const sparse_value& c) +field_t SHA256::majority_with_sigma0(sparse_value& a, const sparse_value& b, const sparse_value& c) { - typedef field_t field_pt; + using field_pt = field_t; + // Separates rotation contributions (0-3) from Majority encoding (0-3) in each base-16 digit + constexpr fr SPARSE_MULT = fr(4); const auto lookup = plookup_read::get_lookup_accumulators(SHA256_MAJ_INPUT, a.normal); const auto rotation_coefficients = sha256_tables::get_majority_rotation_multipliers(); - field_pt rotation_result = - lookup[ColumnIdx::C3][0]; // last index of first row gives accumulating sum of "non-trival" wraps + // first row of 3rd column gives accumulating sum of "non-trivial" wraps + field_pt rotation_result = lookup[ColumnIdx::C3][0]; a.sparse = lookup[ColumnIdx::C2][0]; - // use these values to compute trivial wraps somehow - field_pt sparse_accumulator_2 = lookup[ColumnIdx::C2][1]; + field_pt sparse_L1_acc = lookup[ColumnIdx::C2][1]; - field_pt xor_result = (rotation_result * fr(4)) - .add_two(a.sparse * (rotation_coefficients[0] * fr(4) + fr(1)), - sparse_accumulator_2 * (rotation_coefficients[1] * fr(4))); + // Compute a + 4*Σ₀(a) in sparse form + field_pt xor_result = (rotation_result * 
SPARSE_MULT) + .add_two(a.sparse * (rotation_coefficients[0] * SPARSE_MULT + fr(1)), + sparse_L1_acc * (rotation_coefficients[1] * SPARSE_MULT)); + // Add b + c to get a + 4*Σ₀(a) + b + c (each digit in 0..15) field_pt majority_result_sparse = xor_result.add_two(b.sparse, c.sparse); + // Normalize via lookup: each digit maps to Σ₀(a)_i + Maj(a,b,c)_i field_pt majority_result = plookup_read::read_from_1_to_2_table(SHA256_MAJ_OUTPUT, majority_result_sparse); return majority_result; } +/** + * @brief Compute (a + b) mod 2^32 with circuit constraints. + * + * Used throughout SHA-256 to add 32-bit values and reduce modulo 2^32. + * Constrains: result = a + b - overflow * 2^32, where overflow is range-checked. + * + * @warning result is not explicitly range-constrained here because it is typically used downstream in lookup tables + * (e.g. in choose_with_sigma1, majority_with_sigma0) which implicitly validate the 32-bit range. + */ template field_t SHA256::add_normalize(const field_t& a, const field_t& b) { - typedef field_t field_pt; - typedef witness_t witness_pt; + using field_pt = field_t; + using witness_pt = witness_t; Builder* ctx = a.get_context() ? a.get_context() : b.get_context(); uint256_t sum = a.get_value() + b.get_value(); - - uint256_t normalized_sum = static_cast(sum.data[0]); + uint256_t normalized_sum = static_cast(sum.data[0]); // lower 32 bits if (a.is_constant() && b.is_constant()) { return field_pt(ctx, normalized_sum); } - field_pt overflow = witness_pt(ctx, fr((sum - normalized_sum) >> 32)); - - field_pt result = a.add_two(b, overflow * field_pt(ctx, -fr((uint64_t)(1ULL << 32ULL)))); - // Has to be a byte? + fr overflow_value = fr((sum - normalized_sum) >> 32); + field_pt overflow = witness_pt(ctx, overflow_value); + + field_pt result = a.add_two(b, overflow * field_pt(ctx, -fr(1ULL << 32ULL))); + // AUDITTODO: The 3-bit constraint is necessary.
Analysis of call sites: + // + // Compression loop (lines ~439-450): + // ch, maj outputs: max = 2(2^32-1) each (lookup output digits are 0-2, see sha256.hpp:79) + // temp1 = ch + h.normal + (w[i] + K[i]) (max = 2(2^32-1) + (2^32-1) + 2(2^32-1) = 5(2^32-1)) + // add_normalize(d.normal, temp1): max sum = (2^32-1) + 5(2^32-1) = 6(2^32-1), overflow <= 5 + // add_normalize(temp1, maj): max sum = 5(2^32-1) + 2(2^32-1) = 7(2^32-1), overflow <= 6 + // => Requires 3 bits (to represent overflow values 0-7) + // + // Final output (lines ~456-463): + // add_normalize(X.normal, h_init[i]): both 32-bit, max sum = 2(2^32-1), overflow <= 1 + // => Could use 1 bit, but we use 3 for uniformity + // + // The 3-bit constraint is correct and necessary for the compression loop. + // Consider adding argument overflow_bits to customize constraint size and make it more explicit. overflow.create_range_constraint(3); return result; } @@ -248,36 +379,66 @@ template std::array, 8> SHA256::sha256_block(const std::array, 8>& h_init, const std::array, 16>& input) { - typedef field_t field_pt; - /** - * Initialize round variables with previous block output - **/ + using field_pt = field_t; + + // AUDITTODO: Input range constraints are not explicitly enforced here. 
Analysis shows: + // + // - h_init[1,2,5,6] are immediately lookup-constrained (32-bit) via map_into_*_sparse_form + // - h_init[0,4] are lookup-constrained in round 0 via choose/majority functions + // - h_init[3,7] are used in round 0 arithmetic BEFORE being lookup-constrained (they cycle + // through working variables and get constrained in later rounds) + // - input[0] is NEVER lookup-constrained (only used as w[i-16] and in round 0, both additions) + // - input[1..8] are lookup-constrained during extend_witness as w[i-15] (at i=16..23) + // - input[9..13] are used as w[i-7] at i=16..20 BEFORE being constrained (converted later + // as w[i-15] at i=24..28) + // - input[14..15] are lookup-constrained during extend_witness as w[i-2] (at i=16..17) + // + // The overflow constraints in extend_witness (3-bit divisor) and add_normalize (3-bit overflow) + // provide weak implicit bounds. If unconstrained inputs exceed ~35 bits, these constraints + // will reject the proof. This is safe (rejects invalid proofs) but not ideal. + // + // This is not practically exploitable (finding inputs that produce a specific hash still + // requires ~2^208 work), but deviates from the SHA-256 spec which assumes 32-bit words. + // + // Potential fix: Use lookups (cheaper than create_range_constraint, ~1 gate vs multiple): + // - For h_init[3], h_init[7]: convert immediately via map_into_*_sparse_form instead of + // wrapping in sparse_value(). The lookup constrains the input as a side effect. + // - For input[0]: add a lookup in extend_witness via convert_witness() or SHA256_WITNESS_INPUT. + // - For input[9..13]: reorder extend_witness to convert these before use, or add explicit lookups. + // + // After fixing, the extend_witness divisor constraint could be tightened to 2 bits. 
+ /** - * We can initialize round variables a and c and put value h_init[0] and - * h_init[4] in .normal, and don't do lookup for maj_output, because majority and choose - * functions will do that in the next step - **/ - sparse_value a = sparse_value(h_init[0]); + * Initialize round variables with previous block output. + * Note: We delay converting `a` and `e` into their respective sparse forms because it's done as part of the + * majority and choose functions in the first round. + */ + sparse_value a = sparse_value(h_init[0]); // delay conversion to maj sparse form auto b = map_into_maj_sparse_form(h_init[1]); auto c = map_into_maj_sparse_form(h_init[2]); sparse_value d = sparse_value(h_init[3]); - sparse_value e = sparse_value(h_init[4]); + sparse_value e = sparse_value(h_init[4]); // delay conversion to choose sparse form auto f = map_into_choose_sparse_form(h_init[5]); auto g = map_into_choose_sparse_form(h_init[6]); sparse_value h = sparse_value(h_init[7]); - /** - * Extend witness - **/ - const auto w = extend_witness(input); + // Extend the 16-word message block to 64 words per SHA-256 specification + const std::array, 64> w = extend_witness(input); /** - * Apply SHA-256 compression function to the message schedule - **/ - // As opposed to standard sha description - Maj and Choose functions also include required rotations for round + * Apply SHA-256 compression function to the message schedule. + * + * Standard SHA-256 round: + * T1 = h + Σ1(e) + Ch(e,f,g) + K[i] + W[i] + * T2 = Σ0(a) + Maj(a,b,c) + * h,g,f,e,d,c,b,a = g,f,e,d+T1,c,b,a,T1+T2 + * + * NOTE: Order-dependent side effects below. choose_with_sigma1() populates e.sparse (subsequently copied to f). + * majority_with_sigma0() populates a.sparse (subsequently copied to b). Do not reorder relative to f=e or b=a. 
+ */ for (size_t i = 0; i < 64; ++i) { - auto ch = choose(e, f, g); - auto maj = majority(a, b, c); + auto ch = choose_with_sigma1(e, f, g); + auto maj = majority_with_sigma0(a, b, c); auto temp1 = ch.add_two(h.normal, w[i] + fr(round_constants[i])); h = g; @@ -290,9 +451,7 @@ std::array, 8> SHA256::sha256_block(const std::array output; output[0] = add_normalize(a.normal, h_init[0]); output[1] = add_normalize(b.normal, h_init[1]); @@ -303,12 +462,9 @@ std::array, 8> SHA256::sha256_block(const std::array @@ -19,42 +18,77 @@ namespace bb::stdlib { template class SHA256 { using field_ct = field_t; - using byte_array_ct = byte_array; - struct sparse_ch_value { - field_ct normal; - field_ct sparse; - field_ct rot6; - field_ct rot11; - field_ct rot25; - }; - struct sparse_maj_value { - field_ct normal; - field_ct sparse; - field_ct rot2; - field_ct rot13; - field_ct rot22; - }; - - static constexpr uint64_t init_constants[8]{ 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, - 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; static constexpr fr base{ 16 }; + /** + * @brief Multipliers for computing σ₀ during message schedule extension + * + * σ₀(x) = (x >>> 7) ⊕ (x >>> 18) ⊕ (x >> 3) + * + * These multipliers handle rotations that keep the limb within the 32-bit word boundary. + * Rotations that split a limb across the wrap boundary (i.e., rot7 for limb 1) cannot be + * represented as a simple scalar and are instead handled via rotated_limb_corrections from the lookup table. 
+ * + * limb 0) bits [0..2]; pos_0 = 0 + * - rotation by 7: 16^(-7 + pos_0 mod 32) = 16^(32 - 7 + 0) + * - rotation by 18: 16^(-18 + pos_0 mod 32) = 16^(32 - 18 + 0) + * - shift by 3: no contribution (all 3 bits shifted out) + * limb 1) bits [3..9]; pos_1 = 3 + * - rotation by 7: not representable as scalar; accounted for in rotated_limb_corrections[1] + * - rotation by 18: 16^(-18 + pos_1 mod 32) = 16^(32 - 18 + 3) + * - shift by 3: 16^(pos_1 - 3) = 16^0 = 1 + * limb 2) bits [10..17]; pos_2 = 10 + * - rotation by 7: 16^(-7 + pos_2) = 16^(10 - 7) + * - rotation by 18: 16^(-18 + pos_2 mod 32) = 16^(32 - 18 + 10) + * - shift by 3: 16^(pos_2 - 3) = 16^(10 - 3) + * limb 3) bits [18..31]; pos_3 = 18 + * - rotation by 7: 16^(-7 + pos_3) = 16^(18 - 7) + * - rotation by 18: 16^(-18 + pos_3) = 16^(18 - 18) = 1 + * - shift by 3: 16^(pos_3 - 3) = 16^(18 - 3) + */ static constexpr std::array left_multipliers{ - (base.pow(32 - 7) + base.pow(32 - 18)), - (base.pow(32 - 18 + 3) + 1), - (base.pow(32 - 18 + 10) + base.pow(10 - 7) + base.pow(10 - 3)), - (base.pow(18 - 7) + base.pow(18 - 3) + 1), + base.pow(32 - 7) + base.pow(32 - 18), // limb 0: rot7 + rot18 + base.pow(32 - 18 + 3) + fr(1), // limb 1: rot18 + shift3 + base.pow(10 - 7) + base.pow(32 - 18 + 10) + base.pow(10 - 3), // limb 2: rot7 + rot18 + shift3 + base.pow(18 - 7) + fr(1) + base.pow(18 - 3), // limb 3: rot7 + rot18 + shift3 }; + /** + * @brief Multipliers for computing σ₁ during message schedule extension + * + * σ₁(x) = (x >>> 17) ⊕ (x >>> 19) ⊕ (x >> 10) + * + * These multipliers handle rotations that keep the limb within the 32-bit word boundary. + * Rotations that split a limb across the wrap boundary (i.e., rot17 for limb 2, rot19 for limb 3) + * cannot be represented as a simple scalar and are instead handled via rotated_limb_corrections from the lookup + * table. 
+ * + * limb 0) bits [0..2]; pos_0 = 0 + * - rotation by 17: 16^(-17 + pos_0 mod 32) = 16^(32 - 17 + 0) + * - rotation by 19: 16^(-19 + pos_0 mod 32) = 16^(32 - 19 + 0) + * - shift by 10: no contribution (all 3 bits shifted out) + * limb 1) bits [3..9]; pos_1 = 3 + * - rotation by 17: 16^(-17 + pos_1 mod 32) = 16^(32 - 17 + 3) + * - rotation by 19: 16^(-19 + pos_1 mod 32) = 16^(32 - 19 + 3) + * - shift by 10: no contribution (all 7 bits shifted out) + * limb 2) bits [10..17]; pos_2 = 10 + * - rotation by 17: not representable as scalar; accounted for in rotated_limb_corrections[2] + * - rotation by 19: 16^(-19 + pos_2 mod 32) = 16^(32 - 19 + 10) + * - shift by 10: 16^(pos_2 - 10) = 16^0 = 1 + * limb 3) bits [18..31]; pos_3 = 18 + * - rotation by 17: 16^(-17 + pos_3) = 16^(18 - 17) + * - rotation by 19: not representable as scalar; accounted for in rotated_limb_corrections[3] + * - shift by 10: 16^(pos_3 - 10) = 16^(18 - 10) + */ static constexpr std::array right_multipliers{ - base.pow(32 - 17) + base.pow(32 - 19), - base.pow(32 - 17 + 3) + base.pow(32 - 19 + 3), - base.pow(32 - 19 + 10) + fr(1), - base.pow(18 - 17) + base.pow(18 - 10), + base.pow(32 - 17) + base.pow(32 - 19), // limb 0: rot17 + rot19 + base.pow(32 - 17 + 3) + base.pow(32 - 19 + 3), // limb 1: rot17 + rot19 + base.pow(32 - 19 + 10) + fr(1), // limb 2: rot19 + shift10 + base.pow(18 - 17) + base.pow(18 - 10), // limb 3: rot17 + shift10 }; - static constexpr uint64_t round_constants[64]{ + static constexpr std::array round_constants{ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, @@ -66,28 +100,26 @@ template class SHA256 { }; struct sparse_witness_limbs { sparse_witness_limbs(const field_ct& in = 0) - { - normal = in; - has_sparse_limbs = false; - } + : normal(in) 
+ {} sparse_witness_limbs(const sparse_witness_limbs& other) = default; sparse_witness_limbs(sparse_witness_limbs&& other) = default; - sparse_witness_limbs& operator=(const sparse_witness_limbs& other) = default; sparse_witness_limbs& operator=(sparse_witness_limbs&& other) = default; + ~sparse_witness_limbs() = default; field_ct normal; std::array sparse_limbs; - std::array rotated_limbs; + std::array rotated_limb_corrections; bool has_sparse_limbs = false; }; struct sparse_value { sparse_value(const field_ct& in = 0) + : normal(in) { - normal = in; if (normal.is_constant()) { sparse = field_ct(in.get_context(), bb::fr(numeric::map_into_sparse_form<16>(uint256_t(in.get_value()).data[0]))); @@ -96,22 +128,21 @@ template class SHA256 { sparse_value(const sparse_value& other) = default; sparse_value(sparse_value&& other) = default; - sparse_value& operator=(const sparse_value& other) = default; sparse_value& operator=(sparse_value&& other) = default; + ~sparse_value() = default; field_ct normal; field_ct sparse; }; - static void prepare_constants(std::array& input); - static sparse_witness_limbs convert_witness(const field_ct& w); + static sparse_witness_limbs convert_witness(const field_ct& input); - static field_ct choose(sparse_value& e, const sparse_value& f, const sparse_value& g); + static field_ct choose_with_sigma1(sparse_value& e, const sparse_value& f, const sparse_value& g); - static field_ct majority(sparse_value& a, const sparse_value& b, const sparse_value& c); - static sparse_value map_into_choose_sparse_form(const field_ct& e); - static sparse_value map_into_maj_sparse_form(const field_ct& e); + static field_ct majority_with_sigma0(sparse_value& a, const sparse_value& b, const sparse_value& c); + static sparse_value map_into_choose_sparse_form(const field_ct& input); + static sparse_value map_into_maj_sparse_form(const field_ct& input); static field_ct add_normalize(const field_ct& a, const field_ct& b); diff --git 
a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.test.cpp index 3e5e2cc73b27..613ca213510b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/hash/sha256/sha256.test.cpp @@ -3,6 +3,8 @@ #include "barretenberg/common/test.hpp" #include "barretenberg/crypto/sha256/sha256.hpp" #include "barretenberg/numeric/random/engine.hpp" +#include "barretenberg/stdlib/primitives/group/test_utils.hpp" +#include "barretenberg/stdlib_circuit_builders/mega_circuit_builder.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" using namespace bb; @@ -12,9 +14,17 @@ namespace { auto& engine = numeric::get_debug_randomness(); } -using Builder = UltraCircuitBuilder; -using field_ct = field_t; -using witness_ct = witness_t; +#define STDLIB_TYPE_ALIASES \ + using Builder = TypeParam; \ + using field_ct = field_t; \ + using witness_ct = witness_t; + +template class Sha256Test : public ::testing::Test {}; + +using BuilderTypes = ::testing::Types; +TYPED_TEST_SUITE(Sha256Test, BuilderTypes); + +using bb::stdlib::test_utils::check_circuit_and_gate_count; /** * @brief Test sha256_block against NIST vector one ("abc") @@ -26,8 +36,10 @@ using witness_ct = witness_t; * - Padded block: "abc" + 0x80 + zeros + 64-bit length (24 bits) * - Single block since message fits in 55 bytes */ -TEST(stdlib_sha256, test_sha256_block_NIST_vector_one) +TYPED_TEST(Sha256Test, BlockNistVectorOne) { + STDLIB_TYPE_ALIASES + auto builder = Builder(); // SHA-256 initial hash values (FIPS 180-4 section 5.3.3) @@ -69,16 +81,14 @@ TEST(stdlib_sha256, test_sha256_block_NIST_vector_one) // Run circuit compression auto circuit_output = SHA256::sha256_block(h_init, block); - // Verify circuit correctness - EXPECT_TRUE(CircuitChecker::check(builder)); - // Compare outputs for (size_t i = 0; i < 8; i++) { uint32_t circuit_val = 
static_cast(uint256_t(circuit_output[i].get_value())); EXPECT_EQ(circuit_val, EXPECTED[i]) << "Circuit mismatch at index " << i; } - info("sha256_block num gates = ", builder.get_num_finalized_gates_inefficient()); + check_circuit_and_gate_count(builder, 6679); + EXPECT_EQ(builder.get_tables_size(), 35992); } /** @@ -91,8 +101,10 @@ TEST(stdlib_sha256, test_sha256_block_NIST_vector_one) * - Block 1: message bytes + padding bit (0x80) * - Block 2: zeros + 64-bit length (448 bits = 0x1c0) */ -TEST(stdlib_sha256, test_sha256_block_NIST_vector_two) +TYPED_TEST(Sha256Test, BlockNistVectorTwo) { + STDLIB_TYPE_ALIASES + auto builder = Builder(); // SHA-256 initial hash values @@ -147,16 +159,14 @@ TEST(stdlib_sha256, test_sha256_block_NIST_vector_two) auto circuit_output = SHA256::sha256_block(h_mid, block2); - // Verify circuit correctness - EXPECT_TRUE(CircuitChecker::check(builder)); - // Compare outputs for (size_t i = 0; i < 8; i++) { uint32_t circuit_val = static_cast(uint256_t(circuit_output[i].get_value())); EXPECT_EQ(circuit_val, EXPECTED[i]) << "Circuit mismatch at index " << i; } - info("sha256_block (2 blocks) num gates = ", builder.get_num_finalized_gates_inefficient()); + check_circuit_and_gate_count(builder, 10611); + EXPECT_EQ(builder.get_tables_size(), 35992); } /** @@ -167,8 +177,10 @@ TEST(stdlib_sha256, test_sha256_block_NIST_vector_two) * circuit failure. 
* */ -TEST(stdlib_sha256, test_extend_witness_constraints) +TYPED_TEST(Sha256Test, ExtendWitnessTamperingFailure) { + STDLIB_TYPE_ALIASES + BB_DISABLE_ASSERTS(); auto builder = Builder(); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/test_utils/tamper_proof.hpp b/barretenberg/cpp/src/barretenberg/stdlib/test_utils/tamper_proof.hpp index fff462826145..dabaed23ec4c 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/test_utils/tamper_proof.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/test_utils/tamper_proof.hpp @@ -1,10 +1,11 @@ #pragma once #include "barretenberg/commitment_schemes/ipa/ipa.hpp" -#include "barretenberg/eccvm/eccvm_flavor.hpp" #include "barretenberg/flavor/flavor_concepts.hpp" -#include +#include "barretenberg/flavor/test_utils/proof_structures.hpp" + namespace bb { + enum class TamperType { MODIFY_SUMCHECK_UNIVARIATE, // Tamper with coefficients of a Sumcheck Round Univariate MODIFY_SUMCHECK_EVAL, // Tamper with a multilinear evaluation of an entity @@ -13,151 +14,125 @@ enum class TamperType { END }; +/** + * @brief Compute the proof length for re-exporting after tampering + * @details Excludes IPA proof length for flavors with IPA accumulator since it's added again by export_proof + */ +template size_t compute_proof_length_for_export(size_t num_public_inputs) +{ + size_t num_frs = Flavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS() + num_public_inputs; + if constexpr (HasIPAAccumulator) { + num_frs -= IPA_PROOF_LENGTH; + } + return num_frs; +} + /** * @brief Test method that provides several ways to tamper with a proof. * TODO(https://github.com/AztecProtocol/barretenberg/issues/1298): Currently, several tests are failing due to * challenges not being re-computed after tampering. We need to extend this tool to allow for more elaborate tampering. 
- * - * @tparam InnerProver - * @tparam InnerFlavor - * @tparam ProofType - * @param inner_prover - * @param inner_proof - * @param type */ template void tamper_with_proof(InnerProver& inner_prover, ProofType& inner_proof, TamperType type) { - using InnerFF = typename InnerFlavor::FF; + using FF = typename InnerFlavor::FF; static constexpr size_t FIRST_WITNESS_INDEX = InnerFlavor::NUM_PRECOMPUTED_ENTITIES; - // Deserialize the transcript into the struct so that we can tamper it - auto num_public_inputs = inner_prover.prover_instance->num_public_inputs(); - inner_prover.transcript->deserialize_full_transcript(num_public_inputs); + // Deserialize proof into structured form + StructuredProof structured_proof; + const auto num_public_inputs = inner_prover.prover_instance->num_public_inputs(); + const size_t log_n = + InnerFlavor::USE_PADDING ? CONST_PROOF_SIZE_LOG_N : inner_prover.prover_instance->log_dyadic_size(); + structured_proof.deserialize(inner_prover.transcript->test_get_proof_data(), num_public_inputs, log_n); + // Apply tampering based on type switch (type) { - case TamperType::MODIFY_SUMCHECK_UNIVARIATE: { - InnerFF random_value = InnerFF::random_element(); - // Preserve the S_0(0) + S_0(1) = target_total_sum = 0, but the check S_0(u_0) = S_1(0) + S_1(1) would fail whp. - // The branching is due to the Flavor structure. 
- if constexpr (!InnerFlavor::HasZK) { - inner_prover.transcript->sumcheck_univariates[0].value_at(0) += random_value; - inner_prover.transcript->sumcheck_univariates[0].value_at(1) -= random_value; - } else { - inner_prover.transcript->zk_sumcheck_univariates[0].value_at(0) += random_value; - inner_prover.transcript->zk_sumcheck_univariates[0].value_at(1) -= random_value; - } + FF delta = FF::random_element(); + // Preserve S_0(0) + S_0(1) = target_total_sum, but S_0(u_0) = S_1(0) + S_1(1) will fail + structured_proof.sumcheck_univariates[0].value_at(0) += delta; + structured_proof.sumcheck_univariates[0].value_at(1) -= delta; break; } - case TamperType::MODIFY_SUMCHECK_EVAL: - // Corrupt the evaluation of the first witness. Captures that the check full_honk_purported_value = - // round.target_total_sum is performed in-circuit. - inner_prover.transcript->sumcheck_evaluations[FIRST_WITNESS_INDEX] = InnerFF::random_element(); + structured_proof.sumcheck_evaluations[FIRST_WITNESS_INDEX] = FF::random_element(); break; - case TamperType::MODIFY_Z_PERM_COMMITMENT: - // Tamper with the commitment to z_perm. - inner_prover.transcript->z_perm_comm = inner_prover.transcript->z_perm_comm * InnerFF::random_element(); + structured_proof.z_perm_comm = structured_proof.z_perm_comm * FF::random_element(); break; - - case TamperType::MODIFY_GEMINI_WITNESS: { - InnerFF random_scalar = InnerFF::random_element(); - // Tamper with the first fold commitment. In non-ZK cases, could only be captured by the pairing check. 
- inner_prover.transcript->gemini_fold_comms[0] = inner_prover.transcript->gemini_fold_comms[0] * random_scalar; - inner_prover.transcript->gemini_fold_evals[0] *= 0; + case TamperType::MODIFY_GEMINI_WITNESS: + structured_proof.gemini_fold_comms[0] = structured_proof.gemini_fold_comms[0] * FF::random_element(); + structured_proof.gemini_fold_evals[0] = FF::zero(); break; - } - case TamperType::END: { + case TamperType::END: break; } - } - // Serialize transcript - // As inner_proof is extracted with export_proof, the internal values of inner_prover.transcript are reset - // Therefore, if we were to call export_proof without overriding num_frs_written and proof_start, the proof would - // be empty. This is a hack, we should probably have a better way of tampering with proofs. - // TODO(https://github.com/AztecProtocol/barretenberg/issues/1411) Use std::unordered map in Transcript so that we - // can access/modify elements of a proof more easily - inner_prover.transcript->serialize_full_transcript(); - size_t num_frs = InnerFlavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS() + num_public_inputs; - if (HasIPAAccumulator) { - // Exclude the IPA points from the proof - they are added again by export_proof - num_frs -= IPA_PROOF_LENGTH; - } - inner_prover.transcript->test_set_proof_parsing_state(0, num_frs); - - // Extract the tampered proof + // Serialize back and re-export the tampered proof + structured_proof.serialize(inner_prover.transcript->test_get_proof_data(), log_n); + inner_prover.transcript->test_set_proof_parsing_state( + 0, compute_proof_length_for_export(num_public_inputs)); inner_proof = inner_prover.export_proof(); } /** - * @brief Tamper with a proof by modifying the first pairing point to be P+G (where G is the generator). - * This keeps the point on the curve but makes the proof invalid. - * + * @brief Tamper with a proof by modifying curve points directly in the proof vector. 
+ * @param inner_proof The proof vector to tamper with + * @param end_of_proof If true, tamper with the last commitment; if false, tamper with the first pairing point */ -template +template void tamper_with_proof(ProofType& inner_proof, bool end_of_proof) { using Commitment = typename InnerFlavor::Curve::AffineElement; using FF = typename InnerFlavor::FF; using ProofFF = typename ProofType::value_type; - - if (!end_of_proof) { + using Codec = typename InnerFlavor::Transcript::Codec; + + static constexpr size_t NUM_FRS_PER_COMMITMENT = Codec::template calc_num_fields(); + + if (end_of_proof) { + // Tamper with the last commitment in the proof + size_t offset = inner_proof.size() - NUM_FRS_PER_COMMITMENT; + auto element_span = std::span{ inner_proof }.subspan(offset, NUM_FRS_PER_COMMITMENT); + auto commitment = Codec::template deserialize_from_fields(element_span); + commitment = commitment * FF(2); + auto serialized = Codec::serialize_to_fields(commitment); + std::copy(serialized.begin(), serialized.end(), inner_proof.begin() + static_cast(offset)); + } else { // Tamper with the first pairing point (P0) by adding the generator - // The number of field elements per point depends on the curve: - // - BN254: 8 field elements (4 limbs per coordinate) - // - Grumpkin: 2 field elements (1 per coordinate) - constexpr size_t FRS_PER_POINT = Commitment::PUBLIC_INPUTS_SIZE; - constexpr size_t NUM_LIMB_BITS = bb::stdlib::NUM_LIMB_BITS_IN_FIELD_SIMULATION; + // Pairing points use a different encoding (reconstruct_from_public) than regular commitments + static constexpr size_t FRS_PER_POINT = Commitment::PUBLIC_INPUTS_SIZE; + static constexpr size_t NUM_LIMB_BITS = bb::stdlib::NUM_LIMB_BITS_IN_FIELD_SIMULATION; if (inner_proof.size() >= FRS_PER_POINT) { - // Deserialize P0 from proof using the native reconstruct_from_public method + // Deserialize P0 using the native reconstruct_from_public method std::array p0_limbs; std::copy_n(inner_proof.begin(), FRS_PER_POINT, 
p0_limbs.begin()); Commitment P0 = Commitment::reconstruct_from_public(p0_limbs); // Tamper: P0 + G (still on curve, but invalid for verification) - Commitment tampered_point = P0 + Commitment::one(); + Commitment tampered = P0 + Commitment::one(); - // Manually serialize tampered point back to proof based on curve type + // Serialize back based on curve type if constexpr (FRS_PER_POINT == 8) { - // BN254: Serialize using bigfield representation (4 limbs of 68 bits each per coordinate) + // BN254: 4 limbs per coordinate constexpr uint256_t LIMB_MASK = (uint256_t(1) << NUM_LIMB_BITS) - 1; - - uint256_t x_val = uint256_t(tampered_point.x); - inner_proof[0] = ProofFF(x_val & LIMB_MASK); - inner_proof[1] = ProofFF((x_val >> NUM_LIMB_BITS) & LIMB_MASK); - inner_proof[2] = ProofFF((x_val >> (2 * NUM_LIMB_BITS)) & LIMB_MASK); - inner_proof[3] = ProofFF((x_val >> (3 * NUM_LIMB_BITS)) & LIMB_MASK); - - uint256_t y_val = uint256_t(tampered_point.y); - inner_proof[4] = ProofFF(y_val & LIMB_MASK); - inner_proof[5] = ProofFF((y_val >> NUM_LIMB_BITS) & LIMB_MASK); - inner_proof[6] = ProofFF((y_val >> (2 * NUM_LIMB_BITS)) & LIMB_MASK); - inner_proof[7] = ProofFF((y_val >> (3 * NUM_LIMB_BITS)) & LIMB_MASK); + uint256_t x_val = uint256_t(tampered.x); + uint256_t y_val = uint256_t(tampered.y); + for (size_t i = 0; i < 4; ++i) { + inner_proof[i] = ProofFF((x_val >> (i * NUM_LIMB_BITS)) & LIMB_MASK); + inner_proof[i + 4] = ProofFF((y_val >> (i * NUM_LIMB_BITS)) & LIMB_MASK); + } } else if constexpr (FRS_PER_POINT == 2) { - // Grumpkin: Serialize directly (1 field element per coordinate) - inner_proof[0] = ProofFF(tampered_point.x); - inner_proof[1] = ProofFF(tampered_point.y); + // Grumpkin: 1 field element per coordinate + inner_proof[0] = ProofFF(tampered.x); + inner_proof[1] = ProofFF(tampered.y); } else { static_assert(FRS_PER_POINT == 8 || FRS_PER_POINT == 2, "Unsupported curve: FRS_PER_POINT must be 8 (BN254) or 2 (Grumpkin)"); } } - } else { - // Manually deserialize, modify, 
and serialize the last commitment contained in the proof. - static constexpr size_t num_frs_comm = FrCodec::calc_num_fields(); - size_t offset = inner_proof.size() - num_frs_comm; - - auto element_frs = std::span{ inner_proof }.subspan(offset, num_frs_comm); - auto last_commitment = NativeTranscript::deserialize(element_frs); - last_commitment = last_commitment * FF(2); - auto last_commitment_reserialized = NativeTranscript::serialize(last_commitment); - std::copy(last_commitment_reserialized.begin(), - last_commitment_reserialized.end(), - inner_proof.begin() + static_cast(offset)); } } + } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_flavor.hpp index fcd261e990cc..a5e03a70fe84 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/translator_vm_verifier/translator_recursive_flavor.hpp @@ -103,6 +103,7 @@ class TranslatorRecursiveFlavor { */ class VerificationKey : public StdlibVerificationKey_, + NativeVerificationKey, VKSerializationMode::NO_METADATA> { public: VerificationKey(CircuitBuilder* builder, const std::shared_ptr& native_key) @@ -123,10 +124,9 @@ class TranslatorRecursiveFlavor { * @brief Unused function because vk is hardcoded in recursive verifier, so no transcript hashing is needed. 
* * @param domain_separator - * @param transcript + * @param tag */ - FF hash_with_origin_tagging([[maybe_unused]] const std::string& domain_separator, - [[maybe_unused]] Transcript& transcript) const override + FF hash_with_origin_tagging([[maybe_unused]] const OriginTag& tag) const override { throw_or_abort("Not intended to be used because vk is hardcoded in circuit."); } diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/sha256.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/sha256.hpp index 987f53cb7971..8825470daced 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/sha256.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/sha256.hpp @@ -6,7 +6,6 @@ #pragma once -#include "barretenberg/crypto/aes128/aes128.hpp" #include "barretenberg/numeric/bitop/pow.hpp" #include "barretenberg/numeric/bitop/rotate.hpp" #include "barretenberg/numeric/bitop/sparse_form.hpp" @@ -14,384 +13,624 @@ #include "sparse.hpp" #include "types.hpp" +/** + * @file sha256.hpp + * @brief Plookup tables for SHA-256 using sparse form representation. + * + * @details This file defines lookup tables that enable efficient SHA-256 computation in circuits by converting bitwise + * XOR operations into arithmetic additions via "sparse form" representation. + * + * In sparse form, each bit of a value is stored in its own base-B digit, where B is chosen large enough to prevent + * overflow when multiple values are added. This allows: + * - XOR of N values becomes addition of N sparse values plus normalization (digit mod 2) + * - Boolean functions (Ch, Maj) to be encoded alongside rotations in a single sparse digit + * + * Each SHA-256 operation proceeds in three stages: + * + * 1. 
**Input Table** (decomposition): Converts a 32-bit word into sparse limbs + * - Splits the word into limbs at boundaries aligned with rotation parameters + * - Produces sparse form (C2) and rotated sparse form corrections (C3) (for certain limbs) + * + * 2. **Sparse Computation** (in sha256.cpp): Combines sparse limbs via arithmetic + * - Multiplies limbs by rotation coefficients to position them correctly + * - Adds multiple rotated/shifted copies together + * - Result is a sparse value encoding the XOR (and optionally Ch/Maj) result + * + * 3. **Output Table** (normalization): Converts sparse form back to normal form + * + * ## Tables overview + * + * | Operation | Input Table | Output Table | Base | + * |------------------------|--------------------------|--------------------------|------| + * | Message extension σ₀/σ₁| `SHA256_WITNESS_INPUT` | `SHA256_WITNESS_OUTPUT` | 16 | + * | Choose + Σ₁ | `SHA256_CH_INPUT` | `SHA256_CH_OUTPUT` | 28 | + * | Majority + Σ₀ | `SHA256_MAJ_INPUT` | `SHA256_MAJ_OUTPUT` | 16 | + * + * See corresponding table generation functions for details, including choice of base and limb structure. 
+ */ namespace bb::plookup::sha256_tables { -static constexpr uint64_t choose_normalization_table[28]{ - /* xor result = 0 */ - 0, // e + 2f + 3g = 0 => e = 0, f = 0, g = 0 => t = 0 - 0, // e + 2f + 3g = 1 => e = 1, f = 0, g = 0 => t = 0 - 0, // e + 2f + 3g = 2 => e = 0, f = 1, g = 0 => t = 0 - 1, // e + 2f + 3g = 3 => e = 0, f = 0, g = 1 OR e = 1, f = 1, g = 0 => t = 1 - 0, // e + 2f + 3g = 4 => e = 1, f = 0, g = 1 => t = 0 - 1, // e + 2f + 3g = 5 => e = 0, f = 1, g = 1 => t = 1 - 1, // e + 2f + 3g = 6 => e = 1, f = 1, g = 1 => t = 1 - /* xor result = 1 */ - 1, // e + 2f + 3g = 0 => e = 0, f = 0, g = 0 => t = 0 - 1, // e + 2f + 3g = 1 => e = 1, f = 0, g = 0 => t = 0 - 1, // e + 2f + 3g = 2 => e = 0, f = 1, g = 0 => t = 0 - 2, // e + 2f + 3g = 3 => e = 0, f = 0, g = 1 OR e = 1, f = 1, g = 0 => t = 1 - 1, // e + 2f + 3g = 4 => e = 1, f = 0, g = 1 => t = 0 - 2, // e + 2f + 3g = 5 => e = 0, f = 1, g = 1 => t = 1 - 2, // e + 2f + 3g = 6 => e = 1, f = 1, g = 1 => t = 1 - /* xor result = 2 */ - 0, // e + 2f + 3g = 0 => e = 0, f = 0, g = 0 => t = 0 - 0, // e + 2f + 3g = 1 => e = 1, f = 0, g = 0 => t = 0 - 0, // e + 2f + 3g = 2 => e = 0, f = 1, g = 0 => t = 0 - 1, // e + 2f + 3g = 3 => e = 0, f = 0, g = 1 OR e = 1, f = 1, g = 0 => t = 1 - 0, // e + 2f + 3g = 4 => e = 1, f = 0, g = 1 => t = 0 - 1, // e + 2f + 3g = 5 => e = 0, f = 1, g = 1 => t = 1 - 1, // e + 2f + 3g = 6 => e = 1, f = 1, g = 1 => t = 1 - 1, // e + 2f + 3g = 0 => e = 0, f = 0, g = 0 => t = 0 - /* xor result = 3 */ - 1, // e + 2f + 3g = 1 => e = 1, f = 0, g = 0 => t = 0 - 1, // e + 2f + 3g = 2 => e = 0, f = 1, g = 0 => t = 0 - 2, // e + 2f + 3g = 3 => e = 0, f = 0, g = 1 OR e = 1, f = 1, g = 0 => t = 1 - 1, // e + 2f + 3g = 4 => e = 1, f = 0, g = 1 => t = 0 - 2, // e + 2f + 3g = 5 => e = 0, f = 1, g = 1 => t = 1 - 2, // e + 2f + 3g = 6 => e = 1, f = 1, g = 1 => t = 1 +// Sparse form bases used for SHA-256 operations; chosen to prevent overflow during sparse digit addition +static constexpr uint64_t 
CHOOSE_BASE = 28; +static constexpr uint64_t MAJORITY_BASE = 16; +static constexpr uint64_t WITNESS_EXTENSION_BASE = 16; + +// Bits per lookup in the sparse form normalization tables +static constexpr uint64_t CHOOSE_BITS_PER_LOOKUP = 2; // table size: 28² = 784 +static constexpr uint64_t MAJORITY_BITS_PER_LOOKUP = 3; // table size: 16³ = 4096 +static constexpr uint64_t WITNESS_EXTENSION_BITS_PER_LOOKUP = 3; // table size: 16³ = 4096 + +/** + * @brief Normalization table for combined Σ₁(e) + Ch(e,f,g) computation in base-28 sparse form. + * + * @details + * The Choose function is Ch(e,f,g) = (e & f) ^ (~e & g), i.e., "if e then f else g". + * The Σ₁ function is Σ₁(e) = (e>>>6) ^ (e>>>11) ^ (e>>>25). + * + * Table index = 7 * σ + (e + 2f + 3g), where: + * - σ (0-3): sum of the three rotation bits at position i: (e>>>6)[i] + (e>>>11)[i] + (e>>>25)[i] + * - e, f, g (each 0 or 1): the Ch function input bits at position i + * - e + 2f + 3g (0-6): sparse encoding of Ch inputs + * + * Table output = Σ₁_bit + Ch_bit, where: + * - Σ₁_bit = σ mod 2 (the actual XOR result: 0,2 → 0 and 1,3 → 1) + * - Ch_bit = Ch(e,f,g) + * + * Output range is 0-2, fitting in the normalized sparse digit. 
+ */ +static constexpr uint64_t choose_normalization_table[CHOOSE_BASE]{ + /* σ = 0 (Σ₁ = 0): output = 0 + Ch */ + 0, // e + 2f + 3g = 0 => e=0, f=0, g=0 => Ch=0 + 0, // e + 2f + 3g = 1 => e=1, f=0, g=0 => Ch=0 + 0, // e + 2f + 3g = 2 => e=0, f=1, g=0 => Ch=0 + 1, // e + 2f + 3g = 3 => e=0, f=0, g=1 OR e=1, f=1, g=0 => Ch=1 + 0, // e + 2f + 3g = 4 => e=1, f=0, g=1 => Ch=0 + 1, // e + 2f + 3g = 5 => e=0, f=1, g=1 => Ch=1 + 1, // e + 2f + 3g = 6 => e=1, f=1, g=1 => Ch=1 + /* σ = 1 (Σ₁ = 1): output = 1 + Ch */ + 1, // e + 2f + 3g = 0 => Ch=0 + 1, // e + 2f + 3g = 1 => Ch=0 + 1, // e + 2f + 3g = 2 => Ch=0 + 2, // e + 2f + 3g = 3 => Ch=1 + 1, // e + 2f + 3g = 4 => Ch=0 + 2, // e + 2f + 3g = 5 => Ch=1 + 2, // e + 2f + 3g = 6 => Ch=1 + /* σ = 2 (Σ₁ = 0): output = 0 + Ch */ + 0, // e + 2f + 3g = 0 => Ch=0 + 0, // e + 2f + 3g = 1 => Ch=0 + 0, // e + 2f + 3g = 2 => Ch=0 + 1, // e + 2f + 3g = 3 => Ch=1 + 0, // e + 2f + 3g = 4 => Ch=0 + 1, // e + 2f + 3g = 5 => Ch=1 + 1, // e + 2f + 3g = 6 => Ch=1 + /* σ = 3 (Σ₁ = 1): output = 1 + Ch */ + 1, // e + 2f + 3g = 0 => Ch=0 + 1, // e + 2f + 3g = 1 => Ch=0 + 1, // e + 2f + 3g = 2 => Ch=0 + 2, // e + 2f + 3g = 3 => Ch=1 + 1, // e + 2f + 3g = 4 => Ch=0 + 2, // e + 2f + 3g = 5 => Ch=1 + 2, // e + 2f + 3g = 6 => Ch=1 }; -static constexpr uint64_t majority_normalization_table[16]{ - /* xor result = 0 */ - 0, // a + b + c = 0 => (a & b) ^ (a & c) ^ (b & c) = 0 - 0, // a + b + c = 1 => (a & b) ^ (a & c) ^ (b & c) = 0 - 1, // a + b + c = 2 => (a & b) ^ (a & c) ^ (b & c) = 1 - 1, // a + b + c = 3 => (a & b) ^ (a & c) ^ (b & c) = 1 - /* xor result = 1 */ - 1, - 1, - 2, - 2, - /* xor result = 2 */ - 0, - 0, - 1, - 1, - /* xor result = 3 */ - 1, - 1, - 2, - 2, +/** + * @brief Normalization table for combined Σ₀(a) + Maj(a,b,c) computation in base-16 sparse form. + * + * @details + * The Majority function is Maj(a,b,c) = (a & b) ^ (a & c) ^ (b & c), i.e., the majority bit. + * The Σ₀ function is Σ₀(a) = (a>>>2) ^ (a>>>13) ^ (a>>>22). 
+ * + * Table index = 4 * σ + (a + b + c), where: + * - σ (0-3): sum of the three rotation bits at position i: (a>>>2)[i] + (a>>>13)[i] + (a>>>22)[i] + * - a, b, c (each 0 or 1): the Maj function input bits at position i + * - a + b + c (0-3): sum of Maj inputs + * + * Table output = Σ₀_bit + Maj_bit, where: + * - Σ₀_bit = σ mod 2 (the actual XOR result) + * - Maj_bit = 1 if (a + b + c) >= 2, else 0 (majority vote) + * + * Output range is 0-2, fitting in the normalized sparse digit. + */ +static constexpr uint64_t majority_normalization_table[MAJORITY_BASE]{ + /* σ = 0 (Σ₀ = 0): output = 0 + Maj */ + 0, // a + b + c = 0 => Maj=0 + 0, // a + b + c = 1 => Maj=0 + 1, // a + b + c = 2 => Maj=1 + 1, // a + b + c = 3 => Maj=1 + /* σ = 1 (Σ₀ = 1): output = 1 + Maj */ + 1, // a + b + c = 0 => Maj=0 + 1, // a + b + c = 1 => Maj=0 + 2, // a + b + c = 2 => Maj=1 + 2, // a + b + c = 3 => Maj=1 + /* σ = 2 (Σ₀ = 0): output = 0 + Maj */ + 0, // a + b + c = 0 => Maj=0 + 0, // a + b + c = 1 => Maj=0 + 1, // a + b + c = 2 => Maj=1 + 1, // a + b + c = 3 => Maj=1 + /* σ = 3 (Σ₀ = 1): output = 1 + Maj */ + 1, // a + b + c = 0 => Maj=0 + 1, // a + b + c = 1 => Maj=0 + 2, // a + b + c = 2 => Maj=1 + 2, // a + b + c = 3 => Maj=1 }; -static constexpr uint64_t witness_extension_normalization_table[16]{ - /* xor result = 0 */ - 0, - 1, - 0, - 1, - /* xor result = 1 */ - 1, - 2, - 1, - 2, - /* xor result = 2 */ - 0, - 1, - 0, - 1, - /* xor result = 3 */ - 1, - 2, - 1, - 2, +/** + * @brief Normalization table for message schedule extension (σ₀ and σ₁) in base-16 sparse form. 
+ * + * @details + * Used for the SHA-256 message schedule extension which computes: + * w[i] = σ₁(w[i-2]) + w[i-7] + σ₀(w[i-15]) + w[i-16] + * where: + * σ₀(x) = (x>>>7) ^ (x>>>18) ^ (x>>3) (3-way XOR) + * σ₁(x) = (x>>>17) ^ (x>>>19) ^ (x>>10) (3-way XOR) + * + * At each bit position, the sparse digit encodes two sums: + * - s_0 (0-3): sum of the 3 rotation/shift bits from σ₀ + * - s_1 (0-3): sum of the 3 rotation/shift bits from σ₁ + * + * Table index = 4 * s_0 + s_1 (range 0-15) + * Table output = (s_0 mod 2) + (s_1 mod 2) = σ₀_bit + σ₁_bit + * Output range is 0-2. + */ +static constexpr uint64_t witness_extension_normalization_table[WITNESS_EXTENSION_BASE]{ + /* s_0 = 0 (σ₀_bit = 0): output = 0 + (s_1 mod 2) */ + 0, // s_1 = 0 => σ₁_bit = 0 + 1, // s_1 = 1 => σ₁_bit = 1 + 0, // s_1 = 2 => σ₁_bit = 0 + 1, // s_1 = 3 => σ₁_bit = 1 + /* s_0 = 1 (σ₀_bit = 1): output = 1 + (s_1 mod 2) */ + 1, // s_1 = 0 => σ₁_bit = 0 + 2, // s_1 = 1 => σ₁_bit = 1 + 1, // s_1 = 2 => σ₁_bit = 0 + 2, // s_1 = 3 => σ₁_bit = 1 + /* s_0 = 2 (σ₀_bit = 0): output = 0 + (s_1 mod 2) */ + 0, // s_1 = 0 => σ₁_bit = 0 + 1, // s_1 = 1 => σ₁_bit = 1 + 0, // s_1 = 2 => σ₁_bit = 0 + 1, // s_1 = 3 => σ₁_bit = 1 + /* s_0 = 3 (σ₀_bit = 1): output = 1 + (s_1 mod 2) */ + 1, // s_1 = 0 => σ₁_bit = 0 + 2, // s_1 = 1 => σ₁_bit = 1 + 1, // s_1 = 2 => σ₁_bit = 0 + 2, // s_1 = 3 => σ₁_bit = 1 }; -inline plookup::BasicTable generate_witness_extension_normalization_table(BasicTableId id, const size_t table_index) +/** + * Rotation coefficients for Choose function: Σ₁(e) = (e>>>6) ^ (e>>>11) ^ (e>>>25) + * + * There are three outcomes to consider when a limb is rotated: + * - It stays contiguous: can be represented via multiplication by coefficient = base^(new_bit_position) + * - It splits across the bit-31/0 boundary: must be handled via lookup table correction + * - It lands exactly in bit 0: can be handled via sparse limb base table + * + * Limb structure: L0 = bits 0-10, L1 = bits 11-21, L2 = bits 22-31 + */ 
+static constexpr bb::fr choose_base{ CHOOSE_BASE }; + +static constexpr bb::fr HANDLED_VIA_TABLE{ 0 }; // indicates handling via lookup table instead of scalar multiplier + +static constexpr std::array choose_rot6_coefficients{ + HANDLED_VIA_TABLE, // splits across boundary + choose_base.pow(11 - 6), // lands at bit 5 + choose_base.pow(22 - 6), // lands at bit 16 +}; + +static constexpr std::array choose_rot11_coefficients{ + choose_base.pow(32 - 11), // lands at bit 21 + HANDLED_VIA_TABLE, // lands at bit 0 can be handled using sparse limb base table + choose_base.pow(22 - 11), // lands at bit 11 +}; + +static constexpr std::array choose_rot25_coefficients{ + choose_base.pow(32 - 25), // lands at bit 7 + choose_base.pow(32 - 25 + 11), // lands at bit 18 + HANDLED_VIA_TABLE, // splits across boundary +}; + +// Combined per-limb rotation coefficients +static constexpr std::array choose_rotation_coefficients{ + choose_rot6_coefficients[0] + choose_rot11_coefficients[0] + choose_rot25_coefficients[0], + choose_rot6_coefficients[1] + choose_rot11_coefficients[1] + choose_rot25_coefficients[1], + choose_rot6_coefficients[2] + choose_rot11_coefficients[2] + choose_rot25_coefficients[2], +}; + +/** + * Rotation coefficients for Majority function: Σ₀(a) = (a>>>2) ^ (a>>>13) ^ (a>>>22) + * + * There are three outcomes to consider when a limb is rotated: + * - It stays contiguous: can be represented via multiplication by coefficient = base^(new_bit_position) + * - It splits across the bit-31/0 boundary: must be handled via lookup table correction + * - It lands exactly in bit 0: can be handled via sparse limb base table + * + * Limb structure: L0 = bits 0-10, L1 = bits 11-21, L2 = bits 22-31 + */ +static constexpr bb::fr majority_base{ MAJORITY_BASE }; + +static constexpr std::array majority_rot2_coefficients{ + HANDLED_VIA_TABLE, // splits across boundary + majority_base.pow(11 - 2), // lands at bit 9 + majority_base.pow(22 - 2), // lands at bit 20 +}; + +static constexpr 
std::array majority_rot13_coefficients{ + majority_base.pow(32 - 13), // lands at bit 19 + HANDLED_VIA_TABLE, // splits across boundary + majority_base.pow(22 - 13), // lands at bit 9 +}; + +static constexpr std::array majority_rot22_coefficients{ + majority_base.pow(32 - 22), // lands at bit 10 + majority_base.pow(32 - 22 + 11), // lands at bit 21 + HANDLED_VIA_TABLE, // lands at bit 0, handled via sparse limb base table +}; + +// Combined per-limb rotation coefficients +static constexpr std::array majority_rotation_coefficients{ + majority_rot2_coefficients[0] + majority_rot13_coefficients[0] + majority_rot22_coefficients[0], + majority_rot2_coefficients[1] + majority_rot13_coefficients[1] + majority_rot22_coefficients[1], + majority_rot2_coefficients[2] + majority_rot13_coefficients[2] + majority_rot22_coefficients[2], +}; + +/** + * @brief Generates a BasicTable for normalizing witness extension sparse digits. + * + * @details Template: generate_sparse_normalization_table<WITNESS_EXTENSION_BASE, WITNESS_EXTENSION_BITS_PER_LOOKUP, witness_extension_normalization_table>. + * Processes num_bits=3 bits per lookup, giving 16³ = 4096 table entries. + * Normalizing 32 bits requires ceil(32/3) = 11 lookups (see get_witness_extension_output_table). + */ +inline BasicTable generate_witness_extension_normalization_table(BasicTableId id, const size_t table_index) { - return sparse_tables::generate_sparse_normalization_table<16, 3, witness_extension_normalization_table>( - id, table_index); + return sparse_tables::generate_sparse_normalization_table<WITNESS_EXTENSION_BASE, WITNESS_EXTENSION_BITS_PER_LOOKUP, witness_extension_normalization_table>(id, table_index); } +/** + * @brief Generates a BasicTable for normalizing choose sparse digits. + * + * @details Template: generate_sparse_normalization_table<CHOOSE_BASE, CHOOSE_BITS_PER_LOOKUP, choose_normalization_table>. + * Processes num_bits=2 bits per lookup, giving 28² = 784 table entries. + * Normalizing 32 bits requires 32/2 = 16 lookups (see get_choose_output_table).
+ */ inline BasicTable generate_choose_normalization_table(BasicTableId id, const size_t table_index) { - return sparse_tables::generate_sparse_normalization_table<28, 2, choose_normalization_table>(id, table_index); + return sparse_tables::generate_sparse_normalization_table<CHOOSE_BASE, CHOOSE_BITS_PER_LOOKUP, choose_normalization_table>(id, table_index); } +/** + * @brief Generates a BasicTable for normalizing majority sparse digits. + * + * @details Template: generate_sparse_normalization_table<MAJORITY_BASE, MAJORITY_BITS_PER_LOOKUP, majority_normalization_table>. + * Processes num_bits=3 bits per lookup, giving 16³ = 4096 table entries. + * Normalizing 32 bits requires ceil(32/3) = 11 lookups (see get_majority_output_table). + */ inline BasicTable generate_majority_normalization_table(BasicTableId id, const size_t table_index) { - return sparse_tables::generate_sparse_normalization_table<16, 3, majority_normalization_table>(id, table_index); + return sparse_tables::generate_sparse_normalization_table<MAJORITY_BASE, MAJORITY_BITS_PER_LOOKUP, majority_normalization_table>(id, table_index); } +/** + * @brief Constructs a MultiTable for normalizing witness extension sparse results back to normal form. + * + * @details Allows for normalizing 32 bits using 11 lookups of 3 bits each (ceil(32/3) = 11).
+ */ inline MultiTable get_witness_extension_output_table(const MultiTableId id = SHA256_WITNESS_OUTPUT) { - const size_t num_entries = 11; + const size_t num_entries = 11; // ceil(32 bits / WITNESS_EXTENSION_BITS_PER_LOOKUP) + const auto slice_size = numeric::pow64(WITNESS_EXTENSION_BASE, WITNESS_EXTENSION_BITS_PER_LOOKUP); - MultiTable table(numeric::pow64(16, 3), 1 << 3, 0, num_entries); + MultiTable table(slice_size, 1ULL << WITNESS_EXTENSION_BITS_PER_LOOKUP, 0, num_entries); table.id = id; for (size_t i = 0; i < num_entries; ++i) { - table.slice_sizes.emplace_back(numeric::pow64(16, 3)); + table.slice_sizes.emplace_back(slice_size); table.basic_table_ids.emplace_back(SHA256_WITNESS_NORMALIZE); table.get_table_values.emplace_back( - &sparse_tables::get_sparse_normalization_values<16, witness_extension_normalization_table>); + &sparse_tables::get_sparse_normalization_values<WITNESS_EXTENSION_BASE, witness_extension_normalization_table>); } return table; } +/** + * @brief Constructs a MultiTable for normalizing choose sparse results back to normal form. + * + * @details Allows for normalizing 32 bits using 16 lookups of 2 bits each (32/2 = 16).
+ */ inline MultiTable get_choose_output_table(const MultiTableId id = SHA256_CH_OUTPUT) { - const size_t num_entries = 16; + const size_t num_entries = 16; // 32 bits / CHOOSE_BITS_PER_LOOKUP + const auto slice_size = numeric::pow64(CHOOSE_BASE, CHOOSE_BITS_PER_LOOKUP); - MultiTable table(numeric::pow64(28, 2), 1 << 2, 0, num_entries); + MultiTable table(slice_size, 1ULL << CHOOSE_BITS_PER_LOOKUP, 0, num_entries); table.id = id; for (size_t i = 0; i < num_entries; ++i) { - table.slice_sizes.emplace_back(numeric::pow64(28, 2)); + table.slice_sizes.emplace_back(slice_size); table.basic_table_ids.emplace_back(SHA256_CH_NORMALIZE); table.get_table_values.emplace_back( - &sparse_tables::get_sparse_normalization_values<28, choose_normalization_table>); + &sparse_tables::get_sparse_normalization_values<CHOOSE_BASE, choose_normalization_table>); } return table; } +/** + * @brief Constructs a MultiTable for normalizing majority sparse results back to normal form. + * + * @details Allows for normalizing 32 bits using 11 lookups of 3 bits each (ceil(32/3) = 11). + */ inline MultiTable get_majority_output_table(const MultiTableId id = SHA256_MAJ_OUTPUT) { - const size_t num_entries = 11; + const size_t num_entries = 11; // ceil(32 bits / MAJORITY_BITS_PER_LOOKUP) + const auto slice_size = numeric::pow64(MAJORITY_BASE, MAJORITY_BITS_PER_LOOKUP); - MultiTable table(numeric::pow64(16, 3), 1 << 3, 0, num_entries); + MultiTable table(slice_size, 1ULL << MAJORITY_BITS_PER_LOOKUP, 0, num_entries); table.id = id; for (size_t i = 0; i < num_entries; ++i) { - table.slice_sizes.emplace_back(numeric::pow64(16, 3)); + table.slice_sizes.emplace_back(slice_size); table.basic_table_ids.emplace_back(SHA256_MAJ_NORMALIZE); table.get_table_values.emplace_back( - &sparse_tables::get_sparse_normalization_values<16, majority_normalization_table>); + &sparse_tables::get_sparse_normalization_values<MAJORITY_BASE, majority_normalization_table>); } return table; } +/** + * @brief Returns multipliers for computing Σ₀(a) rotations in majority_with_sigma0.
+ * + * @details When computing rotations, we multiply a_sparse by c0 (L0's coefficient). + * This gives L1 a coefficient of B¹¹·c0, but we need c1. The correction δ = c1 - B¹¹·c0 + * is applied to L1's contribution. L2 correction is handled in the input table (see get_majority_input_table). + * + * @return {c0, L1_correction, 0} where L1_correction = c1 - 16¹¹·c0 + */ inline std::array get_majority_rotation_multipliers() { - constexpr uint64_t base_temp = 16; - auto base = bb::fr(base_temp); - // scaling factors applied to a's sparse limbs, excluding the rotated limb - const std::array rot2_coefficients{ 0, base.pow(11 - 2), base.pow(22 - 2) }; - const std::array rot13_coefficients{ base.pow(32 - 13), 0, base.pow(22 - 13) }; - const std::array rot22_coefficients{ base.pow(32 - 22), base.pow(32 - 22 + 11), 0 }; - - // these are the coefficients that we want - const std::array target_rotation_coefficients{ - rot2_coefficients[0] + rot13_coefficients[0] + rot22_coefficients[0], - rot2_coefficients[1] + rot13_coefficients[1] + rot22_coefficients[1], - rot2_coefficients[2] + rot13_coefficients[2] + rot22_coefficients[2], - }; + bb::fr limb1_correction = + majority_rotation_coefficients[1] - majority_base.pow(11) * majority_rotation_coefficients[0]; - bb::fr column_2_row_1_multiplier = target_rotation_coefficients[0]; - bb::fr column_2_row_2_multiplier = - target_rotation_coefficients[0] * (-bb::fr(base).pow(11)) + target_rotation_coefficients[1]; - - std::array rotation_multipliers = { column_2_row_1_multiplier, column_2_row_2_multiplier, bb::fr(0) }; - return rotation_multipliers; + return { majority_rotation_coefficients[0], limb1_correction, bb::fr(0) }; } -// template +/** + * @brief Returns multipliers for computing Σ₁(e) rotations in choose_with_sigma1. + * + * @details When computing rotations, we multiply e_sparse by c0 (L0's coefficient). + * This gives L2 a coefficient of B²²·c0, but we need c2. 
The correction δ = c2 - B²²·c0 + * is applied to L2's contribution. L1 correction is handled in the input table (see get_choose_input_table). + * + * @return {c0, 0, L2_correction} where L2_correction = c2 - 28²²·c0 + */ inline std::array get_choose_rotation_multipliers() { - const std::array column_2_row_3_coefficients{ - bb::fr(1), - bb::fr(28).pow(11), - bb::fr(28).pow(22), - }; + bb::fr limb2_correction = choose_rotation_coefficients[2] - choose_base.pow(22) * choose_rotation_coefficients[0]; - // scaling factors applied to a's sparse limbs, excluding the rotated limb - const std::array rot6_coefficients{ bb::fr(0), bb::fr(28).pow(11 - 6), bb::fr(28).pow(22 - 6) }; - const std::array rot11_coefficients{ bb::fr(28).pow(32 - 11), bb::fr(0), bb::fr(28).pow(22 - 11) }; - const std::array rot25_coefficients{ bb::fr(28).pow(32 - 25), bb::fr(28).pow(32 - 25 + 11), bb::fr(0) }; - - // these are the coefficients that we want - const std::array target_rotation_coefficients{ - rot6_coefficients[0] + rot11_coefficients[0] + rot25_coefficients[0], - rot6_coefficients[1] + rot11_coefficients[1] + rot25_coefficients[1], - rot6_coefficients[2] + rot11_coefficients[2] + rot25_coefficients[2], - }; - - bb::fr column_2_row_1_multiplier = bb::fr(1) * target_rotation_coefficients[0]; // why multiply by one? 
- - // this gives us the correct scaling factor for a0's 1st limb - std::array current_coefficients{ - column_2_row_3_coefficients[0] * column_2_row_1_multiplier, - column_2_row_3_coefficients[1] * column_2_row_1_multiplier, - column_2_row_3_coefficients[2] * column_2_row_1_multiplier, - }; - - bb::fr column_2_row_3_multiplier = -(current_coefficients[2]) + target_rotation_coefficients[2]; - - std::array rotation_multipliers = { column_2_row_1_multiplier, bb::fr(0), column_2_row_3_multiplier }; - return rotation_multipliers; + return { choose_rotation_coefficients[0], bb::fr(0), limb2_correction }; } +/** + * @brief Constructs a MultiTable for decomposing a 32-bit word for message schedule extension. + * + * @details + * ## Table Structure + * + * This table decomposes a 32-bit input into 4 limbs and produces outputs via lookup: + * - C1: Normal form accumulator (x = L0 + L1·2³ + L2·2¹⁰ + L3·2¹⁸) + * - C2: Sparse form of each limb (NOT accumulated) + * - C3: Rotated sparse form for split-boundary corrections (NOT accumulated) + * + * Limb structure (boundaries chosen to work nicely with rotation magnitudes): + * - L0: bits 0-2 (3 bits) + * - L1: bits 3-9 (7 bits) + * - L2: bits 10-17 (8 bits) + * - L3: bits 18-31 (14 bits) + * + * ## Purpose + * + * Used to compute the SHA-256 message schedule extension: + * w[i] = σ₁(w[i-2]) + w[i-7] + σ₀(w[i-15]) + w[i-16] + * where: + * σ₀(x) = (x>>>7) ^ (x>>>18) ^ (x>>3) — applied to w[i-15] + * σ₁(x) = (x>>>17) ^ (x>>>19) ^ (x>>10) — applied to w[i-2] + * + * The same decomposition serves BOTH σ₀ and σ₁, with limb boundaries at 3, 10, 18 + * aligning with the shift/rotation parameters of both functions. 
+ * + */ inline MultiTable get_witness_extension_input_table(const MultiTableId id = SHA256_WITNESS_INPUT) { std::vector column_1_coefficients{ 1, 1 << 3, 1 << 10, 1 << 18 }; - std::vector column_2_coefficients{ 0, 0, 0, 0 }; - std::vector column_3_coefficients{ 0, 0, 0, 0 }; + std::vector column_2_coefficients{ 0, 0, 0, 0 }; // Not accumulated; accessed individually + std::vector column_3_coefficients{ 0, 0, 0, 0 }; // Not accumulated; accessed individually MultiTable table(column_1_coefficients, column_2_coefficients, column_3_coefficients); table.id = id; - table.slice_sizes = { (1 << 3), (1 << 7), (1 << 8), (1 << 18) }; + table.slice_sizes = { (1 << 3), (1 << 7), (1 << 8), (1 << 14) }; + + /** + * Specify the functions defining the rotation to be applied to each of three limbs. This is only for handling the + * limbs which split across the 31/0-bit boundary when rotated. The table handles rotations from both σ₀ and σ₁. + * + * table_rotation = splitting_rotation - limb_start_position + * + * | Limb | Start | Splitting rot | Table rot | + * |------|-------|---------------|--------------| + * | L0 | 0 | (none) | 0 (unused) | + * | L1 | 3 | 7 (from σ₀) | 7 - 3 = 4 | + * | L2 | 10 | 17 (from σ₁) | 17 - 10 = 7 | + * | L3 | 18 | 19 (from σ₁) | 19 - 18 = 1 | + * + */ table.basic_table_ids = { SHA256_WITNESS_SLICE_3, SHA256_WITNESS_SLICE_7_ROTATE_4, SHA256_WITNESS_SLICE_8_ROTATE_7, SHA256_WITNESS_SLICE_14_ROTATE_1 }; table.get_table_values = { - &sparse_tables::get_sparse_table_with_rotation_values<16, 0>, - &sparse_tables::get_sparse_table_with_rotation_values<16, 4>, - &sparse_tables::get_sparse_table_with_rotation_values<16, 7>, - &sparse_tables::get_sparse_table_with_rotation_values<16, 1>, + &sparse_tables::get_sparse_table_with_rotation_values<WITNESS_EXTENSION_BASE, 0>, + &sparse_tables::get_sparse_table_with_rotation_values<WITNESS_EXTENSION_BASE, 4>, + &sparse_tables::get_sparse_table_with_rotation_values<WITNESS_EXTENSION_BASE, 7>, + &sparse_tables::get_sparse_table_with_rotation_values<WITNESS_EXTENSION_BASE, 1>, }; return table; } +/** + * @brief Constructs a
MultiTable for decomposing e into sparse form and computing rotation components for Σ₁(e). + * + * @details + * ## Table Structure + * + * This table decomposes a 32-bit input into 3 limbs and produces three accumulated outputs: + * - C1: Normal form accumulator (e = L0 + L1·2¹¹ + L2·2²²) + * - C2: Sparse form accumulator (e_sparse = S0 + S1·B¹¹ + S2·B²², where B=28) + * - C3: Rotation accumulator for split-boundary rotations, with L1 correction baked in + * + * Limb structure: + * - L0: bits 0-10 (11 bits) + * - L1: bits 11-21 (11 bits) + * - L2: bits 22-31 (10 bits) + * + * ## Purpose + * + * Used to compute Σ₁(e) + Ch(e,f,g) for SHA-256 + * - Σ₁(e) = (e>>>6) ^ (e>>>11) ^ (e>>>25) + * - Ch(e,f,g) = (e & f) ^ (~e & g) + * + * In sparse base-28 form, XOR becomes addition (mod 28 per digit), allowing: + * e + 7·Σ₁(e) = e_sparse + 7·[(e>>>6) + (e>>>11) + (e>>>25)] + * + * Each rotation is decomposed per-limb. For each limb, rotations either: + * - Stay contiguous (handled via scalar multiplication by rotation coefficients c0, c1, c2) + * - Split across the bit-31/0 boundary (handled via lookup table in C3) + * + * ## Column 3 Correction Term + * + * When computing rotations, we multiply e_sparse by L0's rotation coefficient c0 (see `choose_with_sigma1`). + * This gives each limb a coefficient proportional to its position: + * - L0 gets c0 (correct) + * - L1 gets B¹¹·c0 (needs a correction to make it equal to c1) + * - L2 gets B²²·c0 (needs a correction to make it equal to c2) + * + * The L2 correction is handled directly in the `choose_with_sigma1` function. + * + * The L1 correction factor δ = c1 - B¹¹·c0 is baked into the C3 accumulator via column_3_coefficients. + * When C3 is accumulated, it produces: C3[0] = raw[0] + raw[2] + S1 + S1·δ + * The S1·δ term corrects L1's coefficient from B¹¹·c0 to c1. 
+ * + */ inline MultiTable get_choose_input_table(const MultiTableId id = SHA256_CH_INPUT) { - /** - * When reading from our lookup tables, we can read from the differences between adjacent rows in program memory, - *instead of taking absolute values - * - * For example, if our layout in memory is: - * - * | 1 | 2 | 3 | - * | - | - | - | - * | a_1 | b_1 | c_1 | - * | a_2 | b_2 | c_2 | - * | ... | ... | ... | - * - * We can valdiate that (a_1 + q_0 * a_2) is a table key and (c_1 + q_1 * c_2), (b_1 + q_2 * b_2) are table values, - * where q_0, q_1, q_2 are precomputed constants - * - * This allows us to assemble accumulating sums out of multiple table reads, without requiring extra addition gates. - * - * We can also use this feature to evaluate our sha256 rotations more efficiently, when converting into sparse form. - * - * Let column 1 represents our 'normal' scalar, column 2 represents our scalar in sparse form - * - * It's simple enough to make columns 1 and 2 track the accumulating sum of our scalar in normal and sparse form. - * - * Column 3 contains terms we can combine with our accumulated sparse scalar, to obtain our rotated scalar. - * - * Each lookup table will be of size 2^11. as that allows us to decompose a 32-bit scalar into sparse form in 3 - *reads (2^16 is too expensive for small circuits) - * - * For example, if we want to rotate `a` by 6 bits, we make the first lookup access the table that rotates `b` by 6 - *bits. 
Subsequent table reads do not need to be rotated, as the 11-bit limbs will not cross 32-bit boundary and can - *be scaled by constants - * - * With this in mind, we want to tackle the SHA256 `ch` sub-algorithm - * - * This requires us to compute ((a >>> 6) ^ (a >>> 11) ^ (a >>> 25)) + ((a ^ b) ^ (~a ^ c)) - * - * In sparse form, we can represent this as: - * - * 7 * (a >>> 6) + (a >>> 11) + (a >>> 25) + (a + 2 * b + 3 * c) - * - * When decomposing a into sparse form, we would therefore like to obtain the following: - * - * 7 * (a >>> 6) + (a >>> 11) + (a >>> 25) + (a) - * - * We need to determine the values of the constants (q_1, q_2, q_3) that we will be scaling our lookup values by, - *when assembling our accumulated sums. - * - * We need the sparse representation of `a` elsewhere in the algorithm, so the constants in columns 1 and 2 are - *fixed. - * - **/ - - // scaling factors applied to a's sparse limbs, excluding the rotated limb - const std::array rot6_coefficients{ bb::fr(0), bb::fr(28).pow(11 - 6), bb::fr(28).pow(22 - 6) }; - const std::array rot11_coefficients{ bb::fr(28).pow(32 - 11), bb::fr(0), bb::fr(28).pow(22 - 11) }; - const std::array rot25_coefficients{ bb::fr(28).pow(32 - 25), bb::fr(28).pow(32 - 25 + 11), bb::fr(0) }; - - // these are the coefficients that we want - const std::array target_rotation_coefficients{ - rot6_coefficients[0] + rot11_coefficients[0] + rot25_coefficients[0], - rot6_coefficients[1] + rot11_coefficients[1] + rot25_coefficients[1], - rot6_coefficients[2] + rot11_coefficients[2] + rot25_coefficients[2], - }; - bb::fr column_2_row_1_multiplier = target_rotation_coefficients[0]; - - // this gives us the correct scaling factor for a0's 1st limb - std::array current_coefficients{ - column_2_row_1_multiplier, - bb::fr(28).pow(11) * column_2_row_1_multiplier, - bb::fr(28).pow(22) * column_2_row_1_multiplier, - }; - - // bb::fr column_2_row_3_multiplier = -(current_coefficients[2]) + target_rotation_coefficients[2]; - bb::fr 
column_3_row_2_multiplier = -(current_coefficients[1]) + target_rotation_coefficients[1]; + // L1 correction factor: δ = c1 - B¹¹·c0 (see block comment above) + bb::fr limb1_table_correction = + choose_rotation_coefficients[1] - choose_base.pow(11) * choose_rotation_coefficients[0]; std::vector column_1_coefficients{ bb::fr(1), bb::fr(1 << 11), bb::fr(1 << 22) }; - std::vector column_2_coefficients{ bb::fr(1), bb::fr(28).pow(11), bb::fr(28).pow(22) }; - std::vector column_3_coefficients{ bb::fr(1), column_3_row_2_multiplier + bb::fr(1), bb::fr(1) }; + std::vector column_2_coefficients{ bb::fr(1), choose_base.pow(11), choose_base.pow(22) }; + std::vector column_3_coefficients{ bb::fr(1), bb::fr(1) + limb1_table_correction, bb::fr(1) }; MultiTable table(column_1_coefficients, column_2_coefficients, column_3_coefficients); table.id = id; table.slice_sizes = { (1 << 11), (1 << 11), (1 << 10) }; - table.basic_table_ids = { SHA256_BASE28_ROTATE6, SHA256_BASE28, SHA256_BASE28_ROTATE3 }; - table.get_table_values.push_back(&sparse_tables::get_sparse_table_with_rotation_values<28, 6>); - table.get_table_values.push_back(&sparse_tables::get_sparse_table_with_rotation_values<28, 0>); - table.get_table_values.push_back(&sparse_tables::get_sparse_table_with_rotation_values<28, 3>); - // table.get_table_values = std::vector{ + /** + * Specify the functions defining the rotation to be applied to each of three limbs. This is only for handling the + * limbs which split across the 31/0-bit boundary when rotated. 
+ * + * table_rotation = SHA256_rotation - limb_start_position + * + * | Limb | Start pos | SHA256 rot | Table rot | + * |------|-----------|------------|-------------| + * | L0 | 0 | 6 | 6 - 0 = 6 | + * | L1 | 11 | 11 | 11 - 11 = 0 | + * | L2 | 22 | 25 | 25 - 22 = 3 | + */ + table.basic_table_ids = { SHA256_BASE28_ROTATE6, SHA256_BASE28, SHA256_BASE28_ROTATE3 }; + table.get_table_values = { &sparse_tables::get_sparse_table_with_rotation_values<CHOOSE_BASE, 6>, + &sparse_tables::get_sparse_table_with_rotation_values<CHOOSE_BASE, 0>, + &sparse_tables::get_sparse_table_with_rotation_values<CHOOSE_BASE, 3> }; - // &get_sha256_sparse_map_values<28, 0, 0>, - // &get_sha256_sparse_map_values<28, 3, 0>, - // }; return table; } -// This table (at third row and column) returns the sum of roations that "non-trivially wrap" +/** + * @brief Constructs a MultiTable for decomposing a into sparse form and computing rotation components for Σ₀(a). + * + * @details + * ## Table Structure + * + * This table decomposes a 32-bit input into 3 limbs and produces three accumulated outputs: + * - C1: Normal form accumulator (a = L0 + L1·2¹¹ + L2·2²²) + * - C2: Sparse form accumulator (a_sparse = S0 + S1·B¹¹ + S2·B²², where B=16) + * - C3: Rotation accumulator for split-boundary rotations, with L2 correction baked in + * + * Limb structure: + * - L0: bits 0-10 (11 bits) + * - L1: bits 11-21 (11 bits) + * - L2: bits 22-31 (10 bits) + * + * ## Purpose + * + * Used to compute Σ₀(a) + Maj(a,b,c) for SHA-256: + * - Σ₀(a) = (a>>>2) ^ (a>>>13) ^ (a>>>22) + * - Maj(a,b,c) = (a & b) ^ (a & c) ^ (b & c) + * + * In sparse base-16 form, XOR becomes addition (mod 16 per digit), allowing: + * a + 4·Σ₀(a) = a_sparse + 4·[(a>>>2) + (a>>>13) + (a>>>22)] + * + * Each rotation is decomposed per-limb.
For each limb, rotations either: + * - Stay contiguous (handled via scalar multiplication by rotation coefficients c0, c1, c2) + * - Split across the bit-31/0 boundary (handled via lookup table in C3) + * + * ## Column 3 Correction Term + * + * When computing rotations, we multiply a_sparse by L0's rotation coefficient c0 (see `majority_with_sigma0`). + * This gives each limb a coefficient proportional to its position: + * - L0 gets c0 (correct) + * - L1 gets B¹¹·c0 (needs a correction to make it equal to c1) + * - L2 gets B²²·c0 (needs a correction to make it equal to c2) + * + * The L1 correction is handled directly in the `majority_with_sigma0` function. + * + * The L2 correction factor δ = c2 - B¹¹·c1 is baked into the C3 accumulator via column_3_coefficients. + * When C3 is accumulated, it produces: C3[0] = raw[0] + raw[1] + S2 + S2·δ + * The S2·δ term corrects L2's coefficient. + * + */ inline MultiTable get_majority_input_table(const MultiTableId id = SHA256_MAJ_INPUT) { - /** - * We want to tackle the SHA256 `maj` sub-algorithm - * - * This requires us to compute ((a >>> 2) ^ (a >>> 13) ^ (a >>> 22)) + ((a & b) ^ (a & c) ^ (b & c)) - * - * In sparse form, we can represent this as: - * - * 4 * (a >>> 2) + (a >>> 13) + (a >>> 22) + (a + b + c) - * - * - * We need to determine the values of the constants (q_1, q_2, q_3) that we will be scaling our lookup values by, - *when assembling our accumulated sums. - * - * We need the sparse representation of `a` elsewhere in the algorithm, so the constants in columns 1 and 2 are - *fixed. 
- * - **/ - constexpr uint64_t base = 16; - - // scaling factors applied to a's sparse limbs, excluding the rotated limb - const std::array rot2_coefficients{ bb::fr(0), bb::fr(base).pow(11 - 2), bb::fr(base).pow(22 - 2) }; - const std::array rot13_coefficients{ bb::fr(base).pow(32 - 13), bb::fr(0), bb::fr(base).pow(22 - 13) }; - const std::array rot22_coefficients{ bb::fr(base).pow(32 - 22), - bb::fr(base).pow(32 - 22 + 11), - bb::fr(0) }; - - // these are the coefficients that we want - const std::array target_rotation_coefficients{ - rot2_coefficients[0] + rot13_coefficients[0] + rot22_coefficients[0], - rot2_coefficients[1] + rot13_coefficients[1] + rot22_coefficients[1], - rot2_coefficients[2] + rot13_coefficients[2] + rot22_coefficients[2], - }; - - bb::fr column_2_row_3_multiplier = - target_rotation_coefficients[1] * (-bb::fr(base).pow(11)) + target_rotation_coefficients[2]; + // L2 correction factor: δ = c2 - B¹¹·c1 (see block comment above) + bb::fr limb2_table_correction = + majority_rotation_coefficients[2] - majority_base.pow(11) * majority_rotation_coefficients[1]; std::vector column_1_coefficients{ bb::fr(1), bb::fr(1 << 11), bb::fr(1 << 22) }; - std::vector column_2_coefficients{ bb::fr(1), bb::fr(base).pow(11), bb::fr(base).pow(22) }; - std::vector column_3_coefficients{ bb::fr(1), bb::fr(1), bb::fr(1) + column_2_row_3_multiplier }; + std::vector column_2_coefficients{ bb::fr(1), majority_base.pow(11), majority_base.pow(22) }; + std::vector column_3_coefficients{ bb::fr(1), bb::fr(1), bb::fr(1) + limb2_table_correction }; MultiTable table(column_1_coefficients, column_2_coefficients, column_3_coefficients); table.id = id; table.slice_sizes = { (1 << 11), (1 << 11), (1 << 10) }; + + /** + * Specify the functions defining the rotation to be applied to each of the three limbs. This is only for handling the + * limbs which split across the 31/0-bit boundary when rotated. 
+ * + * table_rotation = SHA256_rotation - limb_start_position + * + * | Limb | Start pos | SHA256 rot | Table rot | + * |------|-----------|------------|-------------| + * | L0 | 0 | 2 | 2 - 0 = 2 | + * | L1 | 11 | 13 | 13 - 11 = 2 | + * | L2 | 22 | 22 | 22 - 22 = 0 | + */ table.basic_table_ids = { SHA256_BASE16_ROTATE2, SHA256_BASE16_ROTATE2, SHA256_BASE16 }; - table.get_table_values = { - &sparse_tables::get_sparse_table_with_rotation_values<16, 2>, - &sparse_tables::get_sparse_table_with_rotation_values<16, 2>, - &sparse_tables::get_sparse_table_with_rotation_values<16, 0>, - }; + table.get_table_values = { &sparse_tables::get_sparse_table_with_rotation_values, + &sparse_tables::get_sparse_table_with_rotation_values, + &sparse_tables::get_sparse_table_with_rotation_values }; return table; } diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/sparse.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/sparse.hpp index 8acf3be243cb..877fec8f1c6e 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/sparse.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/sparse.hpp @@ -15,6 +15,17 @@ namespace bb::plookup::sparse_tables { +/** + * @brief Computes the C2 and C3 column values for a sparse lookup table with optional rotation. 
+ * + * @tparam base The sparse form base + * @tparam num_rotated_bits The number of bits to rotate the input by for the C3 value (0 = no rotation) + * @param key The lookup key; key[0] is the input value (a limb), key[1] is unused + * @return {C2, C3} where: + * - C2 = sparse(input): the input converted to sparse base form + * - C3 = sparse(rotate32(input, num_rotated_bits)): the rotated input in sparse form + * (equals C2 if num_rotated_bits == 0) + */ template inline std::array get_sparse_table_with_rotation_values(const std::array key) { @@ -28,6 +39,25 @@ inline std::array get_sparse_table_with_rotation_values(const std::ar return { bb::fr(t0), bb::fr(t1) }; } +/** + * @brief Generates a BasicTable for converting values to sparse form with optional rotation. + * + * @details Creates a lookup table with three columns: + * - C1: Input value in normal form (the lookup key) + * - C2: Input converted to sparse base form + * - C3: Input rotated by num_rotated_bits, then converted to sparse form + * (equals C2 if num_rotated_bits == 0) + * + * Step sizes are configured for accumulator building: + * - C1 step: 2^11 (SHA-256 decomposes 32-bit words into three limbs of sizes {11, 11, 10} bits) + * - C2/C3 step: base^bits_per_slice (for sparse form accumulation) + * + * Also sets get_values_from_key to enable on-the-fly value computation during lookups. 
+ * + * @tparam base The sparse form base + * @tparam bits_per_slice Number of bits in each table entry (determines table size = 2^bits_per_slice) + * @tparam num_rotated_bits The number of bits to rotate for C3 values (0 = no rotation) + */ template inline BasicTable generate_sparse_table_with_rotation(BasicTableId id, const size_t table_index) { @@ -65,6 +95,14 @@ inline BasicTable generate_sparse_table_with_rotation(BasicTableId id, const siz return table; } +/** + * @brief Computes the normalized output for a sparse value based on a provided normalization table + * + * @tparam base The sparse form base + * @tparam base_table The normalization lookup table (maps sparse digit → normalized bit(s)) + * @param key The lookup key; key[0] is the sparse input value, key[1] is unused + * @return {normalized_value, 0} where normalized_value has one output bit per input sparse digit + */ template inline std::array get_sparse_normalization_values(const std::array key) { @@ -72,8 +110,8 @@ inline std::array get_sparse_normalization_values(const std::array 0) { - uint64_t slice = input % base; - uint64_t bit = base_table[static_cast(slice)]; + const uint64_t slice = input % base; + const uint64_t bit = base_table[static_cast(slice)]; accumulator += (bit << count); input -= slice; input /= base; @@ -82,15 +120,29 @@ inline std::array get_sparse_normalization_values(const std::array inline BasicTable generate_sparse_normalization_table(BasicTableId id, const size_t table_index) { - /** - * If t = 7*((e >>> 6) + (e >>> 11) + (e >>> 25)) + e + 2f + 3g - * we can create a mapping between the 28 distinct values, and the result of - * (e >>> 6) ^ (e >>> 11) ^ (e >>> 25) + e + 2f + 3g - */ - BasicTable table; table.id = id; table.table_index = table_index; @@ -100,15 +152,15 @@ inline BasicTable generate_sparse_normalization_table(BasicTableId id, const siz numeric::sparse_int accumulator(0); numeric::sparse_int to_add(1); for (size_t i = 0; i < table_size; ++i) { - const auto& 
limbs = accumulator.get_limbs(); - uint64_t key = 0; + const std::array& limbs = accumulator.get_limbs(); + uint64_t normalized_output = 0; for (size_t j = 0; j < num_bits; ++j) { - const size_t table_idx = static_cast(limbs[j]); - key += ((base_table[table_idx]) << static_cast(j)); + const auto sparse_digit = limbs[j]; + normalized_output += base_table[sparse_digit] << j; } table.column_1.emplace_back(accumulator.get_sparse_value()); - table.column_2.emplace_back(key); + table.column_2.emplace_back(normalized_output); table.column_3.emplace_back(bb::fr(0)); accumulator += to_add; } @@ -116,7 +168,7 @@ inline BasicTable generate_sparse_normalization_table(BasicTableId id, const siz table.get_values_from_key = &get_sparse_normalization_values; table.column_1_step_size = bb::fr(table_size); - table.column_2_step_size = bb::fr(((uint64_t)1 << num_bits)); + table.column_2_step_size = bb::fr(1ULL << num_bits); table.column_3_step_size = bb::fr(0); return table; } diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/types.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/types.hpp index 01947adad9cf..c0bc64d8ce0a 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/types.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/plookup_tables/types.hpp @@ -47,9 +47,6 @@ enum BasicTableId { SHA256_BASE28_ROTATE3, SHA256_BASE16, SHA256_BASE16_ROTATE2, - SHA256_BASE16_ROTATE6, - SHA256_BASE16_ROTATE7, - SHA256_BASE16_ROTATE8, UINT_XOR_SLICE_6_ROTATE_0, UINT_XOR_SLICE_2_ROTATE_0, UINT_XOR_SLICE_4_ROTATE_0, diff --git a/barretenberg/cpp/src/barretenberg/transcript/README.md b/barretenberg/cpp/src/barretenberg/transcript/README.md index 751ef1192114..6517df32acec 100644 --- a/barretenberg/cpp/src/barretenberg/transcript/README.md +++ b/barretenberg/cpp/src/barretenberg/transcript/README.md @@ -659,7 +659,7 @@ VK HASHING WITH ORIGIN TAG ASSIGNMENT 
└────────────────────────────────────────────────────────────────┘ ┌──────────────────────────────────────────┐ - │ vk->hash_with_origin_tagging(domain, tx) │ + │ vk->hash_with_origin_tagging(tx) │ └──────────────────────────────────────────┘ │ ▼ @@ -840,7 +840,7 @@ Always use the dedicated method for hashing verification keys and verifier insta ```cpp // ✅ CORRECT - proper origin tag assignment -FF vk_hash = vk->hash_with_origin_tagging(domain_separator, *transcript); +FF vk_hash = vk->hash_with_origin_tagging(*transcript); transcript->add_to_hash_buffer("vk_hash", vk_hash); // ❌ WRONG - no origin tags in recursive verification diff --git a/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp b/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp index a50b9cb9d5b0..3cb57a995cec 100644 --- a/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp +++ b/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp @@ -483,6 +483,13 @@ template class BaseTranscript { * @details Used by test fixtures to verify transcript conversion */ std::ptrdiff_t test_get_proof_start() const { return proof_start; } + + /** + * @brief Test utility: Get mutable reference to proof_data + * @details Used by test utilities that need to deserialize/serialize proof structure + */ + Proof& test_get_proof_data() { return proof_data; } + const Proof& test_get_proof_data() const { return proof_data; } }; using NativeTranscript = BaseTranscript>; diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/README.md b/barretenberg/cpp/src/barretenberg/translator_vm/README.md new file mode 100644 index 000000000000..2255dec4ddd9 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/translator_vm/README.md @@ -0,0 +1,1446 @@ +# The Translator Circuit + +> **Warning**: This document provides a technical overview of the Translator Circuit used in the Goblin Plonk proving system. It is intended for understanding the design and optimizations. 
The code is the source of truth for implementation specifics. + +## Table of Contents + +1. [Overview](#overview) +2. [High-Level Statement](#high-level-statement) +3. [Architecture and Constants](#architecture-and-constants) +4. [Witness Trace Structure](#witness-trace-structure) +5. [Interleaving: The Key Optimization](#interleaving-the-key-optimization) +6. [Witness Generation and Proving Key Construction](#witness-generation-and-proving-key-construction) +7. [Translator Relations](#translator-relations) + +--- + +## Overview + +The Translator circuit is a critical component of the Goblin Plonk proving system in Aztec. It serves as a bridge between the Mega and ECCVM circuits. + +| Curve | Base Field | Scalar Field | Usage | +| -------- | -------------- | -------------- | ----------------------------------------- | +| BN254 | $\mathbb{F}_q$ | $\mathbb{F}_r$ | Used in Mega circuits | +| Grumpkin | $\mathbb{F}_r$ | $\mathbb{F}_q$ | Used in ECCVM for efficient EC operations | + +When proving recursive circuits with Mega circuit builder, we accumulate elliptic curve operations in an `EccOpQueue`. Proving these ECC operations is delegated to the ECCVM circuit, which operates over the Grumpkin curve. However, the same operations have different representations in the two circuits because: + +- Mega circuit operates over the BN254 scalar field $\mathbb{F}_r$ so elements in $\mathbb{F}_q$ are non-native (i.e., since $q > r$ they need to be decomposed into limbs in $\mathbb{F}_r$) +- ECCVM operates over the Grumpkin scalar field $\mathbb{F}_q$ so elements in $\mathbb{F}_q$ (as well as $\mathbb{F}_r$) are circuit native + +For example, consider the operation $(z \cdot P)$ where $P \equiv (P_x, P_y) \in \mathbb{F}_q^2$ is a point on the curve and $z \in \mathbb{F}_r$ is a scalar. 
The ECCVM arithmetisation represents this operation (in 1 row) as: + +| Opcode | $x$-coordinate | $y$-coordinate | Scalar $z_1$ | Scalar $z_2$ | Full scalar $z$ | +| ------ | -------------- | -------------- | ------------ | ------------ | --------------- | +| `MUL` | $P_x$ | $P_y$ | $z_1$ | $z_2$ | $z$ | +| | | | | | | + +The Mega circuit arithmetisation represents the same operation (in 2 rows) as: + +| Column 1 | Column 2 | Column 3 | Column 4 | +| -------- | -------------------- | -------------------- | -------------------- | +| `MUL` | $P_{x, \textsf{lo}}$ | $P_{x, \textsf{hi}}$ | $P_{y, \textsf{lo}}$ | +| $0$ | $P_{y, \textsf{hi}}$ | $z_1$ | $z_2$ | +| | | | | + +where $P_x = (P_{x, \textsf{lo}} + 2^{136} \cdot P_{x, \textsf{hi}}), \ P_y = (P_{y, \textsf{lo}} + 2^{136} \cdot P_{y, \textsf{hi}})$ and the scalar $z = (z_1 + 2^{128} \cdot z_2)$. Note that the limbs $P_{x/y, \textsf{lo}}, P_{x/y, \textsf{hi}}$ are elements in $\mathbb{F}_r$. + +We need to prove that these two representations are consistent, i.e., that the polynomial evaluations computed in the ECCVM circuit (over $\mathbb{F}_q$) match those computed in the Mega circuit (over $\mathbb{F}_r$). +The Translator circuit is a custom circuit designed to solve this problem. It: + +1. **Receives** the ECC op queue in Mega arithmetisation and the batched polynomial evaluation problem from ECCVM (operating over $\mathbb{F}_q$), +2. **Computes** the batched polynomial evaluation using non-native field arithmetic in $\mathbb{F}_r$ and, +3. **Verifies** that the result matches the evaluation provided by ECCVM. + +## High-Level Statement + +The Translator proves that the ECCVM's batched polynomial evaluation of the ECC operations is computed correctly. 
+Given: + +- A sequence of `UltraOp` operations from the `EccOpQueue` (each containing: $\text{op}, P_x, P_y, z_1, z_2$) +- An evaluation challenge $x \in \mathbb{F}_q$ +- A batching challenge $v \in \mathbb{F}_q$ + +Prove that: +$$\boxed{\text{accumulator}_{\text{final}} = \sum_{i=0}^{n-1} x^{n-1-i} \cdot \left( \text{op}_i + v \cdot P_x^{(i)} + v^2 \cdot P_y^{(i)} + v^3 \cdot z_1^{(i)} + v^4 \cdot z_2^{(i)} \right) \pmod{q}}$$ + +The batching via powers of $v$ combines the 5 values per operation into a single field element, and the powers of $x$ combine all operations into a single accumulator. + +Specifically, for each accumulation step (every 2 rows), prove: + +$$\text{acc}_{\text{curr}} = \text{acc}_{\text{prev}} \cdot x + \text{op} + P_x \cdot v + P_y \cdot v^2 + z_1 \cdot v^3 + z_2 \cdot v^4 \pmod{q}$$ + +Note that we process the `EccOpQueue` in reverse order while computing the accumulator in steps: + +$$ +\begin{aligned} +\textcolor{orange}{\text{acc}_0} &= \textcolor{lightgrey}{0} \cdot x + \text{op}_{n-1} + P_x^{(n-1)} \cdot v + P_y^{(n-1)} \cdot v^2 + z_1^{(n-1)} \cdot v^3 + z_2^{(n-1)} \cdot v^4 \\ +\textcolor{lightgreen}{\text{acc}_1} &= \textcolor{orange}{\text{acc}_0} \cdot x + \text{op}_{n-2} + P_x^{(n-2)} \cdot v + P_y^{(n-2)} \cdot v^2 + z_1^{(n-2)} \cdot v^3 + z_2^{(n-2)} \cdot v^4 \\ +\textcolor{skyblue}{\text{acc}_2} &= \textcolor{lightgreen}{\text{acc}_1} \cdot x + \text{op}_{n-3} + P_x^{(n-3)} \cdot v + P_y^{(n-3)} \cdot v^2 + z_1^{(n-3)} \cdot v^3 + z_2^{(n-3)} \cdot v^4 \\ +&\ \ \vdots \\ +\textcolor{brown}{\text{acc}_{n-2}} &= \textcolor{grey}{\text{acc}_{n-3}} \cdot x + \text{op}_1 + P_x^{(1)} \cdot v + P_y^{(1)} \cdot v^2 + z_1^{(1)} \cdot v^3 + z_2^{(1)} \cdot v^4 \\ +\textcolor{violet}{\text{acc}_{n-1}} &= \textcolor{brown}{\text{acc}_{n-2}} \cdot x + \text{op}_0 + P_x^{(0)} \cdot v + P_y^{(0)} \cdot v^2 + z_1^{(0)} \cdot v^3 + z_2^{(0)} \cdot v^4 \\ +\end{aligned} +$$ + +The final accumulator value 
$\textcolor{violet}{\text{acc}_{n-1}}$ is what we need to verify against the ECCVM's output. +Note that the "previous" accumulator for the _last_ operation must be 0. + +**Method:** Since we cannot directly compute in $\mathbb{F}_q$ using $\mathbb{F}_r$ arithmetic (as $q \neq r$, and in fact $q > r$), we use non-native field arithmetic. Similar to the technique in [bigfield](../stdlib/primitives/bigfield/README.md), we prove the equation holds in integers: + +$$\text{acc}_{\text{prev}} \cdot x + \text{op} + P_x \cdot v + P_y \cdot v^2 + z_1 \cdot v^3 + z_2 \cdot v^4 - \text{quotient} \cdot q - \text{acc}_{\text{curr}} = 0$$ + +We verify this by proving the equation holds: + +1. modulo $2^{272}$ (via 68-bit limb arithmetic split into two 136-bit checks) +2. modulo $r$ (natively in $\mathbb{F}_r$) +3. with range constraints on all limbs (prevents overflow/underflow) + +By the Chinese Remainder Theorem, since $2^{272} \cdot r > 2^{514}$ exceeds the maximum possible value, the equation must hold in integers, and thus modulo $q$. More details on this relation are in [RELATIONS.md](RELATIONS.md#non-native-field-relations). + +## Witness Trace Structure + +The Translator circuit has 81 witness columns, organized into: + +- 4 columns: `EccOpQueue` transcript ($\texttt{op}, P_x, P_y, z_1, z_2$ encoded across 2 rows) +- 13 columns: Limb decompositions (68-bit limbs for non-native arithmetic) +- 64 columns: Microlimb decompositions (14-bit microlimbs for range constraints) + +The circuit operates on a 2-row cycle structure. Each `EccOpQueue` entry occupies exactly 2 rows: + +- Row $2i$ (Even rows): Computation rows where the non-native field relation is actively checked +- Row $2i+1$ (Odd rows): Data storage rows that hold values accessed via shifts + +While enforcing constraints on the even rows, we can access values from the "next" row (which is odd) using shifted column polynomials. 
+As hinted earlier, the "previous" accumulator value needed for computation is stored at odd row $(2i+1)$. +This value becomes the "current" accumulator for the next even row $(2i+2)$: + +| Operation index | $0$ | $1$ | $\quad \dots \quad$ | $(n-2)$ | $(n-1)$ | +| -------------------- | -------------------------------------- | ------------------------------------- | ------------------- | ---------------------------------------- | ------------------------------------ | +| Current accumulator | $\textcolor{violet}{\text{acc}_{n-1}}$ | $\textcolor{brown}{\text{acc}_{n-2}}$ | $\quad \dots \quad$ | $\textcolor{lightgreen}{\text{acc}_{1}}$ | $\textcolor{orange}{\text{acc}_{0}}$ | +| Previous accumulator | $\textcolor{brown}{\text{acc}_{n-2}}$ | $\textcolor{grey}{\text{acc}_{n-3}}$ | $\quad \dots \quad$ | $\textcolor{orange}{\text{acc}_{0}}$ | $0$ | +| | | | | | | + +#### 1. EccOpQueue Transcript Columns (4 columns) + +These columns directly represent the EccOpQueue transcript: + +| Column Name | Even Row $(2i)$ | Odd Row $(2i+1)$ | Description | +| ----------- | -------------------------------- | ---------------------------- | ------------------------------------------------------------------ | +| `OP` | $\texttt{op} \in \{0, 3, 4, 8\}$ | 0 (no-op) | Opcode (the type of elliptic curve operation) | +| `X_LO_Y_HI` | $P_{x,\text{lo}}$ (136 bits) | $P_{y,\text{hi}}$ (118 bits) | Low 136 bits of $x$-coordinate and high 118 bits of $y$-coordinate | +| `X_HI_Z_1` | $P_{x,\text{hi}}$ (118 bits) | $z_1$ (128 bits) | High 118 bits of $x$-coordinate and first scalar | +| `Y_LO_Z_2` | $P_{y,\text{lo}}$ (136 bits) | $z_2$ (128 bits) | Low 136 bits of $y$-coordinate and second scalar | +| | | | | + +**Encoding scheme**: Point coordinates $P_x$ and $P_y$ are each 254 bits, split as: + +- $P_x = (P_{x,\text{hi}}$ (118 bits) $\|$ $P_{x,\text{lo}}$ (136 bits) $)$ +- $P_y = (P_{y,\text{hi}}$ (118 bits) $\|$ $P_{y,\text{lo}}$ (136 bits) $)$ + +#### 2. 
Limb Decomposition Columns (13 columns) + +These columns store finer-grained limb decompositions for non-native arithmetic: + +| Column Group | Even Row $(2i)$ | Odd Row $(2i+1)$ | Bits | Purpose | +| ----------------------------- | --------------------- | --------------------- | ------ | ---------------------------------------- | +| `P_X_LOW_LIMBS` | $P_{x,0}^{\text{lo}}$ | $P_{x,1}^{\text{lo}}$ | 68 | Limbs 0 & 1 of $P_{x}$ | +| `P_X_HIGH_LIMBS` | $P_{x,0}^{\text{hi}}$ | $P_{x,1}^{\text{hi}}$ | 68, 50 | Limbs 2 & 3 of $P_{x}$ | +| `P_Y_LOW_LIMBS` | $P_{y,0}^{\text{lo}}$ | $P_{y,1}^{\text{lo}}$ | 68 | Limbs 0 & 1 of $P_{y}$ | +| `P_Y_HIGH_LIMBS` | $P_{y,0}^{\text{hi}}$ | $P_{y,1}^{\text{hi}}$ | 68, 50 | Limbs 2 & 3 of $P_{y}$ | +| `Z_LOW_LIMBS` | $z_{1,0}$ | $z_{2,0}$ | 68 | Low limbs of $z_1$ and $z_2$ | +| `Z_HIGH_LIMBS` | $z_{1,1}$ | $z_{2,1}$ | 60 | High limbs of $z_1$ and $z_2$ | +| `ACCUMULATORS_BINARY_LIMBS_0` | $a_0^{\text{curr}}$ | $a_0^{\text{prev}}$ | 68 | Limb 0 of current/previous accumulator | +| `ACCUMULATORS_BINARY_LIMBS_1` | $a_1^{\text{curr}}$ | $a_1^{\text{prev}}$ | 68 | Limb 1 of current/previous accumulator | +| `ACCUMULATORS_BINARY_LIMBS_2` | $a_2^{\text{curr}}$ | $a_2^{\text{prev}}$ | 68 | Limb 2 of current/previous accumulator | +| `ACCUMULATORS_BINARY_LIMBS_3` | $a_3^{\text{curr}}$ | $a_3^{\text{prev}}$ | 50 | Limb 3 of current/previous accumulator | +| `QUOTIENT_LOW_BINARY_LIMBS` | $q_0$ | $q_1$ | 68 | Limbs 0 & 1 of quotient $\mathcal{Q}$ | +| `QUOTIENT_HIGH_BINARY_LIMBS` | $q_2$ | $q_3$ | 68, 52 | Limbs 2 & 3 of quotient $\mathcal{Q}$ | +| `RELATION_WIDE_LIMBS` | $c^{\text{lo}}$ | $c^{\text{hi}}$ | 84 | Carry/overflow from mod $2^{136}$ checks | + +**Key insight**: The accumulator columns demonstrate the shift mechanism: + +- Even row stores $a^{\text{curr}}$ (result of current computation) +- Odd row stores what will become $a^{\text{prev}}$ (input to next computation) +- Via shifts, even row $2i$ reads odd row $2i+1$ to get "previous" 
values + +#### 3. Range Constraint Microlimb Columns (64 columns) + +Each limb is further decomposed into 14-bit microlimbs for range checking. Each 68-bit limb has 5 microlimbs (14 bits each) plus a "tail" microlimb that enforces tight range constraints. The columns are organized as follows: + +| Column Group | Even Row $(2i)$ | Odd Row $(2i+1)$ | +| ---------------------------------------------- | -------------------------------------------------------- | --------------------------------------------------------- | +| Coordinate $P_x$ microlimbs | | | +| `P_X_LOW_LIMBS_RANGE_CONSTRAINT_0` | $P_{x,0}[0]$ | $P_{x,1}[0]$ | +| `P_X_LOW_LIMBS_RANGE_CONSTRAINT_1` | $P_{x,0}[1]$ | $P_{x,1}[1]$ | +| `P_X_LOW_LIMBS_RANGE_CONSTRAINT_2` | $P_{x,0}[2]$ | $P_{x,1}[2]$ | +| `P_X_LOW_LIMBS_RANGE_CONSTRAINT_3` | $P_{x,0}[3]$ | $P_{x,1}[3]$ | +| `P_X_LOW_LIMBS_RANGE_CONSTRAINT_4` | $P_{x,0}[4]$ | $P_{x,1}[4]$ | +| `P_X_HIGH_LIMBS_RANGE_CONSTRAINT_0` | $P_{x,2}[0]$ | $P_{x,3}[0]$ | +| `P_X_HIGH_LIMBS_RANGE_CONSTRAINT_1` | $P_{x,2}[1]$ | $P_{x,3}[1]$ | +| `P_X_HIGH_LIMBS_RANGE_CONSTRAINT_2` | $P_{x,2}[2]$ | $P_{x,3}[2]$ | +| `P_X_HIGH_LIMBS_RANGE_CONSTRAINT_3` | $P_{x,2}[3]$ | $P_{x,3}[3]$ | +| `P_X_HIGH_LIMBS_RANGE_CONSTRAINT_4` | $P_{x,2}[4]$ | $\textcolor{yellow}{P_{x,3}[\textsf{tail}]}$ (reassigned) | +| Coordinate $P_y$ microlimbs | | | +| `P_Y_LOW_LIMBS_RANGE_CONSTRAINT_0` | $P_{y,0}[0]$ | $P_{y,1}[0]$ | +| `P_Y_LOW_LIMBS_RANGE_CONSTRAINT_1` | $P_{y,0}[1]$ | $P_{y,1}[1]$ | +| `P_Y_LOW_LIMBS_RANGE_CONSTRAINT_2` | $P_{y,0}[2]$ | $P_{y,1}[2]$ | +| `P_Y_LOW_LIMBS_RANGE_CONSTRAINT_3` | $P_{y,0}[3]$ | $P_{y,1}[3]$ | +| `P_Y_LOW_LIMBS_RANGE_CONSTRAINT_4` | $P_{y,0}[4]$ | $P_{y,1}[4]$ | +| `P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_0` | $P_{y,2}[0]$ | $P_{y,3}[0]$ | +| `P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_1` | $P_{y,2}[1]$ | $P_{y,3}[1]$ | +| `P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_2` | $P_{y,2}[2]$ | $P_{y,3}[2]$ | +| `P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_3` | $P_{y,2}[3]$ | $P_{y,3}[3]$ | +| 
`P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_4` | $P_{y,2}[4]$ | $\textcolor{yellow}{P_{y,3}[\textsf{tail}]}$ (reassigned) | +| Coordinate $z_1$ and $z_2$ microlimbs | | | +| `Z_LOW_LIMBS_RANGE_CONSTRAINT_0` | $z_{1,0}[0]$ | $z_{2,0}[0]$ | +| `Z_LOW_LIMBS_RANGE_CONSTRAINT_1` | $z_{1,0}[1]$ | $z_{2,0}[1]$ | +| `Z_LOW_LIMBS_RANGE_CONSTRAINT_2` | $z_{1,0}[2]$ | $z_{2,0}[2]$ | +| `Z_LOW_LIMBS_RANGE_CONSTRAINT_3` | $z_{1,0}[3]$ | $z_{2,0}[3]$ | +| `Z_LOW_LIMBS_RANGE_CONSTRAINT_4` | $z_{1,0}[4]$ | $z_{2,0}[4]$ | +| `Z_HIGH_LIMBS_RANGE_CONSTRAINT_0` | $z_{1,1}[0]$ | $z_{2,1}[0]$ | +| `Z_HIGH_LIMBS_RANGE_CONSTRAINT_1` | $z_{1,1}[1]$ | $z_{2,1}[1]$ | +| `Z_HIGH_LIMBS_RANGE_CONSTRAINT_2` | $z_{1,1}[2]$ | $z_{2,1}[2]$ | +| `Z_HIGH_LIMBS_RANGE_CONSTRAINT_3` | $z_{1,1}[3]$ | $z_{2,1}[3]$ | +| `Z_HIGH_LIMBS_RANGE_CONSTRAINT_4` | $z_{1,1}[4]$ | $z_{2,1}[4]$ | +| Current and previous accumulator microlimbs | | | +| `ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_0` | $a_{0}^{\text{curr}}[0]$ | $a_{1}^{\text{curr}}[0]$ | +| `ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_1` | $a_{0}^{\text{curr}}[1]$ | $a_{1}^{\text{curr}}[1]$ | +| `ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_2` | $a_{0}^{\text{curr}}[2]$ | $a_{1}^{\text{curr}}[2]$ | +| `ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_3` | $a_{0}^{\text{curr}}[3]$ | $a_{1}^{\text{curr}}[3]$ | +| `ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_4` | $a_{0}^{\text{curr}}[4]$ | $a_{1}^{\text{curr}}[4]$ | +| `ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_0` | $a_{2}^{\text{curr}}[0]$ | $a_{3}^{\text{curr}}[0]$ | +| `ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_1` | $a_{2}^{\text{curr}}[1]$ | $a_{3}^{\text{curr}}[1]$ | +| `ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_2` | $a_{2}^{\text{curr}}[2]$ | $a_{3}^{\text{curr}}[2]$ | +| `ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_3` | $a_{2}^{\text{curr}}[3]$ | $a_{3}^{\text{curr}}[3]$ | +| `ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_4` | $a_{2}^{\text{curr}}[4]$ | $\textcolor{yellow}{a_{3}[\textsf{tail}]}$ (reassigned) | +| Quotient microlimbs | | | +| 
`QUOTIENT_LOW_LIMBS_RANGE_CONSTRAINT_0` | $q_{0}[0]$ | $q_{1}[0]$ | +| `QUOTIENT_LOW_LIMBS_RANGE_CONSTRAINT_1` | $q_{0}[1]$ | $q_{1}[1]$ | +| `QUOTIENT_LOW_LIMBS_RANGE_CONSTRAINT_2` | $q_{0}[2]$ | $q_{1}[2]$ | +| `QUOTIENT_LOW_LIMBS_RANGE_CONSTRAINT_3` | $q_{0}[3]$ | $q_{1}[3]$ | +| `QUOTIENT_LOW_LIMBS_RANGE_CONSTRAINT_4` | $q_{0}[4]$ | $q_{1}[4]$ | +| `QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAINT_0` | $q_{2}[0]$ | $q_{3}[0]$ | +| `QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAINT_1` | $q_{2}[1]$ | $q_{3}[1]$ | +| `QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAINT_2` | $q_{2}[2]$ | $q_{3}[2]$ | +| `QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAINT_3` | $q_{2}[3]$ | $q_{3}[3]$ | +| `QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAINT_4` | $q_{2}[4]$ | $\textcolor{yellow}{q_{3}[\textsf{tail}]}$ (reassigned) | +| Carry microlimbs | | | +| `RELATION_WIDE_LIMBS_RANGE_CONSTRAINT_0` | $c^{\text{lo}}[0]$ | $c^{\text{hi}}[0]$ | +| `RELATION_WIDE_LIMBS_RANGE_CONSTRAINT_1` | $c^{\text{lo}}[1]$ | $c^{\text{hi}}[1]$ | +| `RELATION_WIDE_LIMBS_RANGE_CONSTRAINT_2` | $c^{\text{lo}}[2]$ | $c^{\text{hi}}[2]$ | +| `RELATION_WIDE_LIMBS_RANGE_CONSTRAINT_3` | $c^{\text{lo}}[3]$ | $c^{\text{hi}}[3]$ | +| Tail microlimbs | | | +| `P_X_LOW_LIMBS_RANGE_CONSTRAINT_TAIL` | $\textcolor{yellow}{P_{x,0}[\textsf{tail}]}$ | $\textcolor{yellow}{P_{x,1}[\textsf{tail}]}$ | +| `P_X_HIGH_LIMBS_RANGE_CONSTRAINT_TAIL` | $\textcolor{yellow}{P_{x,2}[\textsf{tail}]}$ | $c^{\text{lo}}[4]$ (reassigned) | +| `P_Y_LOW_LIMBS_RANGE_CONSTRAINT_TAIL` | $\textcolor{yellow}{P_{y,0}[\textsf{tail}]}$ | $\textcolor{yellow}{P_{y,1}[\textsf{tail}]}$ | +| `P_Y_HIGH_LIMBS_RANGE_CONSTRAINT_TAIL` | $\textcolor{yellow}{P_{y,2}[\textsf{tail}]}$ | $c^{\text{hi}}[4]$ (reassigned) | +| `Z_LOW_LIMBS_RANGE_CONSTRAINT_TAIL` | $\textcolor{yellow}{z_{1,0}[\textsf{tail}]}$ | $\textcolor{yellow}{z_{2,0}[\textsf{tail}]}$ | +| `Z_HIGH_LIMBS_RANGE_CONSTRAINT_TAIL` | $\textcolor{yellow}{z_{1,1}[\textsf{tail}]}$ | $\textcolor{yellow}{z_{2,1}[\textsf{tail}]}$ | +| 
`ACCUMULATOR_LOW_LIMBS_RANGE_CONSTRAINT_TAIL` | $\textcolor{yellow}{a_{0}^{\text{curr}}[\textsf{tail}]}$ | $\textcolor{yellow}{a_{1}^{\text{curr}}[\textsf{tail}]}$ | +| `ACCUMULATOR_HIGH_LIMBS_RANGE_CONSTRAINT_TAIL` | $\textcolor{yellow}{a_{2}^{\text{curr}}[\textsf{tail}]}$ | $c^{\text{lo}}[5]$ (reassigned) | +| `QUOTIENT_LOW_LIMBS_RANGE_CONSTRAINT_TAIL` | $\textcolor{yellow}{q_{0}[\textsf{tail}]}$ | $\textcolor{yellow}{q_{1}[\textsf{tail}]}$ | +| `QUOTIENT_HIGH_LIMBS_RANGE_CONSTRAINT_TAIL` | $\textcolor{yellow}{q_{2}[\textsf{tail}]}$ | $c^{\text{hi}}[5]$ (reassigned) | +| | | | + +The tail microlimbs (shown in yellow) enforce tight range constraints by ensuring top limbs use exactly the required number of bits (explained in the Decomposition Relation section of [RELATIONS.md](RELATIONS.md)). + +**Column reuse optimization:** Some columns are reassigned in odd rows to hold tail microlimbs for limbs that don't need all 5 microlimbs. For example, limb $P_{x, 3}$ is only 50 bits (= 3×14 + 8), requiring only 4 microlimbs. The 5th microlimb column `P_X_HIGH_LIMBS_RANGE_CONSTRAINT_4` at odd rows is therefore reassigned to hold the tail microlimb for $P_{x,3}$ (and carry values $c^{\text{lo}}[4]$, $c^{\text{hi}}[4]$, etc.). + +### Virtual Columns + +Some columns are "virtual" and not explicitly stored in the witness trace. Instead, they are computed on-the-fly during relation evaluation using existing columns. 
These include: + +- Interleaved columns for range constraint microlimbs (computed from the physical microlimb columns) +- Sorted (ordered) columns for range constraint microlimbs (computed by sorting the physical microlimb columns) + +### Mini-Circuit Layout + +The mini-circuit layout is illustrated below: + +- The first 4 columns represent the `EccOpQueue` transcript +- The next 13 columns represent limb decompositions +- The last 64 columns represent microlimb decompositions +- Let $n$ = total number of rows in the mini-circuit +- Let $r_{\textsf{start}}$ = number of rows for randomness at start +- Let $r_{\textsf{end}}$ = number of rows for randomness at the end +- Let $z_1$ = number of initial no-op rows (to ensure column polynomials are shiftable) + +Color coding in the diagram: + +- Purple boxes ($\textcolor{violet}{\textsf{purple}}$): Zero values or initial values + - For all columns: initial no-op row contains zeros + - For non-OP columns (13 limb + 64 microlimb): end randomness region initially contain zeros, later overwritten with random values for ZK sumcheck +- Grey boxes ($\textcolor{grey}{\textsf{grey}}$): Random values (no-op) in the 4 OP queue columns only + - These remain as random values from the random op processing + - Serve Merge Protocol ZK hiding +- Orange boxes ($\textcolor{orange}{\textsf{orange}}$): Main operation rows + - Contains actual ECC operation data for circuit computation + +$$ +\begin{array}{rlllll} +z_1 +& +\overbrace{ + \textcolor{violet}{ + \boxed{ + \hspace{0.95cm} + } + } +}^{4} +& +\textsf{\scriptsize $\longleftarrow$ initial no-op} +& +\overbrace{ +\textcolor{violet}{ + \boxed{ + \hspace{1.45cm} + } +} +}^{13} +& +\overbrace{ +\textcolor{violet}{ + \boxed{ + \hspace{3.85cm} + } +} +}^{64} +\\[2pt] +r_{\textsf{start}} +& +\textcolor{grey}{ + \boxed{ + \begin{array}{c} + \hspace{0.6cm} + \\ + \end{array} + } +} +& +\textsf{\scriptsize $\longleftarrow$ start randomness} +& +\textcolor{violet}{ + \boxed{ + \begin{array}{c} + 
\hspace{1.1cm} + \\ + \end{array} + } +} +& +\textcolor{violet}{ + \boxed{ + \begin{array}{c} + \hspace{3.5cm} + \\ + \end{array} + } +} +\\ \\[-10pt] +n - r_{\textsf{start}} - r_{\textsf{end}} - z_1 & +\textcolor{orange}{ + \boxed{ + \begin{array}{c} + \hspace{0.6cm} + \\ \\ \\ \\ \\ \\ \\ + \end{array} + } +} +& +\textsf{\scriptsize $\longleftarrow$ main ops} +& +\textcolor{orange}{ + \boxed{ + \begin{array}{c} + \hspace{1.1cm} + \\ \\ \\ \\ \\ \\ \\ + \end{array} + } +} +& +\textcolor{orange}{ + \boxed{ + \begin{array}{c} + \hspace{3.5cm} + \\ \\ \\ \\ \\ \\ \\ + \end{array} + } +} +\\ \\[-10pt] +r_{\textsf{end}} +& +\textcolor{grey}{ + \boxed{ + \begin{array}{c} + \hspace{0.6cm} + \\ + \end{array} + } +} +& +\textsf{\scriptsize $\longleftarrow$ end randomness} +& +\textcolor{violet}{ + \boxed{ + \begin{array}{c} + \hspace{1.1cm} + \\ + \end{array} + } +} +& +\textcolor{violet}{ + \boxed{ + \begin{array}{c} + \hspace{3.5cm} + \\ + \end{array} + } +} +\end{array} +$$ + +In our implementation, mini-circuit size is $n = 2^{13}$. We have $z_1 = 2$ (one no-op at the start to ensure shiftability), $r_{\textsf{start}} = 6$ (rows for three random ops at start), and $r_{\textsf{end}} = 4$ (rows for two random ops at end). Thus, the number of main operation rows is $n - r_{\textsf{start}} - r_{\textsf{end}} - z_1 = 8180$. + +The random ops serve dual purposes for zero-knowledge: + +**1. Merge Protocol ZK** (Grey boxes in 4 OP columns): + +- The 3 random ops at start add random values to the 4 ECC op queue columns (tail kernel blinding, ops prepended) +- The 2 random ops at end add random values to the 4 ECC op queue columns (hiding kernel blinding, ops appended) +- Together these contribute 40 random coefficients (10 per wire) to hide merge protocol evaluations +- See [MERGE_PROTOCOL.md](../goblin/MERGE_PROTOCOL.md#zk-considerations) for the degree-of-freedom analysis + +**2. 
ZK Sumcheck Masking** (Initially purple, then filled with random for non-OP columns): + +- The last $r_{\textsf{end}} = 4$ rows align exactly with `NUM_DISABLED_ROWS_IN_SUMCHECK = 4` (denoted as $m$ in the Lagrange table below) +- For the 4 OP queue columns: random values remain from the random ops +- For the remaining 77 columns (13 limb + 64 microlimb): + - Initially filled with zeros during random op processing + - These zeros are overwritten with random field elements during proving key construction + - This masking hides polynomial evaluations in ZK sumcheck + +### Lagrange Polynomials (Precomputed) + +The circuit uses Lagrange polynomials to control which constraints are active: ($I_{\text{size}} = 16$ is the number of columns interleaved together) + +| Polynomial | Description | Active Rows | +| ------------------------------ | ----------------------------------------- | ----------------------------------------------------------------------------------------------- | +| `lagrange_first` | First row | $i = 0$ | +| `lagrange_real_last` | Last row in full circuit (before masking) | $i = 2^{17} - m \cdot I_{\text{size}} - 1$ | +| `lagrange_last` | Last row in full circuit | $i = 2^{17} - 1$ | +| `lagrange_masking` | Masking rows in full circuit | $i \in [2^{17} - m \cdot I_{\text{size}}, \ 2^{17})$ | +| `lagrange_mini_masking` | Masking rows in mini circuit | $i \in [z_1, \ z_1 + r_{\textsf{start}}) \cup [n - r_{\textsf{end}}, \ n)$ | +| `lagrange_even_in_minicircuit` | Even indices in real mini-circuit | $i \in \{u \ \| \ u \ \% \ 2 = 0, \ (z_1 + r_{\textsf{start}}) \leq u < n - r_{\textsf{end}}\}$ | +| `lagrange_odd_in_minicircuit` | Odd indices in real mini-circuit | $i \in \{u \ \| \ u \ \% \ 2 = 1, \ (z_1 + r_{\textsf{start}}) \leq u < n - r_{\textsf{end}}\}$ | +| `lagrange_last_in_minicircuit` | Last row in mini-circuit | $i = 8191$ (mini) | +| `lagrange_result_row` | Row containing final accumulator result | $i = (z_1 + r_{\textsf{start}})$ | +| 
`lagrange_real_last_in_minicircuit` | Last real row in mini-circuit | $i = (n - r_{\textsf{end}}) - 1$ |
+| | | |
+
+## Interleaving: The Key Optimization
+
+The Translator must range-constrain approximately 64 different microlimb sets using a permutation argument (and the delta range constraint). The permutation argument's degree equals $1 + \textsf{NUM\_COLS}$, where NUM_COLS is the number of columns being permuted:
+
+$$
+z_{\textsf{perm}}[i+1] \cdot \prod_{j=1}^{\textsf{NUM\_COLS}} (\textsf{ordered}[j] + \gamma) =
+z_{\textsf{perm}}[i] \cdot \prod_{j=1}^{\textsf{NUM\_COLS}} (\textsf{interleaved}[j] + \gamma)
+$$
+
+The Problem: Permuting all ~64 microlimb columns simultaneously would require us to commit to all of them. Further, since the relation degree would be $1 + 64 = 65$, computing the sumcheck univariates could be a significant overhead for the prover. The prover would then need to commit to the univariates (instead of sending evaluations directly).
+
+The Solution: Interleave 16 logical columns into one virtual column, and create 4 such columns (plus 1 for the extra column). Each group can then perform an independent permutation check with degree $1 + 5 = 6$ (or 7 with Lagrange selector). This reduces the relation degree from 65 to 7.
+
+### Circuit Structure
+
+```
+Mini-circuit size: 2^13 = 8,192 rows (actual computation)
+Full circuit size: 2^13 x 16 = 2^17 = 131,072 rows (after interleaving)
+```
+
+To compute the interleaved polynomials, we group 16 polynomials together and interleave their coefficients. 
Consider the following 16 polynomials each of size $n=2^{13}$ in the mini-circuit: + +$$ +\newcommand{\arraystretch}{1.2} +\begin{array}{|c|c|c|c|c|c|} +\hline +\textsf{index} & \textsf{poly 1} & \textsf{poly 2} & \textsf{poly 3} & \ldots & \textsf{poly 16} \\ +\hline +0 & \textcolor{skyblue}{a_0} & \textcolor{orange}{b_0} & \textcolor{lightgreen}{c_0} & \quad \ldots \quad & \textcolor{firebrick}{p_0} \\ +1 & \textcolor{skyblue}{a_1} & \textcolor{orange}{b_1} & \textcolor{lightgreen}{c_1} & \quad \ldots \quad & \textcolor{firebrick}{p_1} \\ +2 & \textcolor{skyblue}{a_2} & \textcolor{orange}{b_2} & \textcolor{lightgreen}{c_2} & \quad \ldots \quad & \textcolor{firebrick}{p_2} \\ +3 & \textcolor{skyblue}{a_3} & \textcolor{orange}{b_3} & \textcolor{lightgreen}{c_3} & \quad \ldots \quad & \textcolor{firebrick}{p_3} \\[5pt] +\vdots & \vdots & \vdots & \vdots & \ddots & \vdots \\[5pt] +n-1 & \textcolor{skyblue}{a_{n-1}} & \textcolor{orange}{b_{n-1}} & \textcolor{lightgreen}{c_{n-1}} & \quad \ldots \quad & \textcolor{firebrick}{p_{n-1}} \\ +\hline +\end{array} +\quad \longrightarrow \quad +\begin{array}{|c|c|c|} +\hline +\textsf{group} & \textsf{index} & \textsf{interleaved} \\ +\hline +0 & 0 & \textcolor{skyblue}{a_0} \\ +0 & 1 & \textcolor{orange}{b_0} \\ +0 & 2 & \textcolor{lightgreen}{c_0} \\ +\vdots & \vdots & \vdots \\[3pt] +0 & 15 & \textcolor{firebrick}{p_0} \\ \hline +1 & 16 & \textcolor{skyblue}{a_1} \\ +1 & 17 & \textcolor{orange}{b_1} \\ +1 & 18 & \textcolor{lightgreen}{c_1} \\ +\vdots & \vdots & \vdots \\[3pt] +1 & 31 & \textcolor{firebrick}{p_1} \\ \hline +\vdots & \vdots & \vdots \\ \hline +n-1 & 16n-16 & \textcolor{skyblue}{a_{n-1}} \\ +n-1 & 16n-15 & \textcolor{orange}{b_{n-1}} \\ +n-1 & 16n-14 & \textcolor{lightgreen}{c_{n-1}} \\ +\vdots & \vdots & \vdots \\[3pt] +n-1 & 16n-1 & \textcolor{firebrick}{p_{n-1}} \\ +\hline +\end{array} +$$ + +For 64 microlimb columns, we have 4 groups of 16 columns each, resulting in four interleaved polynomials each of size 
$16n = 2^{17}$. Note that the interleaved polynomials are not "physical" wires in the circuit trace: we refer to them as virtual polynomials. Each of these groups performs an independent permutation check: + +- Numerator: 4 interleaved wires + 1 extra = 5 terms +- Denominator: 5 ordered wires = 5 terms +- Degree: $1 + 5 = 6$ (or 7 with Lagrange) + +The permutation argument verifies that within each group, the interleaved values are a permutation of the ordered (sorted) values. Due to interleaving, the total circuit size increases 16×, requiring more zero-padding. Interleaving trades circuit size (inexpensive) for relation degree (expensive). The 16× size increase is acceptable given the 9× degree reduction. + +> **Effect on Commitment Scheme**: For polynomials $p_0, \dots, p_{15}$ of size $n$, the interleaved polynomial of size $16n$ is: +> $$p_{\textsf{interleaved}}(x) = \sum_{i=0}^{15} x^i \cdot p_{i}(x^{16})$$ +> The interleaved polynomials don't need to be committed explicitly; they can be opened (at, say $\gamma$) by using the commitments to the original polynomials and their evaluations (at $\gamma^{16}$). This is explained in more detail in the [Gemini](../commitment_schemes/gemini/README.md) documentation. + +## Witness Generation and Proving Key Construction + +This section details how the Translator circuit's witness polynomials are populated and how zero-knowledge is achieved through masking. 
+ +### Overview + +Witness generation transforms the `EccOpQueue` from the Mega circuit into the 91 polynomials required by the Translator circuit: + +``` +Input: EccOpQueue (n operations) + Evaluation challenge x ∈ Fq + Batching challenge v ∈ Fq + +Output: 91 polynomials of size 2^17 + - 81 witness polynomials + - 5 ordered range constraint polynomials + - 4 interleaved range constraint polynomials (virtual) + - 1 precomputed extra numerator +``` + +**Note:** Witness generation happens in the **mini-circuit size** (2¹³ = 8,192 rows), then is expanded to **full circuit size** (2¹⁷ = 131,072 rows) through interleaving and zero-padding. + +### Step 1: Populate Transcript Polynomials + +The prover receives the `EccOpQueue` from the Mega circuit. Each entry contains: + +$$\texttt{UltraOp} = \{\texttt{op}, P_x, P_y, z_1, z_2\}$$ + +For operation $i$ at rows $2i$ (even) and $2i+1$ (odd), populate: + +**Even row ($2i$):** + +$$ +\begin{aligned} +\texttt{OP}[2i] &= \texttt{op}_i \\ +\texttt{X\_LO\_Y\_HI}[2i] &= P_{x,\text{lo}} = P_x \bmod 2^{136} \\ +\texttt{X\_HI\_Z\_1}[2i] &= P_{x,\text{hi}} = \lfloor P_x / 2^{136} \rfloor \\ +\texttt{Y\_LO\_Z\_2}[2i] &= P_{y,\text{lo}} = P_y \bmod 2^{136} +\end{aligned} +$$ + +**Odd row ($2i+1$):** + +$$ +\begin{aligned} +\texttt{OP}[2i+1] &= 0 \\ +\texttt{X\_LO\_Y\_HI}[2i+1] &= P_{y,\text{hi}} = \lfloor P_y / 2^{136} \rfloor \\ +\texttt{X\_HI\_Z\_1}[2i+1] &= z_1 \\ +\texttt{Y\_LO\_Z\_2}[2i+1] &= z_2 +\end{aligned} +$$ + +### Step 2: Compute Binary Limb Decompositions + +Each 136-bit transcript value is further decomposed into two 68-bit limbs. 
For $P_{x,\text{lo}}$: + +$$P_{x,\text{lo}} = P_{x,0}^{\text{lo}} + 2^{68} \cdot P_{x,1}^{\text{lo}}$$ + +where: + +- $P_{x,0}^{\text{lo}} = P_{x,\text{lo}} \bmod 2^{68}$ + +- $P_{x,1}^{\text{lo}} = \lfloor P_{x,\text{lo}} / 2^{68} \rfloor$ + +Even row ($2i$) limb assignments: + +$$ +\begin{aligned} +\texttt{P\_X\_LOW\_LIMBS}[2i] &= P_{x,0}^{\text{lo}} \\ +\texttt{P\_X\_HIGH\_LIMBS}[2i] &= P_{x,0}^{\text{hi}} \\ +\texttt{P\_Y\_LOW\_LIMBS}[2i] &= P_{y,0}^{\text{lo}} \\ +\texttt{P\_Y\_HIGH\_LIMBS}[2i] &= P_{y,0}^{\text{hi}} \\ +\texttt{Z\_LOW\_LIMBS}[2i] &= z_{1,0} \\ +\texttt{Z\_HIGH\_LIMBS}[2i] &= z_{1,1} +\end{aligned} +$$ + +Odd row ($2i+1$) limb assignments: + +$$ +\begin{aligned} +\texttt{P\_X\_LOW\_LIMBS}[2i+1] &= P_{x,1}^{\text{lo}} \\ +\texttt{P\_X\_HIGH\_LIMBS}[2i+1] &= P_{x,1}^{\text{hi}} \\ +\texttt{P\_Y\_LOW\_LIMBS}[2i+1] &= P_{y,1}^{\text{lo}} \\ +\texttt{P\_Y\_HIGH\_LIMBS}[2i+1] &= P_{y,1}^{\text{hi}} \\ +\texttt{Z\_LOW\_LIMBS}[2i+1] &= z_{2,0} \\ +\texttt{Z\_HIGH\_LIMBS}[2i+1] &= z_{2,1} +\end{aligned} +$$ + +### Step 3: Compute Accumulator and Quotient + +For each even row $2i$, compute the accumulator update and quotient. 
The accumulator evolves as: + +$$a^{\text{curr}} = a^{\text{prev}} \cdot x + \texttt{op} + P_x \cdot v + P_y \cdot v^2 + z_1 \cdot v^3 + z_2 \cdot v^4 \pmod{q}$$ + +The current and the previous accumulators are decomposed into 4 limbs each: + +$$ +\begin{aligned} +a^{\text{curr}} &= +a_0^{\text{curr}} ++ +2^{68} \cdot a_1^{\text{curr}} ++ +2^{136} \cdot a_2^{\text{curr}} ++ +2^{204} \cdot a_3^{\text{curr}} +\\[5pt] +a^{\text{prev}} &= +a_0^{\text{prev}} ++ +2^{68} \cdot a_1^{\text{prev}} ++ +2^{136} \cdot a_2^{\text{prev}} ++ +2^{204} \cdot a_3^{\text{prev}} +\end{aligned} +$$ + +Since we're working in $\mathbb{F}_r$ (not $\mathbb{F}_q$), we must compute the quotient $\mathcal{Q}$ such that: + +$$a^{\text{prev}} \cdot x + \texttt{op} + P_x \cdot v + P_y \cdot v^2 + z_1 \cdot v^3 + z_2 \cdot v^4 = \mathcal{Q} \cdot q + a^{\text{curr}}$$ + +$$ +\implies \mathcal{Q} = \left\lfloor \frac{a^{\text{prev}} \cdot x + \texttt{op} + P_x \cdot v + P_y \cdot v^2 + z_1 \cdot v^3 + z_2 \cdot v^4}{q} \right\rfloor +$$ + +The quotient is then decomposed into 4 limbs (68 + 68 + 68 + 52 bits): + +$$\mathcal{Q} = q_0 + 2^{68} \cdot q_1 + 2^{136} \cdot q_2 + 2^{204} \cdot q_3$$ + +**Carry computation:** The relation-wide limbs $c^{\text{lo}}$ and $c^{\text{hi}}$ (84 bits each) capture overflow from the mod $2^{136}$ checks: + +$$c^{\text{lo}} = \left\lfloor \frac{T_0 + 2^{68} \cdot T_1}{2^{136}} \right\rfloor, \quad c^{\text{hi}} = \left\lfloor \frac{c^{\text{lo}} + T_2 + 2^{68} \cdot T_3}{2^{136}} \right\rfloor$$ + +where $T_0, T_1, T_2, T_3$ are the limb contributions defined in [RELATIONS.md](RELATIONS.md). + +### Step 4: Microlimb Decomposition + +Each 68-bit limb is decomposed into five 14-bit microlimbs plus a tail microlimb for range tightening. For a general 68-bit limb $\ell$: + +$$\ell = \sum_{k=0}^{4} 2^{14k} \cdot m_k$$ + +where each $m_k \in [0, 2^{14})$ and $m_4 \in [0, 2^{12})$ (since $68 = 14 \times 4 + 12$). 
+ +**Tail microlimb:** To enforce $m_4 < 2^{12}$, compute: + +$$m_{\text{tail}} = m_4 \cdot 2^{14-12} = m_4 \cdot 4$$ + +The decomposition relation enforces $m_{\text{tail}} \in [0, 2^{14})$, which implies $m_4 \in [0, 2^{12})$. For limbs with fewer bits, the tail microlimb is adjusted accordingly. + +- 50-bit limbs (top limb): $m_3 \in [0, 2^8) \implies$ tail shift is $2^{14-8} = 64$ +- 60-bit limbs (z high): $m_4 \in [0, 2^4)\implies$ tail shift is $2^{14-4} = 1024$ + +### Step 5: Construct Interleaved Polynomials + +The 64 microlimb columns are organized into 4 groups of 16 columns each. Each group is **interleaved** into a single polynomial at full circuit size. + +**Interleaving formula:** For group polynomials $\{p_0, p_1, \ldots, p_{15}\}$ each of mini-size $n = 2^{13}$: + +$$p_{\text{interleaved}}(x) = \sum_{j=0}^{15} x^j \cdot p_j(x^{16})$$ + +**In coefficient form:** Element at position $i \cdot 16 + j$ in the interleaved polynomial comes from row $i$ of polynomial $p_j$: + +$$p_{\text{interleaved}}[i \cdot 16 + j] = p_j[i] \quad \text{for } i \in [0, n), \ j \in [0, 16)$$ + +This expands the circuit from mini-size $2^{13}$ to full size $2^{17} = 2^{13} \times 16$. + +**Illustration:** We have a total of 64 microlimb columns, each with $n = 2^{13}$ rows (mini-circuit size). We illustrate the microlimb distribution and interleaving process below: + +1. Let $I_{\textsf{size}} = 16$ be the number of microlimb columns in one group. Since we have 64 microlimb columns, we will have 4 groups. + +2. Each group separates the microlimbs into circuit witnesses ($n-m$ rows in orange) and masking values ($m$ rows in gray). + +3. For each group, we interleave the microlimbs to create one interleaved polynomial of size $(n - m) \cdot I_{\textsf{size}}$ for circuit witnesses and $m \cdot I_{\textsf{size}}$ for masking values. 
+ +$$ +\begin{array}{rllll} +n - m +& +\overbrace{ +\textcolor{orange}{ + \boxed{ + \begin{array}{ccccc} + \\ + \\ + \\ + & & W & & \\ + \\ + \\ + \\ + \end{array} + } +} +}^{I_{\textsf{size}}} +\ +\overbrace{ +\textcolor{orange}{ + \boxed{ + \begin{array}{ccccc} + \\ + \\ + \\ + & & W & & \\ + \\ + \\ + \\ + \end{array} + } +} +}^{I_{\textsf{size}}} +\ +\overbrace{ +\textcolor{orange}{ + \boxed{ + \begin{array}{ccccc} + \\ + \\ + \\ + & & W & & \\ + \\ + \\ + \\ + \end{array} + } +} +}^{I_{\textsf{size}}} +\ +\overbrace{ +\textcolor{orange}{ + \boxed{ + \begin{array}{ccccc} + \\ + \\ + \\ + & & W & & \\ + \\ + \\ + \\ + \end{array} + } +} +}^{I_{\textsf{size}}} +\\[2pt] +m +& +\textcolor{grey}{ + \boxed{ + \begin{array}{ccccc} + & & M & & \\ + \end{array} + } +} +\ +\textcolor{grey}{ + \boxed{ + \begin{array}{ccccc} + & & M & & \\ + \end{array} + } +} +\ +\textcolor{grey}{ + \boxed{ + \begin{array}{ccccc} + & & M & & \\ + \end{array} + } +} +\ +\textcolor{grey}{ + \boxed{ + \begin{array}{ccccc} + & & M & & \\ + \end{array} + } +} +\end{array} + +\xrightarrow[]{\textsf{interleaved polys}} + +\begin{array}{lllll} +I_1 \quad I_2 \quad I_3 \quad I_4 \\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[60pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[60pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[60pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[60pt] +\end{array} +}} +& +N - m \cdot I_{\textsf{size}} +\\ +\\[-10pt] +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-3pt]\\[-3pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-3pt]\\[-3pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-3pt]\\[-3pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-3pt]\\[-3pt] +\end{array} +}} +& +m \cdot I_{\textsf{size}} +\end{array} +$$ + +### Step 6: 
Construct Ordered (Sorted) Polynomials + +The permutation argument requires proving that the interleaved microlimbs equal the **sorted** microlimbs. The prover constructs 5 ordered polynomials by collecting microlimbs from all 64 columns, sorting them, and distributing across ordered polynomials with inserted step values. We first describe the mathematical setup and then illustrate the construction steps. + +**Constants:** + +- Mini-circuit size: $n = 2^{13} = 8{,}192$ +- Number of masked rows in mini-circuit: $m = 4$ +- Mini-circuit size without masking: $(n - m) = 8{,}188$ +- Number of interleaving groups: $G = 4$ +- Group size: $I_{\text{size}} = 16$ (polynomials per group) +- Full circuit size: $N := n \cdot I_{\text{size}} = 2^{17} = 131{,}072$ +- Circuit size without masking: $N_{\text{no-mask}} = N - m \cdot I_{\textsf{size}}$ +- Step sequence size: $N_{\text{steps}} = 5{,}462$ + +**Step sequence:** The sorted steps $\mathcal{S} = \{s_0, s_1, \ldots, s_{5461}\}$ where: +$$s_i = 3i \quad \text{for } i \in [0, 5461], \quad s_{5461} = 16{,}383 = 2^{14} - 1$$ + +This ensures coverage of all values in $[0, 2^{14})$ with max gap of 3. + +#### Step 6.1: Collect Microlimbs from Each Group + +For each group $g \in \{0, 1, 2, 3\}$, collect all microlimbs from its 16 polynomials: + +$$\mathcal{M}_g = \bigcup_{j=0}^{15} \Big\{ p_{g,j}[i] : i \in [0, (n-m)) \Big\}$$ + +where $p_{g,j}$ is the $j$-th polynomial in group $g$. Size of each group microlimb set: + +$$|\mathcal{M}_g| = 16 \times (n - m)$$ + +and total microlimbs across all groups: +$$|\mathcal{M}_0 \cup \mathcal{M}_1 \cup \mathcal{M}_2 \cup \mathcal{M}_3| = 64 \times (n - m).$$ + +#### Step 6.2: Determine Capacity for Each Ordered Polynomial + +Each ordered polynomial has size $N$ but must accommodate: + +1. Microlimbs from the circuit (actual witness values) +2. 
Step values $\mathcal{S}$ (for delta range constraint) + +**Capacity per ordered polynomial (for circuit microlimbs):** +$$C_{\text{capacity}} = N_{\text{no-mask}} - N_{\text{steps}} = N_{\text{no-mask}} - 5{,}462$$ + +This is the maximum number of witnesses each ordered polynomial can hold before adding step values. + +#### Step 6.3: Distribute Microlimbs to Ordered Polynomials + +For groups 0-3, construct `ordered_range_constraints_i` by: + +1. Collect microlimbs from group $g$: $\mathcal{M}_g$ +2. Take first $C_{\text{capacity}}$ elements (arbitrarily ordered at this point) +3. Add step values $\mathcal{S}$ +4. Sort the combined set + +Mathematically, for $g \in \{0, 1, 2, 3\}$: + +$$ +\text{ordered}[g]_{\text{unsorted}} = \begin{cases} +\mathcal{M}_g[k] & \text{if } k < C_{\text{capacity}} \\ +\mathcal{S}[k - C_{\text{capacity}}] & \text{if } C_{\text{capacity}} \leq k < N_{\text{no-mask}} \\ +0 & \text{if } k \geq N_{\text{no-mask}} \text{ (masking region)} +\end{cases} +$$ + +Then sort: +$$\text{ordered}[g] = \text{sort}(\text{ordered}[g]_{\text{unsorted}})$$ + +Overflow microlimbs: If $|\mathcal{M}_g| > C_{\text{capacity}}$, the excess microlimbs go to group 4: + +$$\mathcal{M}_{g,\text{overflow}} = \left\{ \mathcal{M}_g[k] : k \geq C_{\text{capacity}} \right\}$$ + +$$|\mathcal{M}_{g,\text{overflow}}| = |\mathcal{M}_g| - C_{\text{capacity}}$$ + +#### Step 6.4: Construct the 5th Ordered Polynomial + +The 5th ordered polynomial (`ordered_range_constraints_4`) collects all overflow: + +$$\mathcal{M}_{\text{overflow}} = \bigcup_{g=0}^{3} \mathcal{M}_{g,\text{overflow}}$$ + +Size of overflow: +$$|\mathcal{M}_{\text{overflow}}| = 4 \times |\mathcal{M}_{g,\text{overflow}}| = 4 \times (16 \times (n - m) - C_{\text{capacity}})$$ + +Then construct: + +$$ +\text{ordered}[4]_{\text{unsorted}} = \begin{cases} +\mathcal{M}_{\text{overflow}}[k] & \text{if } k < |\mathcal{M}_{\text{overflow}}| \\ +\mathcal{S}[k - |\mathcal{M}_{\text{overflow}}|] & \text{if } 
|\mathcal{M}_{\text{overflow}}| \leq k < |\mathcal{M}_{\text{overflow}}| + N_{\text{steps}} \\
+0 & \text{otherwise}
+\end{cases}
+$$
+
+Then sort:
+$$\text{ordered}[4] = \text{sort}(\text{ordered}[4]_{\text{unsorted}})$$
+
+**Illustration:** We start with the 4 interleaved polynomials constructed earlier:
+
+1. First, we add an extra numerator polynomial $I_5$ containing the step values (shown in green, repeated 5 times) to enable the delta range constraint.
+
+2. The remainder of $I_5$ is filled with zero-padding (shown in violet) to match the size $N$ of the interleaved polynomials.
+
+3. In the four interleaved polynomials, we have circuit witness values (orange) and masking values (gray). We also show the overflow microlimbs that will go into the 5th ordered polynomial (smaller orange boxes).
+
+4. We then construct the ordered polynomials $O_1, \dots, O_5$ by adding the step values into each interleaved polynomial and sorting the witness values appropriately.
+
+5. The randomness in the masking region (gray) is redistributed to ensure that the multisets of the interleaved polynomials plus extra numerator equal the multisets of the ordered polynomials. Hence, the number of masking rows in each of the ordered polynomials is at least $\left\lfloor\frac{4 \cdot m \cdot I_{\textsf{size}}}{5}\right\rfloor$. The remainder of the rows in each ordered polynomial is filled with zero-padding. 
+ +$$ +\begin{array}{rllll} +& I_1 \quad I_2 \quad I_3 \quad I_4 \\ +N - m \cdot I_{\textsf{size}} +& +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[60pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[60pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[60pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[60pt] +\end{array} +}} +\\ +\\[-10pt] +m \cdot I_{\textsf{size}} +& +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-3pt]\\[-3pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-3pt]\\[-3pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-3pt]\\[-3pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-3pt]\\[-3pt] +\end{array} +}} +\end{array} + +\xrightarrow[]{\textsf{add extra numerator}} + +\begin{array}{lrrrrr} +I_1 \quad I_2 \quad I_3 \quad I_4 \\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[25pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[25pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[25pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[25pt] +\end{array} +}} +\\ +\\[-10pt] +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\[1pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\[1pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\[1pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\[1pt] +\end{array} +}} +\\ +\\[-10pt] +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-3pt]\\[-3pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-3pt]\\[-3pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-3pt]\\[-3pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} 
+\\[-3pt]\\[-3pt] +\end{array} +}} +\end{array} + +\begin{array}{l} + I_5 \\ + \textcolor{lightgreen}{ + \boxed{ + \begin{array}{c} + s \\[1pt] + \end{array} + }} + \\ + \\[-10pt] + \textcolor{lightgreen}{ + \boxed{ + \begin{array}{c} + s \\[1pt] + \end{array} + }} + \\ + \\[-10pt] + \textcolor{lightgreen}{ + \boxed{ + \begin{array}{c} + s \\[1pt] + \end{array} + }} + \\ + \\[-10pt] + \textcolor{lightgreen}{ + \boxed{ + \begin{array}{c} + s \\[1pt] + \end{array} + }} + \\ + \\[-10pt] + \textcolor{lightgreen}{ + \boxed{ + \begin{array}{c} + s \\[1pt] + \end{array} + }} + \\ + \\[-13pt] + \textcolor{violet}{ + \boxed{ + \begin{array}{c} + \\ \\ z \\ \\[2pt] + \end{array} + }} +\end{array} + +\xrightarrow[]{\textsf{sort into ordered polys}} + + +\begin{array}{lrrrrr} +O_1 \quad O_2 \ \ O_3 \quad O_4 \\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[25pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[25pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[25pt] +\end{array} +}} +\ +\textcolor{orange}{ +\boxed{ +\begin{array}{c} +\\ \\ \\ \\ \\ \\ \\[25pt] +\end{array} +}} +\\ +\\[-10pt] +\textcolor{lightgreen}{ +\boxed{ +\begin{array}{c} +\\[1pt] +\end{array} +}} +\ +\textcolor{lightgreen}{ +\boxed{ +\begin{array}{c} +\\[1pt] +\end{array} +}} +\ +\textcolor{lightgreen}{ +\boxed{ +\begin{array}{c} +\\[1pt] +\end{array} +}} +\ +\textcolor{lightgreen}{ +\boxed{ +\begin{array}{c} +\\[1pt] +\end{array} +}} +\\ +\\[-10pt] \hline\hline +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-6pt]\\[-6pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-6pt]\\[-6pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-6pt]\\[-6pt] +\end{array} +}} +\ +\textcolor{gray}{ +\boxed{ +\begin{array}{c} +\\[-6pt]\\[-6pt] +\end{array} +}} +\\ +\\[-10pt] +\textcolor{violet}{ +\boxed{ +\begin{array}{c} +\\[-2pt] +\end{array} +}} +\ 
+\textcolor{violet}{ +\boxed{ +\begin{array}{c} +\\[-2pt] +\end{array} +}} +\ +\textcolor{violet}{ +\boxed{ +\begin{array}{c} +\\[-2pt] +\end{array} +}} +\ +\textcolor{violet}{ +\boxed{ +\begin{array}{c} +\\[-2pt] +\end{array} +}} +\end{array} + +\begin{array}{l} + O_5 \\ + \textcolor{orange}{ + \boxed{ + \begin{array}{c} + \\[1pt] + \end{array} + }} + \\ + \\[-10pt] + \textcolor{orange}{ + \boxed{ + \begin{array}{c} + \\[1pt] + \end{array} + }} + \\ + \\[-10pt] + \textcolor{orange}{ + \boxed{ + \begin{array}{c} + \\[1pt] + \end{array} + }} + \\ + \\[-10pt] + \textcolor{orange}{ + \boxed{ + \begin{array}{c} + \\[1pt] + \end{array} + }} + \\ + \\[-12pt] + \textcolor{lightgreen}{ + \boxed{ + \begin{array}{c} + \\[1pt] + \end{array} + }} + \\ + \\[-13pt] + \textcolor{violet}{ + \boxed{ + \begin{array}{c} + \\[10pt] + \end{array} + }} + \\ + \\[-10pt] \hline\hline + \textcolor{gray}{ + \boxed{ + \begin{array}{c} + \\[-6pt]\\[-6pt] + \end{array} + }} + & + \longleftarrow {\scriptsize \textsf{randomness of size}} \left\lfloor\frac{4 \cdot m \cdot I_{\textsf{size}}}{5}\right\rfloor + \\ + \\[-10pt] + \textcolor{violet}{ + \boxed{ + \begin{array}{c} + \\[-2pt] + \end{array} + }} +\end{array} +$$ + +> In our case, we have $m=4$ and $I_{\textsf{size}}=16$ which results in $(m \cdot I_{\textsf{size}}) = 64$ masked rows in each interleaved polynomials. Thus, each ordered polynomial will have at least $\left\lfloor\frac{4 \cdot 64}{5}\right\rfloor = 51$ masked rows. The remainder masked rows are added to the respective ordered polynomials. The masking rows in each ordered polynomial are padded with zero values to ensure the multiset equality holds. 
+>
+> As illustrated, the two sets of interleaved and ordered polynomials satisfy the multiset equality:
+> $$\bigcup_{i=1}^5 I_i = \bigcup_{i=1}^5 O_i.$$
+
+### Step 7: Zero-Knowledge Masking
+
+To achieve zero-knowledge, the translator uses two independent masking mechanisms:
+
+#### ZK Sumcheck Masking (Non-OP-Queue Wires)
+
+To hide polynomial evaluations used in ZK sumcheck, random values are added to the last $m = r_{\textsf{end}} = 4$ rows of the mini-circuit for all non-OP-queue wires (13 limb decomposition columns, 64 microlimb columns).
+
+1. During random op processing, these columns are initially filled with zeros
+2. During proving key construction, these zeros are overwritten with random field elements
+3. The Lagrange polynomial `lagrange_mini_masking` marks these last 4 rows as masked
+
+#### Permutation Argument Masking (Ordered Polynomials)
+
+For the permutation argument over microlimbs, a separate masking mechanism is used.
+For each of the witness polynomials, the masking region is defined as the last $m = r_{\textsf{end}}$ rows of the mini-circuit, indexed as:
+
+$$[n - m, \ n).$$
+
+After interleaving, the 4 interleaved polynomials have random values at positions:
+
+$$[N - m \cdot I_{\textsf{size}}, \ N).$$
+
+where $m = r_{\textsf{end}} = 4$ (the same 4 rows used for ZK sumcheck masking).
+
+#### Redistributing Randomness to Ordered Polynomials
+
+The ordered polynomials must be committed (unlike interleaved polynomials, which are virtual). To maintain zero-knowledge, the prover redistributes the random values from the 4 interleaved polynomials to the 5 ordered polynomials. As illustrated above, each ordered polynomial receives approximately an equal share of the randomness from the interleaved polynomials. 
The total number of random values in the interleaved polynomials: + +$$M = 4 \cdot m \cdot I_{\textsf{size}}.$$ + +To distribute these $M$ random values to 5 ordered polynomials, each ordered polynomial receives $\left\lfloor\frac{M}{5}\right\rfloor$ random values in its masking region. The remaining random values (if $M$ is not divisible by 5) are distributed one per ordered polynomial starting from the first. Further, since + +$$ +\underbrace{(m \cdot I_{\textsf{size}})}_{\textsf{size of masking region}} > +\underbrace{\left\lfloor \left(\frac{4}{5} \cdot m \cdot I_{\textsf{size}}\right) \right\rfloor}_{\textsf{size of randomness in ordered polys}}, +$$ + +the remaining positions in the ordered masking region are filled with zeros. + +**Note:** The same random values appear in both interleaved and ordered polynomials (just at different positions within the masking region). This is why the $\beta \cdot L_{\text{mask}}$ term is needed in the permutation relation - see [RELATIONS.md](RELATIONS.md#permutation-relation-mathematical-specification) for details. + +Some positions in the ordered masking region contain random values, others contain zeros. The `ordered_extra_range_constraints_numerator` compensates for these zeros in the permutation check. + +### Step 8: Precomputed Polynomials + +Several polynomials are precomputed and independent of the witness. 
+ +#### Lagrange Polynomials + +Define row-specific selectors: + +$$ +\begin{aligned} +\texttt{lagrange\_first}[i] &= \begin{cases} 1 & i = 0 \\ 0 & \text{otherwise} \end{cases} \\ +\texttt{lagrange\_last}[i] &= \begin{cases} 1 & i = 2^{17} - 1 \\ 0 & \text{otherwise} \end{cases} \\ +\texttt{lagrange\_even}[i] &= \begin{cases} 1 & i \in [0, 2^{13}), \ i \text{ even} \\ 0 & \text{otherwise} \end{cases} \\ +\texttt{lagrange\_odd}[i] &= \begin{cases} 1 & i \in [0, 2^{13}), \ i \text{ odd} \\ 0 & \text{otherwise} \end{cases} +\end{aligned} +$$ + +#### Ordered Extra Range Constraints Numerator + +This polynomial contains the "step values" repeated to balance the permutation: + +$$\texttt{ordered\_extra}[i \cdot 5 + j] = \text{sorted\_steps}[i] \quad \text{for } i \in [0, 5462), \ j \in [0, 5)$$ + +where $\text{sorted\_steps} = \{0, 3, 6, 9, \ldots, 16383\}$. + +This ensures the multisets balance: + +- **Numerator:** 4 interleaved + 1 extra (with 5 copies of each step value) +- **Denominator:** 5 ordered (each with 1 copy of each step value) + +## Translator Relations + +Constraints for the translator VM are specified in [RELATIONS.md](RELATIONS.md). diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/RELATIONS.md b/barretenberg/cpp/src/barretenberg/translator_vm/RELATIONS.md new file mode 100644 index 000000000000..6f6f5e5472de --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/translator_vm/RELATIONS.md @@ -0,0 +1,699 @@ +# Translator Relations + +The translator VM enforces several relations/constraints to ensure the correctness of non-native field arithmetic and other operations. The primary relation is the **Non-Native Field Relation**, which verifies that certain accumulations hold in a non-native field (the BN254 base field $\mathbb{F}_q$) while operating in the native field (the BN254 scalar field $\mathbb{F}_r$). + +Since we follow a two-row trace structure, some relations are only active on even rows, while others are only active on odd rows. 
Below is a summary of the relations and their activation patterns. + +| Constraint | No of subrelations | Active on even rows | Active on odd rows | +| ----------------------------- | ------------------ | ------------------- | ------------------ | +| Non-Native Field Relation | 3 | ✓ | ✗ | +| Decomposition Relation | 48 | ✓ | ✓ | +| Permutation Relation | 2 | ✓ | ✓ | +| Delta Range Constraint | 10 | ✓ | ✓ | +| Opcode Constraint Relation | 5 | ✓ | ✓ | +| Accumulator Transfer Relation | 12 | ✗ | ✓ (propagation) | +| Zero Constraints Relation | 68 | ✓ | ✓ | + +Lagrange selectors for activation: + +- $L_{\text{even}}$: Equals 1 on even rows, 0 elsewhere +- $L_{\text{odd}}$: Equals 1 on odd rows, 0 elsewhere + +## Table of Contents + +1. [Limb Decomposition Structure](#limb-decomposition-structure) +2. [Non-Native Field Relations](#non-native-field-relations) +3. [Decomposition Relation](#decomposition-relation) +4. [Permutation Relation](#permutation-relation) +5. [Delta Range Constraint Relation](#delta-range-constraint-relation) +6. 
[Extra Relations](#extra-relations) + - (a) [Opcode Constraint Relation](#opcode-constraint-relation) + - (b) [Accumulator Transfer Relation](#accumulator-transfer-relation) + - (c) [Zero Constraints Relation](#zero-constraints-relation) + +--- + +## Limb Decomposition Structure + +This table establishes all notation used in the relations: + +| Value | Description | Binary Limbs | Native $\mathbb{F}_r$ | +| ------------------------------- | -------------------- | ------------------------------------------------------------------------------------ | --------------------- | +| **Evaluation challenge** | +| $x$ | Evaluation point | $x_0, x_1, x_2, x_3$ | $x_4$ | +| **Batching challenges** | +| $v$ | Batching challenge | $v_0, v_1, v_2, v_3$ | $v_4$ | +| $v^2$ | v squared | $(v^2)_0, (v^2)_1, (v^2)_2, (v^2)_3$ | $(v^2)_4$ | +| $v^3$ | v cubed | $(v^3)_0, (v^3)_1, (v^3)_2, (v^3)_3$ | $(v^3)_4$ | +| $v^4$ | v to fourth | $(v^4)_0, (v^4)_1, (v^4)_2, (v^4)_3$ | $(v^4)_4$ | +| **Point coordinates (witness)** | +| $P_x$ | Point x-coordinate | $P_{x,0}^{\text{lo}}, P_{x,1}^{\text{lo}}, P_{x,0}^{\text{hi}}, P_{x,1}^{\text{hi}}$ | (reconstructed) | +| $P_y$ | Point y-coordinate | $P_{y,0}^{\text{lo}}, P_{y,1}^{\text{lo}}, P_{y,0}^{\text{hi}}, P_{y,1}^{\text{hi}}$ | (reconstructed) | +| **Z-values (witness, 128-bit)** | +| $z_1$ | 128-bit value | $z_{1,0}, z_{1,1}$ (only 2 limbs) | (reconstructed) | +| $z_2$ | 128-bit value | $z_{2,0}, z_{2,1}$ (only 2 limbs) | (reconstructed) | +| **Accumulator (witness)** | +| $a^{\text{prev}}$ | Previous accumulator | $a_0^{\text{prev}}, a_1^{\text{prev}}, a_2^{\text{prev}}, a_3^{\text{prev}}$ | (reconstructed) | +| $a^{\text{curr}}$ | Current accumulator | $a_0^{\text{curr}}, a_1^{\text{curr}}, a_2^{\text{curr}}, a_3^{\text{curr}}$ | (reconstructed) | +| **Quotient (witness)** | +| $\mathcal{Q}$ | Division quotient | $q_0, q_1, q_2, q_3$ | (reconstructed) | +| **Negative $q$ constant** | +| $\bar{q}$ | $-q \pmod{2^{272}}$ | $\bar{q}_0, 
\bar{q}_1, \bar{q}_2, \bar{q}_3$ | $\bar{q}_4$ | +| **Carries (witness)** | +| $c^{\text{lo}}$ | Lower carry | (single 84-bit value) | - | +| $c^{\text{hi}}$ | Higher carry | (single 84-bit value) | - | +| **Opcode (witness, small)** | +| $\texttt{op}$ | Operation code | (no decomposition, ≤ 8) | $\texttt{op}$ | + +#### Reconstruction Formula (General) + +For a 254-bit value decomposed as $\ell_0, \ell_1, \ell_2, \ell_3$: + +$$\boxed{\text{Value} = \ell_0 + 2^{68} \cdot \ell_1 + 2^{136} \cdot \ell_2 + 2^{204} \cdot \ell_3}$$ + +**Specific reconstructions:** + +The coordinates $P_x$ and $P_y$ are reconstructed as: + +$$P_x = P_{x,0}^{\text{lo}} + 2^{68} \cdot P_{x,1}^{\text{lo}} + 2^{136} \cdot P_{x,0}^{\text{hi}} + 2^{204} \cdot P_{x,1}^{\text{hi}}$$ + +$$P_y = P_{y,0}^{\text{lo}} + 2^{68} \cdot P_{y,1}^{\text{lo}} + 2^{136} \cdot P_{y,0}^{\text{hi}} + 2^{204} \cdot P_{y,1}^{\text{hi}}$$ + +The scalars $z_1$ and $z_2$ (both 128-bit) are reconstructed as: + +$$z_1 = z_{1,0} + 2^{68} \cdot z_{1,1}$$ + +$$z_2 = z_{2,0} + 2^{68} \cdot z_{2,1}$$ + +The accumulators are reconstructed as: + +$$a^{\text{prev}} = a_0^{\text{prev}} + 2^{68} \cdot a_1^{\text{prev}} + 2^{136} \cdot a_2^{\text{prev}} + 2^{204} \cdot a_3^{\text{prev}}$$ + +$$a^{\text{curr}} = a_0^{\text{curr}} + 2^{68} \cdot a_1^{\text{curr}} + 2^{136} \cdot a_2^{\text{curr}} + 2^{204} \cdot a_3^{\text{curr}}$$ + +The quotient $\mathcal{Q}$ is reconstructed as: + +$$\mathcal{Q} = q_0 + 2^{68} \cdot q_1 + 2^{136} \cdot q_2 + 2^{204} \cdot q_3$$ + +## Non-Native Field Relations + +We want to prove the following accumulation identity holds in $\mathbb{F}_q$: + +$$\boxed{a^{\text{curr}} = a^{\text{prev}} \cdot x + \texttt{op} + P_x \cdot v + P_y \cdot v^2 + z_1 \cdot v^3 + z_2 \cdot v^4 \pmod{q}}$$ + +We can only perform arithmetic in $\mathbb{F}_r$, but we need to prove correctness in $\mathbb{F}_q$ (the base field). 
+To do this, we rewrite the above equation as an integer equation with quotient $\mathcal{Q}$: + +$$a^{\text{prev}} \cdot x + \texttt{op} + P_x \cdot v + P_y \cdot v^2 + z_1 \cdot v^3 + z_2 \cdot v^4 - \mathcal{Q} \cdot q - a^{\text{curr}} = 0 \quad (\text{in integers})$$ + +If this equation holds: + +1. Modulo $2^{272}$ (via limb arithmetic in $\mathbb{F}_r$), and +2. Modulo $r$ (native $\mathbb{F}_r$ computation), and +3. All values are properly range-constrained + +then it must hold in integers. This is because the Chinese Remainder Theorem guarantees that if an equation holds modulo two coprime moduli whose product exceeds the maximum possible value of the equation, then it holds over the integers. +Since all values are in $\mathbb{F}_q$, i.e., they are less than $q$, we have: + +$$ +\begin{aligned} +\textsf{max}(a^{\text{prev}} \cdot x + \texttt{op} + P_x \cdot v + P_y \cdot v^2 + z_1 \cdot v^3 + z_2 \cdot v^4) &< 5q^2 < 5 \cdot (2^{254})^2 < 2^{511} +\\ +\textsf{max}(\mathcal{Q} \cdot q) &< q^2 < (2^{254})^2 < 2^{508} +\end{aligned} +$$ + +Therefore, the maximum possible value of the left-hand side is less than $2^{511}$, while the moduli product is $2^{272} \cdot r > 2^{525} > 2^{511}$. +See [bigfield documentation](../stdlib/primitives/bigfield/README.md) for more details on non-native field arithmetic. + +The non-native field relation is enforced through three separate subrelations: + +| Subrelation | Purpose | Modulus | Limbs checked | +| ----------- | -------------------------- | --------- | ------------------------------------------ | +| 1 | Lower mod $2^{272}$ check | $2^{136}$ | Limbs 0, 1 | +| 2 | Higher mod $2^{272}$ check | $2^{136}$ | Limbs 2, 3 (with carry from subrelation 1) | +| 3 | Native field check | $r$ | Full native reconstruction | + +Together, these prove the relation holds in integers. 
+ +### Subrelation 1: Lower Mod $2^{136}$ Check + +Prove that when we compute the accumulation formula using limbs 0 and 1, the result is a multiple of $2^{136}$. + +We compute the accumulation using: + +- Limb 0 terms (contribute at weight $2^0$) +- Limb 1 terms (contribute at weight $2^{68}$) + +The result should be: $\text{Result} = c^{\text{lo}} \cdot 2^{136}$ for some carry $c^{\text{lo}}$. +The limb 0 contribution is: + +$$ +\boxed{ + \begin{align*} + T_0 := &\; a_0^{\text{prev}} \cdot x_0 & \\ + &+ \texttt{op} \\ + &+ P_{x,0}^{\text{lo}} \cdot v_0 \\ + &+ P_{y,0}^{\text{lo}} \cdot (v^2)_0 \\ + &+ z_{1,0} \cdot (v^3)_0 \\ + &+ z_{2,0} \cdot (v^4)_0 \\ + &+ q_0 \cdot \bar{q}_0 \\ + &- a_0^{\text{curr}} + \end{align*} +} +$$ + +The limb 1 contribution is: + +$$ +\boxed{\begin{align*} +T_1 := &\; a_1^{\text{prev}} \cdot x_0 + a_0^{\text{prev}} \cdot x_1 & \\ +&+ P_{x,0}^{\text{lo}} \cdot v_1 + P_{x,1}^{\text{lo}} \cdot v_0 \\ +&+ P_{y,0}^{\text{lo}} \cdot (v^2)_1 + P_{y,1}^{\text{lo}} \cdot (v^2)_0 \\ +&+ z_{1,0} \cdot (v^3)_1 + z_{1,1} \cdot (v^3)_0 \\ +&+ z_{2,0} \cdot (v^4)_1 + z_{2,1} \cdot (v^4)_0 \\ +&+ q_0 \cdot \bar{q}_1 + q_1 \cdot \bar{q}_0 \\ +&- a_1^{\text{curr}} +\end{align*}} +$$ + +Thus, the combined subrelation is: + +$$\boxed{L_{\text{even}} \cdot \texttt{op} \cdot \left( T_0 + 2^{68} \cdot T_1 - 2^{136} \cdot c^{\text{lo}} \right) = 0}$$ + +Interpretation: + +- Compute $T_0$ (limb 0 contribution) +- Compute $T_1 \cdot 2^{68}$ (limb 1 contribution, shifted by 68 bits) +- Their sum should equal $c^{\text{lo}} \cdot 2^{136}$ +- If this holds, the lower 136 bits of the accumulation equation are correct + +This subrelation is only active when: + +- $L_{\text{even}} = 1$ (even rows in mini-circuit) +- $\texttt{op} \neq 0$ (not a no-op) + +### Subrelation 2: Higher Mod $2^{136}$ Check + +Prove that when we compute the accumulation formula using limbs 2 and 3, plus the carry from subrelation 1, the result is a multiple of $2^{136}$. 
+ +We compute using: + +- The carry $c^{\text{lo}}$ from subrelation 1 +- Limb 2 terms (contribute at weight $2^{136}$) +- Limb 3 terms (contribute at weight $2^{204}$) + +The result should be: $\text{Result} = c^{\text{hi}} \cdot 2^{136}$ for some carry $c^{\text{hi}}$. +The limb 2 contribution (with carry) is: + +$$ +\boxed{\begin{align*} +T_2 := &\; c^{\text{lo}} \quad \textsf{(carry from subrelation 1)} & \\ +&+ a_2^{\text{prev}} \cdot x_0 + a_1^{\text{prev}} \cdot x_1 + a_0^{\text{prev}} \cdot x_2 \\ +&+ P_{x,0}^{\text{hi}} \cdot v_0 + P_{x,1}^{\text{lo}} \cdot v_1 + P_{x,0}^{\text{lo}} \cdot v_2 \\ +&+ P_{y,0}^{\text{hi}} \cdot (v^2)_0 + P_{y,1}^{\text{lo}} \cdot (v^2)_1 + P_{y,0}^{\text{lo}} \cdot (v^2)_2 \\ +&+ z_{1,1} \cdot (v^3)_1 + z_{1,0} \cdot (v^3)_2 \\ +&+ z_{2,1} \cdot (v^4)_1 + z_{2,0} \cdot (v^4)_2 \\ +&+ q_2 \cdot \bar{q}_0 + q_1 \cdot \bar{q}_1 + q_0 \cdot \bar{q}_2 \\ +&- a_2^{\text{curr}} +\end{align*}} +$$ + +The limb 3 contribution is: + +$$ +\boxed{\begin{align*} +T_3 := &\; a_3^{\text{prev}} \cdot x_0 + a_2^{\text{prev}} \cdot x_1 + a_1^{\text{prev}} \cdot x_2 + a_0^{\text{prev}} \cdot x_3 & \\ +&+ P_{x,1}^{\text{hi}} \cdot v_0 + P_{x,0}^{\text{hi}} \cdot v_1 + P_{x,1}^{\text{lo}} \cdot v_2 + P_{x,0}^{\text{lo}} \cdot v_3 \\ +&+ P_{y,1}^{\text{hi}} \cdot (v^2)_0 + P_{y,0}^{\text{hi}} \cdot (v^2)_1 + P_{y,1}^{\text{lo}} \cdot (v^2)_2 + P_{y,0}^{\text{lo}} \cdot (v^2)_3 \\ +&+ z_{1,1} \cdot (v^3)_2 + z_{1,0} \cdot (v^3)_3 \\ +&+ z_{2,1} \cdot (v^4)_2 + z_{2,0} \cdot (v^4)_3 \\ +&+ q_3 \cdot \bar{q}_0 + q_2 \cdot \bar{q}_1 + q_1 \cdot \bar{q}_2 + q_0 \cdot \bar{q}_3 \\ +&- a_3^{\text{curr}} +\end{align*}} +$$ + +The combined subrelation 2 is: + +$$\boxed{L_{\text{even}} \cdot \texttt{op} \cdot \left( T_2 + 2^{68} \cdot T_3 - 2^{136} \cdot c^{\text{hi}} \right) = 0}$$ + +Interpretation: + +- Start with carry $c^{\text{lo}}$ from subrelation 1 +- Add limb 2 contribution $T_2$ +- Add limb 3 contribution $T_3 \cdot 2^{68}$ +- Result should be 
$c^{\text{hi}} \cdot 2^{136}$ +- If this holds, the higher 136 bits are correct + +Together with Subrelation 1: We've proven the relation holds modulo $2^{272}$. + +### Subrelation 3: Native Field Check + +Prove the accumulation formula holds when computed directly in $\mathbb{F}_r$ (the native field). +First, reconstruct all values from their limbs: + +$$ +\begin{align*} +\tilde{P}_x &= P_{x,0}^{\text{lo}} + 2^{68} \cdot P_{x,1}^{\text{lo}} + 2^{136} \cdot P_{x,0}^{\text{hi}} + 2^{204} \cdot P_{x,1}^{\text{hi}} \pmod{r} \\ +\tilde{P}_y &= P_{y,0}^{\text{lo}} + 2^{68} \cdot P_{y,1}^{\text{lo}} + 2^{136} \cdot P_{y,0}^{\text{hi}} + 2^{204} \cdot P_{y,1}^{\text{hi}} \pmod{r} \\ +\tilde{z}_1 &= z_{1,0} + 2^{68} \cdot z_{1,1} \pmod{r} \\ +\tilde{z}_2 &= z_{2,0} + 2^{68} \cdot z_{2,1} \pmod{r} \\ +\tilde{a}^{\text{prev}} &= a_0^{\text{prev}} + 2^{68} \cdot a_1^{\text{prev}} + 2^{136} \cdot a_2^{\text{prev}} + 2^{204} \cdot a_3^{\text{prev}} \pmod{r} \\ +\tilde{a}^{\text{curr}} &= a_0^{\text{curr}} + 2^{68} \cdot a_1^{\text{curr}} + 2^{136} \cdot a_2^{\text{curr}} + 2^{204} \cdot a_3^{\text{curr}} \pmod{r} \\ +\tilde{\mathcal{Q}} &= q_0 + 2^{68} \cdot q_1 + 2^{136} \cdot q_2 + 2^{204} \cdot q_3 \pmod{r} +\end{align*} +$$ + +**Note:** The tilde indicates these are native field reconstructions in $\mathbb{F}_r$, not the original $\mathbb{F}_q$ values. 
+ +The subrelation 3 is then: + +$$ +\boxed{\begin{align*} +L_{\text{even}} \cdot \texttt{op} \cdot \Big( &\tilde{a}^{\text{prev}} \cdot x_4 & \\ +&+ \texttt{op} \\ +&+ \tilde{P}_x \cdot v_4 \\ +&+ \tilde{P}_y \cdot (v^2)_4 \\ +&+ \tilde{z}_1 \cdot (v^3)_4 \\ +&+ \tilde{z}_2 \cdot (v^4)_4 \\ +&+ \tilde{\mathcal{Q}} \cdot \bar{q}_4 \\ +&- \tilde{a}^{\text{curr}} \Big) = 0 +\end{align*}} +$$ + +where: + +- All arithmetic is performed in $\mathbb{F}_{r}$ +- $x_4, v_4, (v^2)_4, (v^3)_4, (v^4)_4$ are the native field representations of the challenges +- $\bar{q}_4 = -q \pmod{r}$ + +Interpretation: + +- Reconstruct all limbed values back to native $\mathbb{F}_{r}$ elements +- Compute the accumulation formula directly in $\mathbb{F}_{r}$ +- If subrelations 1 and 2 prove it holds mod $2^{272}$, and subrelation 3 proves it holds mod $r$, then it holds in integers + +## Decomposition Relation + +The Decomposition Relation enforces the integrity of the limb decomposition system. While the Non-Native Field Relation proves the accumulation formula is correct, the Decomposition Relation proves all limb decompositions are valid. It consists of 48 subrelations organized into five categories: + +| Category | No. of Subrelations | Note | +| -------------------------------------- | ------------------- | --------------------------------------------------- | +| Accumulator microlimb decomposition | 4 | Active when $L_{\text{even}} \cdot \texttt{op} = 1$ | +| Point & scalar microlimb decomposition | 16 | Active when $L_{\text{even}} = 1$ | +| Wide limb decomposition | 2 | Decompose 84-bit carry limbs | +| Range constraint tightening | 20 | Enforce stricter bounds on highest microlimbs | +| Transcript decomposition | 6 | Prove 68-bit limbs reconstruct transcript values | +| | | | + +These work with the Permutation Relation and Delta Range Constraint which together prove each microlimb is in $[0, 2^{14})$. 
+ +--- + +### The 14-bit Microlimb System + +Two-level decomposition hierarchy: + +1. Level 1 (68-bit limbs): 254-bit values → 68 + 68 + 68 + 50 bits +2. Level 2 (14-bit microlimbs): 68-bit limbs → 14 + 14 + 14 + 14 + 12 bits + +Microlimb reconstruction formula for a 68-bit limb $\ell$ with microlimbs $m_0, \ldots, m_4$: + +$$\boxed{\ell = m_0 + m_1 \cdot 2^{14} + m_2 \cdot 2^{28} + m_3 \cdot 2^{42} + m_4 \cdot 2^{56}}$$ + +Range constraints: + +- All microlimbs $m_j \in [0, 2^{14})$ (enforced by permutation) +- For 68-bit limbs: $m_4 \in [0, 2^{12})$ +- For 50-bit limbs: $m_3 \in [0, 2^{8})$ +- For 52-bit limbs: $m_3 \in [0, 2^{10})$ +- For 60-bit limbs: $m_4 \in [0, 2^{4})$ + +### Categories 1 and 2: Microlimb Decomposition (Subrelations 0-19) + +General pattern for decomposing a limb $\ell_i$ into microlimbs $\{\ell_{i,j}\}$: +$$\boxed{L_{\text{selector}} \cdot \left( \sum_{j=0}^{k} \ell_{i,j} \cdot 2^{14j} - \ell_i \right) = 0}$$ + +where $k=4$ for 68/60-bit limbs and $k=3$ for 50/52-bit limbs. 
+ +**Subrelations 0-3:** Accumulator limbs $(a_0, a_1, a_2, a_3)$ with selector $L_{\text{even}} \cdot \texttt{op}$ + +- $a_3$ is 50-bit (uses only 4 microlimbs) + +**Subrelations 4-19:** Point coordinates and scalars with selector $L_{\text{even}}$ + +| Element | Limbs decomposed | Number of subrelations | Note | +| ------------- | ------------------------------------------------------------------------------------------ | ---------------------- | --------------------------------- | +| $P_y$ | $P_{y,0}^{\text{lo}}, \ P_{y,1}^{\text{lo}}, \ P_{y,0}^{\text{hi}}, \ P_{y,1}^{\text{hi}}$ | 4 | 68 + 68 + 68 + 50 bits | +| $z_1, z_2$ | $z_{1,0}, \ z_{2,0}, \ z_{1,1}, \ z_{2,1}$ | 4 | Each $z$ is 128-bit: 68 + 60 bits | +| $P_x$ | $P_{x,0}^{\text{lo}}, \ P_{x,1}^{\text{lo}}, \ P_{x,0}^{\text{hi}}, \ P_{x,1}^{\text{hi}}$ | 4 | 68 + 68 + 68 + 50 bits | +| $\mathcal{Q}$ | $q_0, q_1, q_2, q_3$ | 4 | 68 + 68 + 68 + 52 bits | +| | | | | + +### Category 3: Wide Limb Decomposition (Subrelations 20-21) + +Carry limbs $c^{\text{lo}}, c^{\text{hi}}$ are 84 bits (6 × 14-bit microlimbs). To save space, the 5th and 6th microlimbs are stored in unused "tail" columns: + +$$\boxed{L_{\text{even}} \cdot \left( \sum_{j=0}^{3} c_{i,j} \cdot 2^{14j} + c_{i,4} \cdot 2^{56} + c_{i,5} \cdot 2^{70} - c^{(i)} \right) = 0}$$ + +where $c^{(0)} = c^{\text{lo}}$, $c^{(1)} = c^{\text{hi}}$. +Microlimb reuse: + +- $c_{0,4}^{\text{micro}}$ = `p_x_high_limbs_range_constraint_tail_shift` +- $c_{0,5}^{\text{micro}}$ = `accumulator_high_limbs_range_constraint_tail_shift` +- $c_{1,4}^{\text{micro}}$ = `p_y_high_limbs_range_constraint_tail_shift` +- $c_{1,5}^{\text{micro}}$ = `quotient_high_limbs_range_constraint_tail_shift` + +### Category 4: Range Constraint Tightening (Subrelations 22-41) + +For limbs with $b = 14k + r$ bits (where $0 < r < 14$), the highest microlimb $m_k$ must satisfy $m_k < 2^r$. 
+ +**Shift-and-scale technique:** + +For proving $m_k < 2^r$, we add a new variable $m_k^{\text{tail}}\in [0, 2^{14})$ defined as: + +$$m_k^{\text{tail}} := m_k \ll (14 - r).$$ + +Then enforce: + +$$\boxed{L_{\text{even}} \cdot \left( m_k \cdot 2^{14-r} - m_k^{\text{tail}} \right) = 0}$$ + +implying $m_k \in [0, 2^r)$. + +Shift factors: + +- for 68-bit limbs: $2^2 = 4$: Constrains to 12 bits +- for 52-bit limbs: $2^4 = 16$: Constrains to 10 bits +- for 50-bit limbs: $2^6 = 64$: Constrains to 8 bits +- for 60-bit limbs: $2^{10} = 1024$: Constrains to 4 bits + +Subrelations 22-41 apply this pattern to: + +| Elements | No of subrelations | Tail bits | +| ----------- | ------------------ | ------------------- | +| $P_x$ limbs | 4 | 12, 12, 12, 8 bits | +| $P_y$ limbs | 4 | 12, 12, 12, 8 bits | +| $z_1$ limbs | 2 | 12, 4 bits | +| $z_2$ limbs | 2 | 12, 4 bits | +| Accumulator | 4 | 12, 12, 12, 8 bits | +| Quotient | 4 | 12, 12, 12, 10 bits | +| | | | + +### Category 5: Transcript Value Reconstruction (Subrelations 42-47) + +These prove that 68-bit limbs correctly reconstruct EccOpQueue transcript values. +General pattern for composing two limbs into a transcript value: +$$\boxed{L_{\text{even}} \cdot \left( \ell_{\text{low}} + 2^{68} \cdot \ell_{\text{high}} - \text{transcript}_{\text{value}} \right) = 0}$$ + +Subrelations: + +| Column | Even row | Odd row | No. 
of subrelations | +| ----------- | ---------------------------- | ---------------------------- | ------------------- | +| `X_LO_Y_HI` | $P_{x,\text{lo}}$ (136 bits) | $P_{y,\text{hi}}$ (118 bits) | 2 | +| `X_HI_Z_1` | $P_{x,\text{hi}}$ (118 bits) | $z_1$ (128 bits) | 2 | +| `Y_LO_Z_2` | $P_{y,\text{lo}}$ (136 bits) | $z_2$ (128 bits) | 2 | +| | | | | + +#### Interaction with Delta Range Constraint + +The Decomposition Relation works in tandem with the Delta Range Constraint (a separate permutation argument): + +Delta Range Constraint proves: Every microlimb column (all `*_range_constraint_*` columns) contains only values in $[0, 2^{14})$. + +Decomposition Relation proves: + +1. Large limbs are correctly reconstructed from microlimbs +2. Highest microlimbs are more strictly bounded (4, 8, 10, or 12 bits) +3. Transcript values are correctly formed from 68-bit limbs + +Together they guarantee: All limb decompositions are valid and all values are correctly range-constrained. + +## Permutation Relation + +The Permutation Relation is the foundation of all range constraints in the Translator circuit. It proves that every microlimb value used in the circuit belongs to the set $[0, 2^{14} - 1]$. The relation uses a grand product argument comparing two multisets: + +- **Interleaved multiset:** All microlimbs as they appear in the circuit (spread across 16 segments due to interleaving) +- **Ordered multiset:** The same values, but sorted in ascending order + +If the two multisets are equal (i.e., one is a permutation of the other), then all values are valid. + +The relation consists of 2 subrelations: + +1. Grand product identity (degree 6) +2. Finalization check (degree 2) + +#### Interaction with the Delta Range Constraints + +The Permutation Relation works alongside the Delta Range Constraints to enforce microlimb ranges. 
We use a permutation argument to show that the multiset of microlimb values used in the circuit matches an ordered multiset containing all integers from $0$ to $2^{14} - 1 = 16383$. Instead of including all integers in the range $[0, 2^{14} - 1]$ explicitly, we use a "step" sequence with a fixed step size of 3: + +$$\{0, 3, 6, 9, \ldots, 16380, 16383\}$$ + +resulting in $\left\lceil\frac{16384}{3}\right\rceil = 5462$ values. This ensures that any microlimb value $ \leq 16383$ can be proven to be in range by showing it appears in the ordered multiset. We prove equality of multisets using a grand product argument. The correctness of the ordered multiset is proven by the Delta Range Constraints described in the next section. + +**Balancing the multisets:** The 4 interleaved wires contain only circuit microlimbs, while each of the 5 ordered wires contains circuit microlimbs plus the step sequence. To balance this, we add a 5th numerator wire (`ordered_extra_range_constraints_numerator`) containing 5 copies of the step sequence—one for each ordered wire. This ensures the multisets have equal cardinality. The Delta Range Constraints enforce that each value in the ordered multiset differs from the previous by at most 3. + +--- + +### Subrelation 1: Grand Product Identity + +**Purpose:** Prove the interleaved and ordered multisets are equal via grand product. 
+ +The grand product polynomial $z_{\text{perm}}$ is defined recursively: + +$$\boxed{z_{\text{perm}}[i+1] \cdot \prod_{j=0}^{4} \left( w_j^{\text{ordered}}[i] + \beta \cdot L_{\text{mask}}[i] + \gamma \right) = z_{\text{perm}}[i] \cdot \prod_{j=0}^{4} \left( w_j^{\text{interleaved}}[i] + \beta \cdot L_{\text{mask}}[i] + \gamma \right)}$$ + +where: + +- $w_j^{\text{interleaved}}[i]$: The $j$-th interleaved range constraint wire at row $i$ +- $w_j^{\text{ordered}}[i]$: The $j$-th ordered (sorted) range constraint wire at row $i$ +- $\beta, \gamma$: Random challenges (from Fiat-Shamir) +- $L_{\text{mask}}[i]$: Lagrange polynomial indicating masking rows (for zero-knowledge) + +The term $(\beta \cdot L_{\text{mask}}[i])$ on both sides enforces that the zero-knowledge masking values in both sets are identical. +It is added only to the masking region, to avoid interfering with the actual circuit values (which must be in the range $[0, 2^{14} - 1]$). +The subrelation is then expressed, with boundary conditions, as: + +$$\boxed{\left( z_{\text{perm}} + L_{\text{first}} \right) \cdot \prod_{j=0}^{4} \left( w_j^{\text{interleaved}} + \beta \cdot L_{\text{mask}} + \gamma \right) = \left( z_{\text{perm}}^{\text{shift}} + L_{\text{last}} \right) \cdot \prod_{j=0}^{4} \left( w_j^{\text{ordered}} + \beta \cdot L_{\text{mask}} + \gamma \right)}$$ + +where: + +- $L_{\text{first}}$: Lagrange polynomial for first row ($z_{\text{perm}}[0] = 0$ is enforced implicitly) +- $L_{\text{last}}$: Lagrange polynomial for last row (we enforce $z_{\text{perm}}[\text{last}] = 0$ in subrelation 2) +- $z_{\text{perm}}^{\text{shift}}$: Shifted grand product polynomial ($z_{\text{perm}}[i+1]$) + +Note that $z_{\text{perm}}[0] = 0$ follows implicitly from the fact that we are opening $z_{\text{perm}}$ and $z_{\text{perm}}^{\text{shift}}$ both at the same challenge. +If the two multisets are equal: + +1. At each step, the products telescope: contributions cancel out +2. 
After processing all rows, the grand product returns to 1 (accounting for initialization/finalization) +3. If any value is out of range or missing from the sorted set, the product cannot telescope correctly + +Active when: All rows (both even and odd in the full interleaved circuit) + +Degree: 6 (each side is linear polynomial × product of 5 linear terms) + +--- + +### Subrelation 2: Finalization Check + +Purpose: Ensure the grand product polynomial returns to the correct value at the circuit boundary. + +$$\boxed{L_{\text{last}} \cdot z_{\text{perm}}^{\text{shift}} = 0}$$ + +Interpretation: + +- At the last row, $L_{\text{last}} = 1$ +- The shifted grand product $z_{\text{perm}}^{\text{shift}}$ (which is $z_{\text{perm}}$ at the row after last) must be 0 +- This ensures the telescoping completed correctly + +Active when: Last row only ($L_{\text{last}} = 1$) + +Degree: 2 (Lagrange × shifted polynomial) + +## Delta Range Constraint Relation + +The Delta Range Constraint Relation works in tandem with the Permutation Relation to prove that the ordered (sorted) multiset is actually sorted and bounded correctly. + +What it proves: + +1. The "ordered" wires are actually in non-descending order +2. Consecutive values differ by at most `SORT_STEP = 3` +3. The final value in each column is exactly $2^{14} - 1 = 16383$ + +The Permutation Relation only proves the multisets are equal. Without the Delta Range Constraint, an attacker could provide out of range values and the permutation would still pass if the interleaved set matches. + +The relation consists of 10 subrelations: + +- 5 consecutive difference checks (one per ordered wire) +- 5 maximum value checks (one per ordered wire) + +--- + +### Subrelations 1-5: Consecutive Difference Constraints + +Purpose: Enforce that each ordered wire is in non-descending order with maximum step 3. 
+ +For each ordered wire $j \in \{0, 1, 2, 3, 4\}$: + +$$\boxed{\left( L_{\text{real\_last}} - 1 \right) \cdot \left( L_{\text{mask}} - 1 \right) \cdot \Delta_j \cdot (\Delta_j - 1) \cdot (\Delta_j - 2) \cdot (\Delta_j - 3) = 0}$$ + +where: +$$\Delta_j := w_j^{\text{ordered}}[i+1] - w_j^{\text{ordered}}[i].$$ + +When active, it forces: $\Delta_j \in \{0, 1, 2, 3\}$. The constraint is active when: + +- $L_{\text{real\_last}} = 0$ (not the last real row) +- $L_{\text{mask}} = 0$ (not a masking row) + +Why maximum step 3? +To ensure full coverage of $[0, 2^{14} - 1]$, we insert "step values" into the sorted array: + +- Start at 0 +- Insert values: 0, 3, 6, 9, ..., 16383 +- This creates `SORTED_STEPS_COUNT = (2^14 - 1) / 3 + 1 = 5462` steps + +Between these steps, actual microlimbs fill in the gaps. With $\Delta \in \{0, 1, 2, 3\}$: + +- No value can "jump over" a step value +- Every value $\leq 16383$ has a step value within distance 3 +- Therefore, all values in range can be represented + +Degree: 6 (product of 6 linear polynomials) + +--- + +### Subrelations 6-10: Maximum Value Constraints + +Ensure the final value in each sorted column is exactly $2^{14} - 1 = 16383$. +For each ordered wire $j \in \{0, 1, 2, 3, 4\}$: + +$$\boxed{L_{\text{real\_last}} \cdot \left( w_j^{\text{ordered}} - (2^{14} - 1) \right) = 0}$$ + +At the last real row ($L_{\text{real\_last}} = 1$): +$$w_j^{\text{ordered}}[\text{last}] = 2^{14} - 1 = 16383$$ + +This ensures: + +1. No value in the column exceeds $2^{14} - 1$ +2. The maximum value $2^{14} - 1$ is present in the sorted multiset +3. Combined with the difference constraint, all values are $\leq 2^{14} - 1$ + +Active when: Last real row only ($L_{\text{real\_last}} = 1$) + +Degree: 2 (Lagrange × difference) + +## Extra Relations + +To enforce the correctness of the opcodes and the accumulator lifecycle, we have a few additional relations. 
+ +### Opcode Validity Check + +The Opcode Validity Check enforces that all operation codes (`op`) belong to the valid set: + +$$\boxed{\texttt{op} \in \{0, 3, 4, 8\}}$$ + +Valid opcodes: + +- `0`: No-op +- `3`: Equality and reset accumulator +- `4`: Scalar multiplication +- `8`: Point addition + +The constraint is expressed as a polynomial that has roots at the valid opcode values: + +$$\boxed{\left( L_{\text{mini\_mask}} - 1 \right) \cdot \texttt{op} \cdot (\texttt{op} - 3) \cdot (\texttt{op} - 4) \cdot (\texttt{op} - 8) = 0}$$ + +The constraint is active when $L_{\text{mini\_mask}} = 0$ (i.e., not a masking row in the mini-circuit). + +Degree: 5 (degree-1 Lagrange × degree-4 polynomial in `op`) + +--- + +### Accumulator Consistency with No-op + +These subrelations ensure that when the opcode is `0` (no-op), the accumulator remains unchanged between even rows. +For the other opcodes (`3`, `4`, `8`), this constraint does not apply and must be skipped. +Thus, for each accumulator limb $i \in \{0, 1, 2, 3\}$, we must enforce: + +$$\boxed{L_{\text{even}} \cdot (\texttt{op} - 3) \cdot (\texttt{op} - 4) \cdot (\texttt{op} - 8) \cdot \left( a_i^{\text{current}} - a_i^{\text{shifted}} \right) = 0}$$ + +Degree: 5 + +--- + +### Accumulator Transfer Relation + +The Accumulator Transfer Relation manages the lifecycle of the accumulator across the circuit: + +1. Initialization: Start with zero accumulator +2. Propagation: Copy accumulator from each odd row to the next even row +3. Finalization: Verify final accumulator matches expected result + +The relation consists of 12 subrelations: + +- 4 for propagation +- 4 for initialization (set to zero) +- 4 for finalization (check against expected result) + +#### Subrelations 1-4: Odd Row Propagation + +Ensure that we correctly copy the accumulator from each odd row to the next even row. +This is because the previous accumulator value (in this iteration) becomes the "current" value on the next iteration. 
+Refer to the [Witness Trace Structure](../translator_vm/README.md#witness-trace-structure) for details on how we compute the accumulator iteratively. + +Thus, for each limb $i \in \{0, 1, 2, 3\}$: + +$$\boxed{L_{\text{odd}} \cdot (L_{\text{real\_last}} - 1) \cdot \left( a_i^{\text{current}} - a_i^{\text{shifted}} \right) = 0}$$ + +This correctly "propagates" the accumulator value in computing the final accumulator. + +Active when: Odd rows only except the last real row in the mini-circuit (before masking). + +Degree: 3 + +#### Subrelations 5-8: Initialization + +Ensure the accumulator starts at zero at the beginning of the computation. Recall that we process the opcodes in reverse order, so the first "previous" accumulator corresponds to the last opcode processed. Thus, for each limb $i \in \{0, 1, 2, 3\}$: + +$$\boxed{L_{\text{real\_last}} \cdot a_i^{\text{current}} = 0}$$ + +This implies that at the last real row (before masking), all limbs of the accumulator are zero, ensuring the accumulator starts at 0. + +Degree: 2 (Lagrange × limb) + +#### Subrelations 9-12: Finalization + +Verify the final accumulator value matches the expected result from ECCVM. +For each limb $i \in \{0, 1, 2, 3\}$: + +$$\boxed{L_{\text{result}} \cdot \left( a_i^{\text{current}} - a_i^{\text{expected}} \right) = 0}$$ + +where $a_i^{\text{expected}}$ is provided as a relation parameter (derived from ECCVM output). The ECCVM circuit computes the batched evaluation: + +$$a^{\text{expected}} = \sum_{j=0}^{n-1} x^{n-1-j} \cdot \left( \texttt{op}_j + v \cdot P_{x,j} + v^2 \cdot P_{y,j} + v^3 \cdot z_{1,j} + v^4 \cdot z_{2,j} \right) \pmod{q}$$ + +The Translator must prove it computed the same value. The finalization check ensures that Translator's computation matches ECCVM's computation + +Active when: Result row only ($L_{\text{result}} = 1$), this row corresponds to the first real opcode in the mini-circuit. 
 + +Degree: 2 (Lagrange × difference) + +--- + +### Zero Constraints Relation + +The Zero Constraints Relation enforces that certain witness wires are zero outside the mini-circuit. +Due to interleaving, the full circuit is 16× larger than the mini-circuit: + +- Mini-circuit: $2^{13} = 8,192$ rows (actual computation) +- Full circuit: $2^{17} = 131,072$ rows (for interleaving optimization) + +Wire values on rows outside the mini-circuit (rows 8,192 to 131,071) must be zero. All the range constraint microlimb wires and transcript wires should be zero outside the mini-circuit. Thus, for each such wire $w$, we enforce: + +$$\boxed{\left( L_{\text{even}} + L_{\text{odd}} - 1 \right) \cdot (L_{\text{mini\_mask}} - 1) \cdot w = 0}$$ + +Note that the product of the two Lagrange factors is $0$ inside the mini-circuit and $1$ outside it (each factor equals $-1$ there), so the relation forces $w = 0$ exactly on the rows outside the mini-circuit. + +Degree: 3 (Lagrange product × wire) + +--- diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp index e379dd223421..98dbf1b0dea5 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/translator_flavor.hpp @@ -40,7 +40,9 @@ class TranslatorFlavor { using FF = Curve::ScalarField; using BF = Curve::BaseField; using Polynomial = bb::Polynomial; - using Transcript = NativeTranscript; + using Codec = FrCodec; + using HashFunction = crypto::Poseidon2; + using Transcript = BaseTranscript; // indicates when evaluating sumcheck, edges must be extended to be MAX_PARTIAL_RELATION_LENGTH static constexpr bool USE_SHORT_MONOMIALS = false; @@ -845,11 +847,13 @@ class TranslatorFlavor { * resolve that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for * portability of our circuits. 
*/ - class VerificationKey : public NativeVerificationKey_, Transcript> { + class VerificationKey : public NativeVerificationKey_, Codec, HashFunction> { + using Base = NativeVerificationKey_, Codec, HashFunction>; + public: // Default constuct the fixed VK based on circuit size 1 << CONST_TRANSLATOR_LOG_N VerificationKey() - : NativeVerificationKey_(1UL << CONST_TRANSLATOR_LOG_N, /*num_public_inputs=*/0) + : Base(1UL << CONST_TRANSLATOR_LOG_N, /*num_public_inputs=*/0) { this->pub_inputs_offset = 0; @@ -876,10 +880,9 @@ class TranslatorFlavor { * @brief Unused function because vk is hardcoded in recursive verifier, so no transcript hashing is needed. * * @param domain_separator - * @param transcript + * @param tag */ - fr hash_with_origin_tagging([[maybe_unused]] const std::string& domain_separator, - [[maybe_unused]] Transcript& transcript) const override + typename Base::DataType hash_with_origin_tagging([[maybe_unused]] const OriginTag& tag) const override { throw_or_abort("Not intended to be used because vk is hardcoded in circuit."); } @@ -887,8 +890,7 @@ class TranslatorFlavor { #ifndef NDEBUG bool compare(const VerificationKey& other) { - return NativeVerificationKey_, Transcript>::compare< - NUM_PRECOMPUTED_ENTITIES>(other, CommitmentLabels().get_precomputed()); + return Base::template compare(other, CommitmentLabels().get_precomputed()); } #endif }; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/mega_transcript.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/mega_transcript.test.cpp index 6221c1594eea..66d86f490a9e 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/mega_transcript.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/mega_transcript.test.cpp @@ -3,6 +3,7 @@ #include "barretenberg/numeric/bitop/get_msb.hpp" #include "barretenberg/polynomials/univariate.hpp" #include "barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp" +#include "barretenberg/stdlib/test_utils/tamper_proof.hpp" #include 
"barretenberg/transcript/transcript.hpp" #include "barretenberg/ultra_honk/prover_instance.hpp" #include "barretenberg/ultra_honk/ultra_prover.hpp" @@ -19,7 +20,18 @@ template class MegaTranscriptTests : public ::testing::Test { static void SetUpTestSuite() { bb::srs::init_file_crs_factory(bb::srs::bb_crs_path()); } using ProverInstance = ProverInstance_; + using Prover = UltraProver_; + using Proof = typename Flavor::Transcript::Proof; using FF = Flavor::FF; + + static Proof export_serialized_proof(Prover& prover, const size_t num_public_inputs) + { + // reset internal variables needed for exporting the proof + // Note: compute_proof_length_for_export excludes IPA proof length since export_proof appends it separately + size_t proof_length = compute_proof_length_for_export(num_public_inputs); + prover.transcript->test_set_proof_parsing_state(0, proof_length); + return prover.export_proof(); + } /** * @brief Construct a manifest for a Mega Honk proof * @@ -297,42 +309,49 @@ TYPED_TEST(MegaTranscriptTests, StructureTest) using Prover = UltraProver_; using Verifier = UltraVerifier_; - if constexpr (IsAnyOf) { - // For compatibility with Goblin, MegaZKFlavor is using NativeTranscript which does not support - // serialize/deserialize full transcript methods. - GTEST_SKIP() << "Skipping StructureTest for MegaZKFlavor"; - } else { - // Construct a simple circuit of size n = 8 (i.e. 
the minimum circuit size) - typename Flavor::CircuitBuilder builder; - this->generate_test_circuit(builder); - - // Automatically generate a transcript manifest by constructing a proof - auto prover_instance = std::make_shared(builder); - Prover prover(prover_instance); - auto proof = prover.construct_proof(); - auto verification_key = std::make_shared(prover_instance->get_precomputed()); - auto vk_and_hash = std::make_shared(verification_key); - Verifier verifier(vk_and_hash); - EXPECT_TRUE(verifier.verify_proof(proof)); - - // try deserializing and serializing with no changes and check proof is still valid - prover.transcript->deserialize_full_transcript(verification_key->num_public_inputs); - prover.transcript->serialize_full_transcript(); - Verifier verifier2(vk_and_hash); - EXPECT_TRUE(verifier2.verify_proof(prover.export_proof())); // we have changed nothing so proof is still valid - - Commitment one_group_val = Commitment::one(); - FF rand_val = FF::random_element(); - prover.transcript->z_perm_comm = one_group_val * rand_val; // choose random object to modify - Verifier verifier3(vk_and_hash); - EXPECT_TRUE(verifier3.verify_proof( - prover.export_proof())); // we have not serialized it back to the proof so it should still be fine - - prover.transcript->serialize_full_transcript(); - Verifier verifier4(vk_and_hash); - EXPECT_FALSE(verifier4.verify_proof(prover.export_proof())); // the proof is now wrong after serializing it - - prover.transcript->deserialize_full_transcript(verification_key->num_public_inputs); - EXPECT_EQ(static_cast(prover.transcript->z_perm_comm), one_group_val * rand_val); - } + // Construct a simple circuit of size n = 8 (i.e. 
the minimum circuit size) + typename Flavor::CircuitBuilder builder; + this->generate_test_circuit(builder); + + // Automatically generate a transcript manifest by constructing a proof + auto prover_instance = std::make_shared(builder); + auto verification_key = std::make_shared(prover_instance->get_precomputed()); + auto vk_and_hash = std::make_shared(verification_key); + Prover prover(prover_instance, verification_key); + auto proof = prover.construct_proof(); + Verifier verifier(vk_and_hash); + EXPECT_TRUE(verifier.verify_proof(proof).result); + + const size_t virtual_log_n = Flavor::VIRTUAL_LOG_N; + + // Use StructuredProof test utility to deserialize/serialize proof data + StructuredProof proof_structure; + + // try deserializing and serializing with no changes and check proof is still valid + proof_structure.deserialize( + prover.transcript->test_get_proof_data(), verification_key->num_public_inputs, virtual_log_n); + proof_structure.serialize(prover.transcript->test_get_proof_data(), virtual_log_n); + + proof = TestFixture::export_serialized_proof(prover, prover_instance->num_public_inputs()); + // we have changed nothing so proof is still valid + Verifier verifier2(vk_and_hash); + EXPECT_TRUE(verifier2.verify_proof(proof).result); + + Commitment one_group_val = Commitment::one(); + FF rand_val = FF::random_element(); + proof_structure.z_perm_comm = one_group_val * rand_val; // choose random object to modify + proof = TestFixture::export_serialized_proof(prover, prover_instance->num_public_inputs()); + // we have not serialized it back to the proof so it should still be fine + Verifier verifier3(vk_and_hash); + EXPECT_TRUE(verifier3.verify_proof(proof).result); + + proof_structure.serialize(prover.transcript->test_get_proof_data(), virtual_log_n); + proof = TestFixture::export_serialized_proof(prover, prover_instance->num_public_inputs()); + // the proof is now wrong after serializing it + Verifier verifier4(vk_and_hash); + 
EXPECT_FALSE(verifier4.verify_proof(proof).result); + + proof_structure.deserialize( + prover.transcript->test_get_proof_data(), verification_key->num_public_inputs, virtual_log_n); + EXPECT_EQ(static_cast(proof_structure.z_perm_comm), one_group_val * rand_val); } diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp index a87d7598383d..932c257a7106 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp @@ -62,7 +62,7 @@ template typename OinkProver::Proof OinkProve template void OinkProver::execute_preamble_round() { BB_BENCH_NAME("OinkProver::execute_preamble_round"); - fr vk_hash = honk_vk->hash_with_origin_tagging(domain_separator, *transcript); + fr vk_hash = honk_vk->hash_with_origin_tagging(*transcript); transcript->add_to_hash_buffer(domain_separator + "vk_hash", vk_hash); vinfo("vk hash in Oink prover: ", vk_hash); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_verifier.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_verifier.cpp index c2d7743c09d9..43dfe1d873fe 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_verifier.cpp @@ -53,7 +53,7 @@ template void OinkVerifier::execute_preamble_round() { auto vk = verifier_instance->get_vk(); - FF vk_hash = vk->hash_with_origin_tagging(domain_separator, *transcript); + FF vk_hash = vk->hash_with_origin_tagging(*transcript); transcript->add_to_hash_buffer(domain_separator + "vk_hash", vk_hash); vinfo("vk hash in Oink verifier: ", vk_hash); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_transcript.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_transcript.test.cpp index 0795df4cbbba..85ed7997378c 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_transcript.test.cpp +++ 
b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_transcript.test.cpp @@ -7,6 +7,7 @@ #include "barretenberg/polynomials/univariate.hpp" #include "barretenberg/stdlib/primitives/pairing_points.hpp" #include "barretenberg/stdlib/special_public_inputs/special_public_inputs.hpp" +#include "barretenberg/stdlib/test_utils/tamper_proof.hpp" #include "barretenberg/transcript/transcript.hpp" #include "barretenberg/ultra_honk/prover_instance.hpp" #include "barretenberg/ultra_honk/ultra_prover.hpp" @@ -201,7 +202,8 @@ template class UltraTranscriptTests : public ::testing::Test { Proof export_serialized_proof(Prover& prover, const size_t num_public_inputs) { // reset internal variables needed for exporting the proof - size_t proof_length = Flavor::PROOF_LENGTH_WITHOUT_PUB_INPUTS() + num_public_inputs; + // Note: compute_proof_length_for_export excludes IPA proof length since export_proof appends it separately + size_t proof_length = compute_proof_length_for_export(num_public_inputs); prover.transcript->test_set_proof_parsing_state(0, proof_length); return prover.export_proof(); } @@ -317,9 +319,6 @@ TYPED_TEST(UltraTranscriptTests, StructureTest) using Commitment = Flavor::Commitment; // Construct a simple circuit of size n = 8 (i.e. the minimum circuit size) auto builder = typename TestFixture::Builder(); - if constexpr (IsAnyOf) { - GTEST_SKIP() << "Not built for this parameter"; - } TestFixture::generate_test_circuit(builder); // Automatically generate a transcript manifest by constructing a proof @@ -333,9 +332,13 @@ TYPED_TEST(UltraTranscriptTests, StructureTest) const size_t virtual_log_n = Flavor::USE_PADDING ? 
CONST_PROOF_SIZE_LOG_N : prover_instance->log_dyadic_size(); + // Use StructuredProof test utility to deserialize/serialize proof data + StructuredProof proof_structure; + // try deserializing and serializing with no changes and check proof is still valid - prover.transcript->deserialize_full_transcript(verification_key->num_public_inputs, virtual_log_n); - prover.transcript->serialize_full_transcript(virtual_log_n); + proof_structure.deserialize( + prover.transcript->test_get_proof_data(), verification_key->num_public_inputs, virtual_log_n); + proof_structure.serialize(prover.transcript->test_get_proof_data(), virtual_log_n); proof = TestFixture::export_serialized_proof(prover, prover_instance->num_public_inputs()); // we have changed nothing so proof is still valid @@ -344,18 +347,19 @@ TYPED_TEST(UltraTranscriptTests, StructureTest) Commitment one_group_val = Commitment::one(); FF rand_val = FF::random_element(); - prover.transcript->z_perm_comm = one_group_val * rand_val; // choose random object to modify + proof_structure.z_perm_comm = one_group_val * rand_val; // choose random object to modify proof = TestFixture::export_serialized_proof(prover, prover_instance->num_public_inputs()); // we have not serialized it back to the proof so it should still be fine typename TestFixture::Verifier verifier3(vk_and_hash); EXPECT_TRUE(verifier3.verify_proof(proof).result); - prover.transcript->serialize_full_transcript(); + proof_structure.serialize(prover.transcript->test_get_proof_data(), virtual_log_n); proof = TestFixture::export_serialized_proof(prover, prover_instance->num_public_inputs()); // the proof is now wrong after serializing it typename TestFixture::Verifier verifier4(vk_and_hash); EXPECT_FALSE(verifier4.verify_proof(proof).result); - prover.transcript->deserialize_full_transcript(verification_key->num_public_inputs, virtual_log_n); - EXPECT_EQ(static_cast(prover.transcript->z_perm_comm), one_group_val * rand_val); + proof_structure.deserialize( + 
prover.transcript->test_get_proof_data(), verification_key->num_public_inputs, virtual_log_n); + EXPECT_EQ(static_cast(proof_structure.z_perm_comm), one_group_val * rand_val); } diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp index e538cd94cd26..69d4344b930c 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp @@ -10,6 +10,7 @@ #include "barretenberg/flavor/ultra_rollup_recursive_flavor.hpp" #include "barretenberg/flavor/ultra_zk_recursive_flavor.hpp" #include "barretenberg/honk/proof_system/types/proof.hpp" +#include "barretenberg/special_public_inputs/special_public_inputs.hpp" #include "barretenberg/srs/global_crs.hpp" #include "barretenberg/stdlib/primitives/pairing_points.hpp" #include "barretenberg/stdlib/proof/proof.hpp" diff --git a/barretenberg/cpp/src/barretenberg/vm2/constraining/flavor.hpp b/barretenberg/cpp/src/barretenberg/vm2/constraining/flavor.hpp index 1e52560f793e..c2a444b2c5d4 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/constraining/flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/constraining/flavor.hpp @@ -226,8 +226,17 @@ class AvmFlavor { std::vector public_inputs; }; - class VerificationKey - : public NativeVerificationKey_, Transcript, VKSerializationMode::NO_METADATA> { + class VerificationKey : public NativeVerificationKey_, + typename Transcript::Codec, + typename Transcript::HashFunction, + void, + VKSerializationMode::NO_METADATA> { + using Base = NativeVerificationKey_, + typename Transcript::Codec, + typename Transcript::HashFunction, + void, + VKSerializationMode::NO_METADATA>; + public: static constexpr size_t NUM_PRECOMPUTED_COMMITMENTS = NUM_PRECOMPUTED_ENTITIES; @@ -256,8 +265,7 @@ class AvmFlavor { * @brief Unimplemented because AVM VK is hardcoded so hash does not need to be computed. 
Rather, we just add * the provided VK hash directly to the transcript. */ - fr hash_with_origin_tagging([[maybe_unused]] const std::string& domain_separator, - [[maybe_unused]] Transcript& transcript) const override + typename Base::DataType hash_with_origin_tagging([[maybe_unused]] const OriginTag& tag) const override { throw_or_abort("Not intended to be used because vk is hardcoded in circuit."); } diff --git a/barretenberg/cpp/src/barretenberg/vm2/constraining/recursion/recursive_flavor.hpp b/barretenberg/cpp/src/barretenberg/vm2/constraining/recursion/recursive_flavor.hpp index 26f45706a9dc..0d5902789eb2 100644 --- a/barretenberg/cpp/src/barretenberg/vm2/constraining/recursion/recursive_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/vm2/constraining/recursion/recursive_flavor.hpp @@ -58,8 +58,10 @@ class AvmRecursiveFlavor { using Base::Base; }; - class VerificationKey - : public StdlibVerificationKey_> { + class VerificationKey : public StdlibVerificationKey_, + NativeVerificationKey, + VKSerializationMode::NO_METADATA> { public: size_t log_fixed_circuit_size = MAX_AVM_TRACE_LOG_SIZE; VerificationKey(CircuitBuilder* builder, const std::shared_ptr& native_key) @@ -89,9 +91,7 @@ class AvmRecursiveFlavor { } } - std::vector to_field_elements() const override { throw_or_abort("Not intended to be used."); } - FF hash_with_origin_tagging([[maybe_unused]] const std::string& domain_separator, - [[maybe_unused]] Transcript& transcript) const override + FF hash_with_origin_tagging([[maybe_unused]] const OriginTag& tag) const override { throw_or_abort("Not intended to be used because vk is hardcoded in circuit."); }