diff --git a/Cargo.lock b/Cargo.lock index 842e2aeaeffe4..74a27b513f7e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1166,6 +1166,7 @@ dependencies = [ "proptest-derive", "rand 0.7.3", "rand_core 0.5.1", + "rayon", "ring 0.16.20", "serde", "serde-name", @@ -1419,6 +1420,7 @@ dependencies = [ "ff", "group", "hex", + "itertools 0.13.0", "merlin", "more-asserts", "num-bigint 0.3.3", diff --git a/crates/aptos-batch-encryption/Cargo.toml b/crates/aptos-batch-encryption/Cargo.toml index 7b7762ec9e3b1..5703daea33c54 100644 --- a/crates/aptos-batch-encryption/Cargo.toml +++ b/crates/aptos-batch-encryption/Cargo.toml @@ -60,9 +60,9 @@ harness = false name = "multi_point_eval" harness = false -[[bench]] -name = "fptx" -harness = false +# [[bench]] +# name = "fptx" +# harness = false [[bench]] name = "fptx_succinct" diff --git a/crates/aptos-batch-encryption/src/schemes/fptx_weighted.rs b/crates/aptos-batch-encryption/src/schemes/fptx_weighted.rs index bd8c68fbd23dc..63ce2875ecf28 100644 --- a/crates/aptos-batch-encryption/src/schemes/fptx_weighted.rs +++ b/crates/aptos-batch-encryption/src/schemes/fptx_weighted.rs @@ -4,7 +4,6 @@ use crate::{ errors::BatchEncryptionError, group::*, shared::{ - ark_serialize::*, ciphertext::{CTDecrypt, CTEncrypt, PreparedCiphertext, StandardCiphertext}, digest::{Digest, DigestKey, EvalProof, EvalProofs, EvalProofsPromise}, encryption_key::EncryptionKey, @@ -19,7 +18,11 @@ use crate::{ }, }; use anyhow::{anyhow, Result}; -use aptos_crypto::{weighted_config::WeightedConfigArkworks, SecretSharingConfig as _}; +use aptos_crypto::{ + arkworks::serialization::{ark_de, ark_se}, + weighted_config::WeightedConfigArkworks, + SecretSharingConfig as _, +}; use aptos_dkg::pvss::{ traits::{Reconstructable as _, Subtranscript}, Player, diff --git a/crates/aptos-batch-encryption/src/shared/algebra/fk_algorithm.rs b/crates/aptos-batch-encryption/src/shared/algebra/fk_algorithm.rs index 66ff6b8764bbf..3d640e68e5387 100644 --- a/crates/aptos-batch-encryption/src/shared/algebra/fk_algorithm.rs +++ b/crates/aptos-batch-encryption/src/shared/algebra/fk_algorithm.rs @@ -1,7 +1,8 @@ // Copyright (c) Aptos Foundation // Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE use super::multi_point_eval::multi_point_eval; -use crate::shared::{algebra::multi_point_eval::multi_point_eval_naive, ark_serialize::*}; +use crate::shared::algebra::multi_point_eval::multi_point_eval_naive; +use aptos_crypto::arkworks::serialization::{ark_de, ark_se}; use ark_ec::VariableBaseMSM; use ark_ff::FftField; use ark_poly::{domain::DomainCoeff, EvaluationDomain, Radix2EvaluationDomain}; diff --git a/crates/aptos-batch-encryption/src/shared/ciphertext/bibe.rs b/crates/aptos-batch-encryption/src/shared/ciphertext/bibe.rs index 398c1589a5902..05e27ee30552c 100644 --- a/crates/aptos-batch-encryption/src/shared/ciphertext/bibe.rs +++ b/crates/aptos-batch-encryption/src/shared/ciphertext/bibe.rs @@ -8,10 +8,11 @@ use super::super::{ use crate::{ errors::BatchEncryptionError, group::{Fr, G1Affine, G2Affine, G2Prepared, PairingOutput, PairingSetting}, - shared::{ark_serialize::*, digest::EvalProof, encryption_key::EncryptionKey, ids::Id}, + shared::{digest::EvalProof, encryption_key::EncryptionKey, ids::Id}, traits::Plaintext, }; use anyhow::Result; +use aptos_crypto::arkworks::serialization::{ark_de, ark_se}; use ark_ec::{pairing::Pairing, AffineRepr}; use ark_serialize::CanonicalSerialize; use ark_std::{ diff --git 
a/crates/aptos-batch-encryption/src/shared/ciphertext/bibe_succinct.rs b/crates/aptos-batch-encryption/src/shared/ciphertext/bibe_succinct.rs index a9142056f441e..220f3f8c24f56 100644 --- a/crates/aptos-batch-encryption/src/shared/ciphertext/bibe_succinct.rs +++ b/crates/aptos-batch-encryption/src/shared/ciphertext/bibe_succinct.rs @@ -11,7 +11,6 @@ use crate::{ errors::BatchEncryptionError, group::{Fr, G1Affine, G2Affine, PairingOutput, PairingSetting}, shared::{ - ark_serialize::*, ciphertext::bibe::{BIBECTEncrypt, InnerCiphertext}, digest::{Digest, EvalProof}, encryption_key::AugmentedEncryptionKey, @@ -20,6 +19,7 @@ use crate::{ traits::Plaintext, }; use anyhow::Result; +use aptos_crypto::arkworks::serialization::{ark_de, ark_se}; use ark_ec::{pairing::Pairing, AffineRepr}; use ark_serialize::CanonicalSerialize; use ark_std::{ diff --git a/crates/aptos-batch-encryption/src/shared/digest.rs b/crates/aptos-batch-encryption/src/shared/digest.rs index 02a896e3df245..cfb749976c6fd 100644 --- a/crates/aptos-batch-encryption/src/shared/digest.rs +++ b/crates/aptos-batch-encryption/src/shared/digest.rs @@ -6,9 +6,10 @@ use super::ids::{ComputedCoeffs, Id, IdSet}; use crate::{ errors::BatchEncryptionError, group::{Fr, G1Affine, G1Projective, G2Affine, G2Projective, PairingSetting}, - shared::{algebra::fk_algorithm::FKDomain, ark_serialize::*, ids::UncomputedCoeffs}, + shared::{algebra::fk_algorithm::FKDomain, ids::UncomputedCoeffs}, }; use anyhow::{anyhow, Result}; +use aptos_crypto::arkworks::serialization::{ark_de, ark_se}; use ark_ec::{pairing::Pairing, AffineRepr, ScalarMul, VariableBaseMSM}; use ark_std::{ rand::{CryptoRng, RngCore}, diff --git a/crates/aptos-batch-encryption/src/shared/encryption_key.rs b/crates/aptos-batch-encryption/src/shared/encryption_key.rs index 10dd96571d5ad..9514d7458462c 100644 --- a/crates/aptos-batch-encryption/src/shared/encryption_key.rs +++ b/crates/aptos-batch-encryption/src/shared/encryption_key.rs @@ -3,12 +3,12 @@ use crate::{ group::G2Affine, shared::{ - ark_serialize::*, digest::Digest, key_derivation::{BIBEDecryptionKey, BIBEMasterPublicKey}, }, }; use anyhow::Result; +use aptos_crypto::arkworks::serialization::{ark_de, ark_se}; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] diff --git a/crates/aptos-batch-encryption/src/shared/ids/mod.rs b/crates/aptos-batch-encryption/src/shared/ids/mod.rs index ce15d01a8a5a3..673b413751ece 100644 --- a/crates/aptos-batch-encryption/src/shared/ids/mod.rs +++ b/crates/aptos-batch-encryption/src/shared/ids/mod.rs @@ -2,11 +2,9 @@ // Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE use crate::{ group::{Fr, G1Affine, G1Projective}, - shared::{ - algebra::mult_tree::{compute_mult_tree, quotient}, - ark_serialize::*, - }, + shared::algebra::mult_tree::{compute_mult_tree, quotient}, }; +use aptos_crypto::arkworks::serialization::{ark_de, ark_se}; use ark_ec::VariableBaseMSM as _; use ark_ff::field_hashers::{DefaultFieldHasher, HashToField}; use ark_poly::univariate::DensePolynomial; diff --git a/crates/aptos-batch-encryption/src/shared/key_derivation.rs b/crates/aptos-batch-encryption/src/shared/key_derivation.rs index 35f6780e81964..da121d2c56867 100644 --- a/crates/aptos-batch-encryption/src/shared/key_derivation.rs +++ b/crates/aptos-batch-encryption/src/shared/key_derivation.rs @@ -4,12 +4,15 @@ use super::symmetric; use crate::{ errors::BatchEncryptionError, group::{Fr, 
G1Affine, G2Affine, PairingSetting}, - shared::{ark_serialize::*, digest::Digest}, + shared::digest::Digest, traits::{DecryptionKeyShare, VerificationKey}, }; use anyhow::Result; use aptos_crypto::{ - arkworks::shamir::{Reconstructable, ShamirGroupShare, ShamirThresholdConfig}, + arkworks::{ + serialization::{ark_de, ark_se}, + shamir::{Reconstructable, ShamirGroupShare, ShamirThresholdConfig}, + }, player::Player, }; use ark_ec::{pairing::Pairing as _, AffineRepr}; diff --git a/crates/aptos-batch-encryption/src/shared/mod.rs b/crates/aptos-batch-encryption/src/shared/mod.rs index d455a68b8389d..b80313043d9bf 100644 --- a/crates/aptos-batch-encryption/src/shared/mod.rs +++ b/crates/aptos-batch-encryption/src/shared/mod.rs @@ -1,7 +1,6 @@ // Copyright (c) Aptos Foundation // Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE pub mod algebra; -pub mod ark_serialize; pub mod ciphertext; pub mod digest; pub mod encryption_key; diff --git a/crates/aptos-crypto-derive/src/unions.rs b/crates/aptos-crypto-derive/src/unions.rs index 2257ce3059a1e..db3ac69ade0d3 100644 --- a/crates/aptos-crypto-derive/src/unions.rs +++ b/crates/aptos-crypto-derive/src/unions.rs @@ -277,20 +277,20 @@ pub fn impl_enum_signature( type VerifyingKeyMaterial = #pub_kt; type SigningKeyMaterial = #priv_kt; - fn verify(&self, message: &T, public_key: &Self::VerifyingKeyMaterial) -> std::result::Result<(), aptos_crypto::error::Error> { + fn verify(&self, message: &T, public_key: &Self::VerifyingKeyMaterial) -> std::result::Result<(), anyhow::Error> { match (self, public_key) { #match_struct_arms - _ => aptos_crypto::error::bail!( + _ => anyhow::bail!( "provided the wrong alternative in {:?}!", (self, public_key) ), } } - fn verify_arbitrary_msg(&self, message: &[u8], public_key: &Self::VerifyingKeyMaterial) -> std::result::Result<(), aptos_crypto::error::Error> { + fn verify_arbitrary_msg(&self, message: &[u8], public_key: &Self::VerifyingKeyMaterial) -> std::result::Result<(), anyhow::Error> { match (self, public_key) { #match_arms - _ => aptos_crypto::error::bail!( + _ => anyhow::bail!( "provided the wrong alternative in {:?}!", (self, public_key) ), diff --git a/crates/aptos-crypto/Cargo.toml b/crates/aptos-crypto/Cargo.toml index c580e2dab6522..d3d5f1e4c438e 100644 --- a/crates/aptos-crypto/Cargo.toml +++ b/crates/aptos-crypto/Cargo.toml @@ -58,6 +58,7 @@ proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } rand = { workspace = true } rand_core = { workspace = true } +rayon = { workspace = true } ring = { workspace = true } serde = { workspace = true } serde-name = { workspace = true } diff --git a/crates/aptos-crypto/benches/README.md b/crates/aptos-crypto/benches/README.md index 378f91817f811..5022bcb59a079 100644 --- a/crates/aptos-crypto/benches/README.md +++ b/crates/aptos-crypto/benches/README.md @@ -2,15 +2,15 @@ ## Batched Bulletproofs and DeKART -Go to `aptos-crypto`: +Go to `aptos-crypto/benches`: ``` -cd crates/aptos-crypto +cd crates/aptos-crypto/benches ``` -Install [`criterion-means`](https://crates.io/crates/cargo-criterion-means): +Install [`cargo-criterion-means`](https://crates.io/crates/cargo-criterion-means): ``` -cargo install criterion-means +cargo install cargo-criterion-means ``` Run the Bulletproof and DeKART benchmarks in one line via: @@ -19,3 +19,11 @@ Run the Bulletproof and DeKART benchmarks in one line via: ``` This will generate CSV data with the 
benchmark data, format it as Markdown and copy it to your clipboard! + +## Chunky PVSS + +Follow the same steps, but run the benchmarks via: + +``` +./run-pvss-benches.sh +``` \ No newline at end of file diff --git a/crates/aptos-crypto/benches/print-pvss-markdown-table.py b/crates/aptos-crypto/benches/print-pvss-markdown-table.py new file mode 100755 index 0000000000000..32dd70f5819aa --- /dev/null +++ b/crates/aptos-crypto/benches/print-pvss-markdown-table.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python3 +import sys, csv, re +from collections import defaultdict + +HEADER = [ + "Scheme", + "Setup", + "Deal (ms)", + "Serialize (ms)", + "Aggregate (ms)", + "Verify (ms)", + "Decrypt-share (ms)", +] + +V1_NAME = "chunky_v1" +V2_NAME = "chunky_v2" + +# Match patterns like "pvss/chunky_v1/bls12-381", "pvss_chunky_v1_bls12-381", or just "chunky_v1" +# Criterion converts slashes to underscores in CSV output, so we match both formats +V1_GROUP_PATTERN = re.compile(r"chunky_v1") +V2_GROUP_PATTERN = re.compile(r"chunky_v2") + +OPERATIONS = ["deal", "serialize", "aggregate", "verify", "decrypt-share"] + + +def read_rows(fp): + reader = csv.DictReader(fp) + for row in reader: + for k in row: + if isinstance(row[k], str): + row[k] = row[k].strip() + yield row + + +def ns_to_ms(ns): + return round(float(ns) / 1e6, 2) + + +def fmt_ms(x): + return f"{x:,.2f}" + + +def fmt_ratio(r): + return f"{r:.2f}x" + + +GREEN = 'color:#15803d' # bold green when faster +RED = 'color:#dc2626' # red when slower + + +def decorate_v2(value_ms, ratio): + """ + Returns: + display_text: plain text '1,234.56 (1.20x)' for width calculation + render_text: HTML-styled number only; ratio stays uncolored + faster (ratio > 1.0): bold green number + slower (ratio < 1.0): red number + equal (ratio == 1.0): unstyled number + """ + num_txt = fmt_ms(value_ms) + ratio_txt = f" ({fmt_ratio(ratio)})" + display = num_txt + ratio_txt + + if ratio > 1.0: + # v2 is faster (ratio > 1.0 means v1 took longer) → bold green number + render = f"{num_txt}{ratio_txt}" + elif ratio < 1.0: + # v2 is slower (ratio < 1.0 means v1 took less time) → red number + render = f"{num_txt}{ratio_txt}" + else: + # equal → no styling + render = display + + return display, render + + +def parse_group(group): + """Parse the Group column to determine if it's v1 or v2.""" + if V1_GROUP_PATTERN.search(group): + return "v1" + elif V2_GROUP_PATTERN.search(group): + return "v2" + return None + + +def parse_operation(ident): + """Extract operation type from Id column like 'deal/...' or 'deal_...' or 'serialize/...'""" + for op in OPERATIONS: + if ident.startswith(op + "/") or ident.startswith(op + "_"): + return op + return None + + +def parse_setup(ident, parameter): + """ + Parse the setup identifier from either the Id column or Parameter column. + The Id column has format like 'deal/{config_string}', so we extract the config part. + If Parameter is non-empty, use that; otherwise extract from Id after the operation prefix. 
+ """ + if parameter and parameter.strip(): + return parameter.strip() + + # Extract from Id field: "deal/{config}" -> "{config}" + for op in OPERATIONS: + if ident.startswith(op + "/"): + return ident[len(op) + 1:] # Remove "op/" prefix + elif ident.startswith(op + "_"): + return ident[len(op) + 1:] # Remove "op_" prefix + + # Fallback: use the full ident if we can't parse it + return ident + + +def accumulate(rows): + """ + Build nested dict: setup -> version -> operation -> time_ns + """ + data = defaultdict(lambda: defaultdict(lambda: defaultdict(float))) + + for r in rows: + group = r.get("Group", "") + ident = r.get("Id", "") + param = r.get("Parameter", "") + mean_ns = r.get("Mean(ns)", "") + + if mean_ns == "" or mean_ns is None: + continue + try: + mean_ns = float(mean_ns) + except ValueError: + continue + + version = parse_group(group) + if version is None: + continue + + operation = parse_operation(ident) + if operation is None: + continue + + setup = parse_setup(ident, param) + data[setup][version][operation] = mean_ns + + return data + + +def make_rows_for_setup(setup, v1_data, v2_data): + """ + Create rows comparing v1 and v2 for a single setup. + """ + rows = [] + + # Check if we have all operations for both versions + v1_complete = all(op in v1_data for op in OPERATIONS) + v2_complete = all(op in v2_data for op in OPERATIONS) + + if not v1_complete and not v2_complete: + return rows + + # Build row for v1 + if v1_complete: + v1_row = { + "Scheme": V1_NAME, + "Setup": setup, + } + for op in OPERATIONS: + v1_ms = ns_to_ms(v1_data[op]) + v1_row[f"{op}_display"] = fmt_ms(v1_ms) + v1_row[f"{op}_render"] = fmt_ms(v1_ms) + rows.append(v1_row) + + # Build row for v2 + if v2_complete: + v2_row = { + "Scheme": V2_NAME, + "Setup": setup, + } + for op in OPERATIONS: + v2_ms = ns_to_ms(v2_data[op]) + if v1_complete and op in v1_data: + v1_ms = ns_to_ms(v1_data[op]) + ratio = v1_ms / v2_ms if v2_ms > 0 else float("inf") + disp, rend = decorate_v2(v2_ms, ratio) + v2_row[f"{op}_display"] = disp + v2_row[f"{op}_render"] = rend + else: + v2_row[f"{op}_display"] = fmt_ms(v2_ms) + v2_row[f"{op}_render"] = fmt_ms(v2_ms) + rows.append(v2_row) + + return rows + + +def padded_table(rows): + """ + Compute widths from the plain display strings, then emit + padded Markdown rows with the render strings (HTML-styled). 
+ """ + cols = HEADER + display_map = { + "Scheme": "Scheme", + "Setup": "Setup", + "Deal (ms)": "deal_display", + "Serialize (ms)": "serialize_display", + "Aggregate (ms)": "aggregate_display", + "Verify (ms)": "verify_display", + "Decrypt-share (ms)": "decrypt-share_display", + } + render_map = { + "Scheme": "Scheme", + "Setup": "Setup", + "Deal (ms)": "deal_render", + "Serialize (ms)": "serialize_render", + "Aggregate (ms)": "aggregate_render", + "Verify (ms)": "verify_render", + "Decrypt-share (ms)": "decrypt-share_render", + } + + widths = {c: len(c) for c in cols} + for r in rows: + for c in cols: + widths[c] = max(widths[c], len(str(r.get(display_map[c], "")))) + + right_cols = { + "Deal (ms)", + "Serialize (ms)", + "Aggregate (ms)", + "Verify (ms)", + "Decrypt-share (ms)", + } + + def pad(c, s, align): + s = str(s) + if align == "right": + return " " + s.rjust(widths[c]) + " " + return " " + s.ljust(widths[c]) + " " + + header_line = "|" + "|".join(pad(c, c, "left") for c in cols) + "|" + sep_line = "|" + "|".join("-" * (widths[c] + 2) for c in cols) + "|" + + body_lines = [] + for r in rows: + cells = [] + for c in cols: + align = "right" if c in right_cols else "left" + # use display to compute width, render for content + content = r.get(render_map[c], "") + cells.append(pad(c, content, align)) + body_lines.append("|" + "|".join(cells) + "|") + + return "\n".join([header_line, sep_line] + body_lines) + + +def main(): + # Read CSV from file or stdin + if len(sys.argv) > 1 and sys.argv[1] != "-": + with open(sys.argv[1], newline="") as f: + rows = list(read_rows(f)) + else: + rows = list(read_rows(sys.stdin)) + + data = accumulate(rows) + + if not data: + print("No PVSS benchmark data found!", file=sys.stderr) + sys.exit(1) + + # Generate a separate table for each setup + setups = sorted(data.keys()) + tables = [] + + for setup in setups: + v1_data = data[setup].get("v1", {}) + v2_data = data[setup].get("v2", {}) + tbl_rows = make_rows_for_setup(setup, v1_data, v2_data) + + if tbl_rows: + tables.append(padded_table(tbl_rows)) + + if not tables: + print("No complete benchmark data found!", file=sys.stderr) + sys.exit(1) + + # Print all tables separated by double newlines + print("\n\n".join(tables)) + + +if __name__ == "__main__": + main() + diff --git a/crates/aptos-crypto/benches/print-markdown-table.py b/crates/aptos-crypto/benches/print-range-proof-markdown-table.py similarity index 100% rename from crates/aptos-crypto/benches/print-markdown-table.py rename to crates/aptos-crypto/benches/print-range-proof-markdown-table.py diff --git a/crates/aptos-crypto/benches/run-pvss-benches.sh b/crates/aptos-crypto/benches/run-pvss-benches.sh new file mode 100755 index 0000000000000..95e577740b553 --- /dev/null +++ b/crates/aptos-crypto/benches/run-pvss-benches.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +set -e + +scriptdir=$(cd $(dirname $0); pwd -P) + +repo_root=$(readlink -f $scriptdir/../../../) +#echo "Repo root: $repo_root" + +read -p "Delete past benchmark results and re-run? (Otherwise, will use extant data in $repo_root/target/criterion) [y/N]: " ANS + +if [ "$ANS" == "y" ]; then + cd $repo_root/crates/aptos-dkg/ + + echo "Cleaning previous chunky_v1 criterion benchmark results..." + rm -rf $repo_root/target/criterion/pvss_chunky_v1* + + echo "Benchmarking chunky_v1 (with RAYON_NUM_THREADS=1)..." + RAYON_NUM_THREADS=1 cargo bench --bench pvss -- pvss/chunky_v1/bls12-381 + + echo "Cleaning previous chunky_v2 criterion benchmark results..." 
+ rm -rf $repo_root/target/criterion/pvss_chunky_v2* + + echo "Benchmarking chunky_v2 (with RAYON_NUM_THREADS=1)..." + RAYON_NUM_THREADS=1 cargo bench --bench pvss -- pvss/chunky_v2/bls12-381 + + cd - &>/dev/null +else + echo "Using existing benchmark data from $repo_root/target/criterion" + echo "WARNING: Make sure this data was generated with RAYON_NUM_THREADS=1" +fi + +cd $repo_root +csv_data=`cargo criterion-means | grep -E '^(Group|pvss_chunky_v1|pvss_chunky_v2)'` + +csv_file=`mktemp` +echo "$csv_data" >$csv_file +echo "Wrote CSV file to $csv_file..." + +md_tables=`$scriptdir/print-pvss-markdown-table.py $csv_file` + +echo "$md_tables" + +echo "$md_tables" | pbcopy +echo +echo "Copied to clipboard!" diff --git a/crates/aptos-crypto/benches/run-range-proof-benches.sh b/crates/aptos-crypto/benches/run-range-proof-benches.sh index aa0cec70d6e02..6f3fe7ef5d329 100755 --- a/crates/aptos-crypto/benches/run-range-proof-benches.sh +++ b/crates/aptos-crypto/benches/run-range-proof-benches.sh @@ -11,17 +11,19 @@ read -p "Delete past benchmark results and re-run? (Otherwise, will use extant d if [ "$ANS" == "y" ]; then echo "Cleaning previous Bulletproof criterion benchmark results..." - rm -r $repo_root/target/criterion/bulletproofs + rm -rf $repo_root/target/criterion/bulletproofs echo "Benchmarking Bulletproofs..." - RAYON_NUM_THREADS=1 cargo bench -- bulletproofs + RAYON_NUM_THREADS=1 cargo bench --bench bulletproofs echo "Cleaning previous DeKART criterion benchmark results..." - rm -r $repo_root/target/criterion/dekart* + rm -rf $repo_root/target/criterion/dekart* echo "Benchmarking DeKART..." cd $repo_root/crates/aptos-dkg/ - RAYON_NUM_THREADS=1 cargo bench -- dekart-rs/bls12-381 + RAYON_NUM_THREADS=1 cargo bench --bench range_proof -- dekart-rs/bls12-381 +# RAYON_NUM_THREADS=1 cargo bench --bench range_proof -- dekart-multivar/bn254 +# RAYON_NUM_THREADS=1 cargo bench --bench range_proof -- dekart-multivar/bls12-381 cd - &>/dev/null fi @@ -29,10 +31,10 @@ cd $repo_root csv_data=`cargo criterion-means | grep -E '^(bulletproofs|dekart-rs|Group)'` csv_file=`mktemp` -echo "Wrote CSV file to $csv_file..." echo "$csv_data" >$csv_file +echo "Wrote CSV file to $csv_file..." -md_tables=`$scriptdir/print-markdown-table.py $csv_file` +md_tables=`$scriptdir/print-range-proof-markdown-table.py $csv_file` echo "$md_tables" diff --git a/crates/aptos-crypto/src/arkworks/mod.rs b/crates/aptos-crypto/src/arkworks/mod.rs index 126b1d6a90deb..3789beac72dca 100644 --- a/crates/aptos-crypto/src/arkworks/mod.rs +++ b/crates/aptos-crypto/src/arkworks/mod.rs @@ -10,6 +10,7 @@ pub mod random; pub mod scrape; pub mod serialization; pub mod shamir; +pub mod srs; pub mod vanishing_poly; pub mod weighted_sum; @@ -19,7 +20,7 @@ use ark_poly::EvaluationDomain; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; /// A pair of canonical group generators for a pairing-friendly elliptic curve. -#[derive(CanonicalSerialize, CanonicalDeserialize, Debug, Clone, PartialEq, Eq)] +#[derive(CanonicalSerialize, CanonicalDeserialize, Debug, Clone, Copy, PartialEq, Eq)] pub struct GroupGenerators { /// The generator of the G₁ group (affine coordinates). pub g1: E::G1Affine, diff --git a/crates/aptos-crypto/src/arkworks/random.rs b/crates/aptos-crypto/src/arkworks/random.rs index e88f1ff190887..8dce2c3a15b81 100644 --- a/crates/aptos-crypto/src/arkworks/random.rs +++ b/crates/aptos-crypto/src/arkworks/random.rs @@ -7,6 +7,7 @@ //! `rand` crate, which may differ from the version used by `arkworks` and thus //! 
would not be accepted directly. +use crate::arkworks::hashing; use ark_ec::CurveGroup; use ark_ff::PrimeField; use rand::Rng; @@ -19,7 +20,7 @@ pub trait UniformRand { /// NOTE: This function is "unsafe" in the sense that the caller learns the discrete log of the /// random point w.r.t. the generator. In many applications, this is not acceptable. -/// Benches suggest that when generating a single point, it's roughly as fast as `unsafe_random_point()` - about 30µs +/// Benches suggest that when generating a *single* point, it's roughly as fast as `unsafe_random_point()` - about 30µs pub fn unsafe_random_point_slow(rng: &mut R) -> C where R: rand_core::RngCore + rand_core::CryptoRng, @@ -43,8 +44,8 @@ where } /// Faster "unsafe" random point by hashing some random bytes to the curve -/// But still not very fast // TODO: make proper benchmarks, then probably remove the other version -pub fn unsafe_random_point(rng: &mut R) -> C +/// But still not very fast +pub fn unsafe_random_point(rng: &mut R) -> C::Affine where R: rand_core::RngCore + rand_core::CryptoRng, { @@ -53,19 +54,34 @@ where rng.fill_bytes(&mut buf); // Hash to curve (using unsafe_hash_to_affine) - let p: C::Affine = - crate::arkworks::hashing::unsafe_hash_to_affine(&buf, b"unsafe_random_point"); + hashing::unsafe_hash_to_affine(&buf, b"unsafe_random_point") +} - p.into() // TODO: change signature to output affine +/// Very similar, but turns affine element into group element for convenience +pub fn unsafe_random_point_group(rng: &mut R) -> C +where + R: rand_core::RngCore + rand_core::CryptoRng, +{ + unsafe_random_point::(rng).into() } /// Samples `n` uniformly random elements from the group, but is somewhat unsafe /// because it involves a hashing function which is sensitive to timing attacks -pub fn unsafe_random_points(n: usize, rng: &mut R) -> Vec +pub fn unsafe_random_points(n: usize, rng: &mut R) -> Vec where R: rand_core::RngCore + rand_core::CryptoRng, { - (0..n).map(|_| unsafe_random_point::(rng)).collect() + (0..n).map(|_| unsafe_random_point::(rng)).collect() +} + +/// Very similar, but turns affine elements into group elements for convenience +pub fn unsafe_random_points_group(n: usize, rng: &mut R) -> Vec +where + R: rand_core::RngCore + rand_core::CryptoRng, +{ + (0..n) + .map(|_| unsafe_random_point::(rng).into()) + .collect() } /// Samples `n` uniformly random elements from the prime field `F`. diff --git a/crates/aptos-crypto/src/arkworks/serialization.rs b/crates/aptos-crypto/src/arkworks/serialization.rs index 265d96340aa7e..eb63c2aad2875 100644 --- a/crates/aptos-crypto/src/arkworks/serialization.rs +++ b/crates/aptos-crypto/src/arkworks/serialization.rs @@ -1,9 +1,13 @@ // Copyright (c) Aptos Foundation // Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE -//! Copied from https://github.com/arkworks-rs/algebra/issues/178#issuecomment-1413219278 +//! Adapted from https://github.com/arkworks-rs/algebra/issues/178#issuecomment-1413219278 -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate}; +use ark_ec::pairing::Pairing; +use ark_serialize::{ + CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError, Validate, Write, +}; +use bytes::{Buf, Bytes}; /// Serializes a type implementing `CanonicalSerialize` into bytes (with compression) using the /// [`ark_serialize`](https://docs.rs/ark-serialize) format and writes it to a Serde serializer. 
@@ -28,7 +32,77 @@ pub fn ark_de<'de, D, A: CanonicalDeserialize>(data: D) -> Result where D: serde::de::Deserializer<'de>, { - let s: Vec = serde::de::Deserialize::deserialize(data)?; - let a = A::deserialize_with_mode(s.as_slice(), Compress::Yes, Validate::Yes); + let s: Bytes = serde::de::Deserialize::deserialize(data)?; + let a = A::deserialize_with_mode(s.reader(), Compress::Yes, Validate::Yes); a.map_err(serde::de::Error::custom) } + +/// TODO: Not sure this is a good idea, will probably remove it in the next PR? +pub trait BatchSerializable { + /// Collect *all* curve elements in canonical order + fn collect_points(&self, g1: &mut Vec, g2: &mut Vec); + + /// Serialize using already-normalized affine points + fn serialize_from_affine( + &self, + writer: &mut W, + compress: Compress, + g1_iter: &mut impl Iterator, + g2_iter: &mut impl Iterator, + ) -> Result<(), SerializationError>; +} + +/// This module contains unit tests for serializing and deserializing +/// elliptic curve points on the BN254 curve using Serde with custom +/// serialization and deserialization functions (`ark_se` and `ark_de`). +#[cfg(test)] +pub mod tests { + use super::*; + use ark_bn254::{G1Affine, G1Projective, G2Affine, G2Projective}; + use ark_ec::{AffineRepr as _, PrimeGroup}; + use serde::{Deserialize, Serialize}; + + const MAX_DOUBLINGS: usize = 5; // Test 1G, 2G, 4G, 8G, 16G + + #[test] + fn test_g1_serialization_multiple_points() { + #[derive(Serialize, Deserialize, PartialEq, Debug)] + struct A(#[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] G1Affine); + + let mut points = vec![G1Affine::zero()]; // Include zero + let mut g = G1Projective::generator(); + + for _ in 0..MAX_DOUBLINGS { + points.push(g.into()); + g += g; // double for next + } + + for p in points { + let serialized = bcs::to_bytes(&A(p)).expect("Serialization failed"); + let deserialized: A = bcs::from_bytes(&serialized).expect("Deserialization failed"); + + assert_eq!(deserialized.0, p, "G1 point round-trip failed for {:?}", p); + } + } + + #[test] + fn test_g2_serialization_multiple_points() { + #[derive(Serialize, Deserialize, PartialEq, Debug)] + struct A(#[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] G2Affine); + + let mut points = vec![G2Affine::zero()]; // Include zero + let mut g = G2Projective::generator(); + + for _ in 0..MAX_DOUBLINGS { + points.push(g.into()); + g += g; // double for next + } + + for p in points { + let serialized = bcs::to_bytes(&A(p)).expect("Serialization failed"); + let deserialized: A = bcs::from_bytes(&serialized).expect("Deserialization failed"); + + assert_eq!(deserialized.0, p, "G2 point round-trip failed for {:?}", p); + } + } +} diff --git a/crates/aptos-crypto/src/arkworks/shamir.rs b/crates/aptos-crypto/src/arkworks/shamir.rs index ff0579b01def2..7d6ea273c52c0 100644 --- a/crates/aptos-crypto/src/arkworks/shamir.rs +++ b/crates/aptos-crypto/src/arkworks/shamir.rs @@ -180,18 +180,24 @@ pub fn all_lagrange_denominators( ) -> Vec { // A(X) = \prod_{i \in [0, n-1]} (X - \omega^i) let omegas: Vec = dom.elements().take(n).collect(); - debug_assert_eq!(F::ONE, omegas[0]); - for i in 1..n { - debug_assert_eq!( - omegas[i - 1] * omegas[1], - omegas[i], - "omegas are not in sequence at index {}", - i - ); + #[cfg(debug_assertions)] + { + assert_eq!(F::ONE, omegas[0]); + for i in 1..n { + assert_eq!( + omegas[i - 1] * omegas[1], + omegas[i], + "omegas are not in sequence at index {}", + i + ); + } } + // use std::time::Instant; + // let start = Instant::now(); // This is 
**not** X^n - 1, because the \omega^i are not n-th roots of unity, they are N-th roots of unity where N is some power of 2 let mut A = vanishing_poly::from_roots(&omegas); + // println!("vanishing_poly::from_roots took {:?}", start.elapsed()); // A'(X) = \sum_{i \in [0, n-1]} \prod_{j \ne i, j \in [0, n-1]} (X - \omega^j) A.differentiate_in_place(); diff --git a/crates/aptos-crypto/src/arkworks/srs.rs b/crates/aptos-crypto/src/arkworks/srs.rs new file mode 100644 index 0000000000000..d51225487434b --- /dev/null +++ b/crates/aptos-crypto/src/arkworks/srs.rs @@ -0,0 +1,168 @@ +// Copyright (c) Aptos Foundation +// Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE + +//! Structured Reference String (SRS) utilities. +//! +//! This module defines data structures and helpers for working with +//! Structured Reference Strings (SRS) used in pairing-based and +//! polynomial-commitment–style cryptographic protocols. + +use crate::utils; +use ark_ec::CurveGroup; +use ark_ff::Field; +use ark_poly::{EvaluationDomain, Radix2EvaluationDomain}; +use ark_serialize::{ + CanonicalDeserialize, CanonicalSerialize, Compress, Read, SerializationError, Valid, Validate, + Write, +}; + +/// Represents the type of Structured Reference String (SRS) basis. +/// +/// This enum is a lightweight discriminator for *selecting* which SRS construction to use. +pub enum SrsType { + /// The SRS should use a Lagrange basis. + Lagrange, + /// The SRS should use a Powers-of-Tau basis. + PowersOfTau, +} + +/// A concrete representation of a Structured Reference String (SRS). +/// +/// This enum stores the actual group elements defining an SRS, parameterized +/// by an affine curve representation. Each variant corresponds to a different +/// basis commonly used in e.g. polynomial commitment schemes. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum SrsBasis { + /// The SRS is represented in the Lagrange basis. + Lagrange { + /// The vector `[G·ℓ_0(τ), G·ℓ_1(τ), G·ℓ_2(τ), …]`, where `G` is a fixed generator. + lagr: Vec, + }, + + /// The SRS is represented in the Powers-of-Tau basis. + PowersOfTau { + /// The vector `[G, G·τ, G·τ², …]`, where `G` is a fixed generator. 
+ tau_powers: Vec, + }, +} + +// Enums need to be (de)serialised manually +impl CanonicalSerialize for SrsBasis { + fn serialize_with_mode( + &self, + mut writer: W, + compress: Compress, + ) -> Result<(), SerializationError> { + match self { + SrsBasis::Lagrange { lagr: lagr_g1 } => { + 0u8.serialize_with_mode(&mut writer, compress)?; // variant tag + lagr_g1.serialize_with_mode(&mut writer, compress)?; + }, + SrsBasis::PowersOfTau { + tau_powers: tau_powers_g1, + } => { + 1u8.serialize_with_mode(&mut writer, compress)?; // variant tag + tau_powers_g1.serialize_with_mode(&mut writer, compress)?; + }, + } + Ok(()) + } + + fn serialized_size(&self, compress: Compress) -> usize { + 1 + match self { + SrsBasis::Lagrange { lagr: lagr_g1 } => lagr_g1.serialized_size(compress), + SrsBasis::PowersOfTau { + tau_powers: tau_powers_g1, + } => tau_powers_g1.serialized_size(compress), + } + } +} + +impl Valid for SrsBasis { + fn check(&self) -> Result<(), SerializationError> { + match self { + SrsBasis::Lagrange { lagr: lagr_g1 } => { + for g in lagr_g1 { + g.check()?; + } + }, + SrsBasis::PowersOfTau { + tau_powers: tau_powers_g1, + } => { + for g in tau_powers_g1 { + g.check()?; + } + }, + } + Ok(()) + } +} + +impl CanonicalDeserialize for SrsBasis { + fn deserialize_with_mode( + mut reader: R, + compress: Compress, + validate: Validate, + ) -> Result { + // Read the variant tag first + let tag = u8::deserialize_with_mode(&mut reader, compress, validate)?; + + match tag { + 0 => { + // Lagrange variant + let lagr = + Vec::::deserialize_with_mode(&mut reader, compress, validate)?; + Ok(SrsBasis::Lagrange { lagr }) + }, + 1 => { + // Powers-of-Tau variant + let tau_powers = + Vec::::deserialize_with_mode(&mut reader, compress, validate)?; + Ok(SrsBasis::PowersOfTau { tau_powers }) + }, + _ => Err(SerializationError::InvalidData), + } + } +} + +/// Constructs a Structured Reference String (SRS) in the Lagrange basis. +/// +/// This function generates the sequence: +/// `[G·ℓ_0(τ), G·ℓ_1(τ), G·ℓ_2(τ), …, G·ℓ_{n - 1}(τ)]`, +/// returning the result in affine form. +#[allow(non_snake_case)] +pub fn lagrange_basis( + G: C, + tau: C::ScalarField, + n: usize, + eval_dom: Radix2EvaluationDomain, +) -> Vec { + let powers_of_tau = utils::powers(tau, n); + let lagr_basis_scalars = eval_dom.ifft(&powers_of_tau); + debug_assert!(lagr_basis_scalars.iter().sum::() == C::ScalarField::ONE); + + G.batch_mul(&lagr_basis_scalars) +} + +/// Constructs a Structured Reference String (SRS) in the Powers-of-Tau basis. +/// +/// This function generates the sequence: +/// `[G, G·τ, G·τ², …, G·τ^(n - 1)]`, +/// returning the result in affine form. 
+#[allow(non_snake_case)] +pub fn powers_of_tau(G: C, tau: C::ScalarField, n: usize) -> Vec { + // We have to work over `CurveGroup` instead of `AffineRepr` here and in the above function `lagrange_basis()` + // because for some reason only the former has `batch_mul()` implemented for its elements, and this is much + // faster than doing the naive approach: + // + // let mut proj = Vec::with_capacity(n); + // proj.push(base.into_group()); + // for i in 0..(n - 1) { + // proj.push(proj[i] * tau); + // } + // A::Group::normalize_batch(&proj) + + let powers_of_tau = utils::powers(tau, n); + + G.batch_mul(&powers_of_tau) +} diff --git a/crates/aptos-crypto/src/arkworks/vanishing_poly.rs b/crates/aptos-crypto/src/arkworks/vanishing_poly.rs index f87173a723565..e4788c4033118 100644 --- a/crates/aptos-crypto/src/arkworks/vanishing_poly.rs +++ b/crates/aptos-crypto/src/arkworks/vanishing_poly.rs @@ -3,9 +3,11 @@ //! Auxiliary function for Lagrange interpolation -use ark_ff::FftField; +use ark_ff::{FftField, Field}; use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; +const FFT_THRESH: usize = 64 * 16; // Given that our `n` is small in practice, we can increase this further, doesn't matter + /// Recursively computes the **vanishing polynomial** for a given set of points /// using a divide-and-conquer approach. /// @@ -14,19 +16,56 @@ use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; /// ```text /// V(x_i) = 0 for all x_i in xs /// ``` +/// In other words, V(X) = \prod_{x_i in xs} (X - x_i) pub fn from_roots(roots: &[F]) -> DensePolynomial { match roots.len() { - 0 => DensePolynomial::from_coefficients_vec(vec![F::one()]), // Empty product = 1 - 1 => DensePolynomial::from_coefficients_vec(vec![-roots[0], F::one()]), // Single root + 0 => DensePolynomial::from_coefficients_vec(vec![F::one()]), // Is this correct? F::one() or empty vec? 
+ 1 => DensePolynomial::from_coefficients_vec(vec![-roots[0], F::one()]), + 2 => { + let (a, b) = (roots[0], roots[1]); + DensePolynomial::from_coefficients_vec(vec![a * b, -(a + b), F::one()]) + }, + 3 => { + let (a, b, c) = (roots[0], roots[1], roots[2]); + DensePolynomial::from_coefficients_vec(vec![ + -(a * b * c), + a * b + a * c + b * c, + -(a + b + c), + F::one(), + ]) + }, // Not sure 2 and 3 are really useful _ => { let mid = roots.len() / 2; - let left = from_roots(&roots[..mid]); - let right = from_roots(&roots[mid..]); - &left * &right // This uses FftField + let (left, right) = + rayon::join(|| from_roots(&roots[..mid]), || from_roots(&roots[mid..])); + + let result_len = left.coeffs.len() + right.coeffs.len() - 1; + let dom_size = result_len.next_power_of_two(); + + if dom_size < FFT_THRESH { + naive_poly_mul(&left, &right) + } else { + &left * &right + } }, } } +fn naive_poly_mul(a: &DensePolynomial, b: &DensePolynomial) -> DensePolynomial { + let a_coeffs = &a.coeffs; + let b_coeffs = &b.coeffs; + + let mut out = vec![F::zero(); a_coeffs.len() + b_coeffs.len() - 1]; + + for (i, ai) in a_coeffs.iter().enumerate() { + for (j, bj) in b_coeffs.iter().enumerate() { + out[i + j] += *ai * *bj; + } + } + + DensePolynomial::from_coefficients_vec(out) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/aptos-crypto/src/blstrs/lagrange.rs b/crates/aptos-crypto/src/blstrs/lagrange.rs index a4f5df268be67..22641450a05d2 100644 --- a/crates/aptos-crypto/src/blstrs/lagrange.rs +++ b/crates/aptos-crypto/src/blstrs/lagrange.rs @@ -13,7 +13,7 @@ use ff::{BatchInvert, Field}; use more_asserts::{assert_gt, debug_assert_le}; use std::ops::{Mul, MulAssign}; -const FFT_THRESH: usize = 64; +const FFT_THRESH: usize = 64 * 2; // It was previously set to 64 /// Returns all the $N$ Lagrange coefficients for the interpolating set $T = \{\omega^0, \omega^1, \ldots, \omega^{N-1}\}$, /// where $\omega$ is an $N$th root of unity and $N$ is the size of `dom`. diff --git a/crates/aptos-crypto/src/error.rs b/crates/aptos-crypto/src/error.rs deleted file mode 100644 index 242aceab1ded8..0000000000000 --- a/crates/aptos-crypto/src/error.rs +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) Aptos Foundation -// Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE - -//! Rexport the error types needed for the various crypto traits - -pub use anyhow::{bail, Error}; diff --git a/crates/aptos-crypto/src/lib.rs b/crates/aptos-crypto/src/lib.rs index 43eee217be3f8..eb94547589196 100644 --- a/crates/aptos-crypto/src/lib.rs +++ b/crates/aptos-crypto/src/lib.rs @@ -14,7 +14,6 @@ pub mod constant_time; pub mod ed25519; pub mod elgamal; pub mod encoding_type; -pub mod error; pub mod hash; pub mod hkdf; pub mod input_secret; diff --git a/crates/aptos-crypto/src/utils.rs b/crates/aptos-crypto/src/utils.rs index da4a8dec6cae8..a839842d5de4f 100644 --- a/crates/aptos-crypto/src/utils.rs +++ b/crates/aptos-crypto/src/utils.rs @@ -23,3 +23,12 @@ where powers } + +/// Asserts that the given value is a power of two. 
+pub fn assert_power_of_two(n: usize) { + assert!( + n.is_power_of_two(), + "Parameter must be a power of 2, but got {}", + n + ); +} diff --git a/crates/aptos-dkg/Cargo.toml b/crates/aptos-dkg/Cargo.toml index ed0f7308c1913..3b032bd2350f4 100644 --- a/crates/aptos-dkg/Cargo.toml +++ b/crates/aptos-dkg/Cargo.toml @@ -24,6 +24,7 @@ derive_more = { workspace = true } ff = { workspace = true } group = { workspace = true } hex = { workspace = true } +itertools = { workspace = true } merlin = { workspace = true } more-asserts = { workspace = true } num-bigint = { workspace = true } @@ -58,6 +59,14 @@ harness = false name = "lagrange" harness = false +[[bench]] +name = "ldt" +harness = false + +[[bench]] +name = "pcs" +harness = false + [[bench]] name = "pvss" harness = false @@ -66,6 +75,10 @@ harness = false name = "range_proof" harness = false +[[bench]] +name = "serialization" +harness = false + [[bench]] name = "weighted_vuf" harness = false diff --git a/crates/aptos-dkg/benches/ldt.rs b/crates/aptos-dkg/benches/ldt.rs new file mode 100644 index 0000000000000..7a492c689306a --- /dev/null +++ b/crates/aptos-dkg/benches/ldt.rs @@ -0,0 +1,67 @@ +// Copyright (c) Aptos Foundation +// Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE + +#![allow(clippy::ptr_arg)] +#![allow(clippy::needless_borrow)] + +use aptos_crypto::arkworks; +use aptos_dkg::{ + algebra::evaluation_domain::BatchEvaluationDomain, + pvss::{test_utils::BENCHMARK_CONFIGS, LowDegreeTest}, +}; +use ark_poly::EvaluationDomain; +use criterion::{criterion_group, criterion_main, Criterion}; +use rand::thread_rng; + +pub fn all_groups(c: &mut Criterion) { + arkworks_ldt_group(c); + blstrs_ldt_group(c); +} + +pub fn blstrs_ldt_group(c: &mut Criterion) { + let mut rng = thread_rng(); + let mut group = c.benchmark_group("blstrs_ldt"); + + for &(t, n) in BENCHMARK_CONFIGS { + group.bench_function(format!("dual_code_word/t{}/n{}", t, n), |b| { + b.iter_with_setup( + || { + let batch_dom = BatchEvaluationDomain::new(n); + (n, t, batch_dom) + }, + |(n, t, batch_dom)| { + let ldt = LowDegreeTest::random(&mut rng, t, n, true, &batch_dom); + ldt.dual_code_word(); + }, + ) + }); + } +} + +pub fn arkworks_ldt_group(c: &mut Criterion) { + let mut rng = thread_rng(); + let mut group = c.benchmark_group("arkworks_ldt"); + + for &(t, n) in BENCHMARK_CONFIGS { + group.bench_function(format!("dual_code_word/t{}/n{}", t, n), |b| { + b.iter_with_setup( + || { + let batch_dom = + ark_poly::Radix2EvaluationDomain::::new(n).unwrap(); + (n, t, batch_dom) + }, + |(n, t, batch_dom)| { + let ldt = + arkworks::scrape::LowDegreeTest::random(&mut rng, t, n, true, &batch_dom); + ldt.dual_code_word(); + }, + ) + }); + } +} + +criterion_group!( + name = benches; + config = Criterion::default().sample_size(10); + targets = all_groups); +criterion_main!(benches); diff --git a/crates/aptos-dkg/benches/pcs.rs b/crates/aptos-dkg/benches/pcs.rs new file mode 100644 index 0000000000000..1d9102ded8142 --- /dev/null +++ b/crates/aptos-dkg/benches/pcs.rs @@ -0,0 +1,125 @@ +// Copyright (c) Aptos Foundation +// Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE + +use aptos_dkg::pcs::{ + traits::{random_point, random_poly, PolynomialCommitmentScheme}, + zeromorph::Zeromorph, +}; +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use rand::{rngs::StdRng, SeedableRng}; +use 
std::hint::black_box; + +/// Generic benchmark harness for any commitment scheme implementing `CommitmentScheme`. +/// +/// Benchmarks the three core operations: +/// - Commit +/// - Open (prove an evaluation) +/// - Verify (verify the opening proof) +fn benchmark_commitment_scheme(c: &mut Criterion) { + // Create a benchmark group labeled with the scheme name + let mut group = c.benchmark_group(format!( + "commitment_scheme/{}", + String::from_utf8_lossy(CS::scheme_name()) + )); + + // Polynomial sizes to benchmark (powers of two); corresponds to rounding up [1_000, 10_000, 100_000, 1_000_000] + let sizes: [u32; 4] = [1 << 10, 1 << 14, 1 << 17, 1 << 20]; + + for len in sizes { + // Use a fixed seed so all runs are deterministic and comparable + let mut rng = StdRng::seed_from_u64(0xDEAD_BEEF); + + // Number of variables needed to represent a multilinear polynomial of length `len` + let num_vars = len.next_power_of_two().ilog2(); + + // -------------------------------------------------- + // Setup phase (trusted / structured reference string) + // -------------------------------------------------- + // This is intentionally done once per size and excluded from benchmarks; becomes quite slow for large `num_vars` + println!("Computing setup..."); + let (ck, vk) = CS::setup(vec![1; num_vars as usize], &mut rng); + println!("Finished setup"); + + // ------------------------------------------ + // Benchmark Commit + // ------------------------------------------ + // Measures the cost of committing to a polynomial + group.bench_with_input(BenchmarkId::new("commit", len), &len, |b, &_len| { + b.iter_batched( + || random_poly::(&mut rng, len, 32), + |poly| { + CS::commit(&ck, poly, None); + }, + BatchSize::LargeInput, + ); + }); + + // ------------------------------------------ + // Benchmark Open + // ------------------------------------------ + // Measures the cost of generating an evaluation proof + group.bench_with_input(BenchmarkId::new("open", len), &len, |b, &_len| { + b.iter_batched( + || { + let poly = random_poly::(&mut rng, len, 32); + let challenge = random_point::(&mut rng, num_vars); + let mut rng = rand::thread_rng(); + let r = CS::random_witness(&mut rng); + let trs = merlin::Transcript::new(b"pcs-bench"); + (poly, challenge, Some(r), rng, trs) + }, + |(poly, challenge, r, mut rng, mut trs)| { + CS::open(&ck, poly, challenge, r, &mut rng, &mut trs); + }, + BatchSize::LargeInput, + ); + }); + + // ------------------------------------------ + // Benchmark Verify + // ------------------------------------------ + group.bench_with_input(BenchmarkId::new("verify", len), &len, |b, &_len| { + b.iter_batched( + || { + let poly = random_poly::(&mut rng, len, 32); + let challenge = random_point::(&mut rng, num_vars); + let val = CS::evaluate_point(&poly, &challenge); + let com = CS::commit(&ck, poly.clone(), None); + let mut rng = rand::thread_rng(); + let r = CS::random_witness(&mut rng); + let mut trs = merlin::Transcript::new(b"pcs-bench"); + let proof = CS::open( + &ck, + poly.clone(), + challenge.clone(), + Some(r), + &mut rng, + &mut trs, + ); + (challenge, val, com, proof, trs) + }, + |(challenge, val, com, proof, mut trs)| { + let _ = CS::verify(black_box(&vk), com, challenge, val, proof, &mut trs); + }, + BatchSize::LargeInput, + ); + }); + } + + group.finish(); +} + +/// Benchmark entry point for the Zeromorph commitment scheme instantiated +/// over the BLS12-381 pairing-friendly curve. 
+fn bench_zeromorph(c: &mut Criterion) { + type E = ark_bls12_381::Bls12_381; + + benchmark_commitment_scheme::>(c); +} + +criterion_group!( + name = benches; + config = Criterion::default().sample_size(20); + targets = bench_zeromorph); + +criterion_main!(benches); diff --git a/crates/aptos-dkg/benches/pvss.rs b/crates/aptos-dkg/benches/pvss.rs index 8f56cc20b3c1e..006003007d762 100644 --- a/crates/aptos-dkg/benches/pvss.rs +++ b/crates/aptos-dkg/benches/pvss.rs @@ -5,43 +5,56 @@ #![allow(clippy::needless_borrow)] use aptos_crypto::{SecretSharingConfig, Uniform}; -use aptos_dkg::{ - algebra::evaluation_domain::BatchEvaluationDomain, - pvss::{ - chunky::UnsignedUnweightedTranscript as ChunkyTranscript, - das, - test_utils::{ - self, get_threshold_configs_for_benchmarking, get_weighted_configs_for_benchmarking, - DealingArgs, NoAux, BENCHMARK_CONFIGS, - }, - traits::transcript::{ - Aggregatable, AggregatableTranscript, HasAggregatableSubtranscript, - MalleableTranscript, Transcript, WithMaxNumShares, - }, - LowDegreeTest, WeightedConfigBlstrs, +use aptos_dkg::pvss::{ + chunky::{UnsignedWeightedTranscript as Chunky_v1, UnsignedWeightedTranscriptv2 as Chunky_v2}, + das, + test_utils::{ + self, get_threshold_configs_for_benchmarking, get_weighted_configs_for_benchmarking, + DealingArgs, NoAux, }, + traits::transcript::{ + Aggregatable, AggregatableTranscript, HasAggregatableSubtranscript, MalleableTranscript, + Transcript, WithMaxNumShares, + }, + WeightedConfigBlstrs, }; +use ark_bls12_381::Bls12_381; use ark_bn254::Bn254; use criterion::{ - criterion_group, criterion_main, + black_box, criterion_group, criterion_main, measurement::{Measurement, WallTime}, BenchmarkGroup, Criterion, Throughput, }; use more_asserts::assert_le; use rand::{rngs::ThreadRng, thread_rng, Rng}; +const BN254: &str = "bn254"; +const BLS12_381: &str = "bls12-381"; + pub fn all_groups(c: &mut Criterion) { - // unweighted BN254 PVSS with aggregatable subtranscript; only doing 2 because large configs are a bit slow and not relevant anyway - for tc in get_threshold_configs_for_benchmarking().into_iter().take(2) { - subaggregatable_pvss_group::>(&tc, c); + println!("Rayon num threads: {}", rayon::current_num_threads()); + + // weighted PVSS with aggregatable subtranscript; only doing one at the moment because large configs are a bit slow and not relevant anyway + for tc in get_weighted_configs_for_benchmarking().into_iter().take(1) { + subaggregatable_pvss_group::>(&tc, c, BN254); + } + for tc in get_weighted_configs_for_benchmarking().into_iter().take(1) { + subaggregatable_pvss_group::>(&tc, c, BLS12_381); + } + + for tc in get_weighted_configs_for_benchmarking().into_iter().take(1) { + subaggregatable_pvss_group::>(&tc, c, BN254); + } + for tc in get_weighted_configs_for_benchmarking().into_iter().take(1) { + subaggregatable_pvss_group::>(&tc, c, BLS12_381); } - // unweighted aggregatable PVSS + // unweighted aggregatable PVSS, `blstrs` only so this is BLS12-381 for tc in get_threshold_configs_for_benchmarking() { aggregatable_pvss_group::(&tc, c); } - // weighted PVSS + // weighted aggregatable PVSS, `blstrs` only so this is BLS12-381 for wc in get_weighted_configs_for_benchmarking() { let d = aggregatable_pvss_group::(&wc, c); weighted_pvss_group(&wc, d, c); @@ -50,30 +63,6 @@ pub fn all_groups(c: &mut Criterion) { // let d = pvss_group::>(&wc, c); // weighted_pvss_group(&wc, d, c); } - - // LDT - ldt_group(c); -} - -// TODO: benchmark both blstrs and arkworks LDT? 
-pub fn ldt_group(c: &mut Criterion) { - let mut rng = thread_rng(); - let mut group = c.benchmark_group("ldt"); - - for &(t, n) in BENCHMARK_CONFIGS { - group.bench_function(format!("dual_code_word/t{}/n{}", t, n), |b| { - b.iter_with_setup( - || { - let batch_dom = BatchEvaluationDomain::new(n); - (n, t, batch_dom) - }, - |(n, t, batch_dom)| { - let ldt = LowDegreeTest::random(&mut rng, t, n, true, &batch_dom); - ldt.dual_code_word(); - }, - ) - }); - } } pub fn aggregatable_pvss_group( @@ -104,6 +93,7 @@ pub fn aggregatable_pvss_group( pub fn subaggregatable_pvss_group( sc: &T::SecretSharingConfig, c: &mut Criterion, + curve_name: &str, ) -> DealingArgs where T: MalleableTranscript @@ -114,7 +104,7 @@ where >, { let name = T::scheme_name(); - let mut group = c.benchmark_group(format!("pvss/{}", name)); + let mut group = c.benchmark_group(format!("pvss/{}/{}", name, curve_name)); let mut rng = thread_rng(); // TODO: use a lazy pattern to avoid this expensive step when no benchmarks are run @@ -122,6 +112,7 @@ where // pvss_transcript_random::(sc, &mut group); pvss_deal::(sc, &d.pp, &d.ssks, &d.spks, &d.eks, &mut group); + pvss_nonaggregate_serialize::(sc, &d.pp, &d.ssks, &d.spks, &d.eks, &mut group); pvss_subaggregate::(sc, &mut group); pvss_nonaggregate_verify::(sc, &d.pp, &d.ssks, &d.spks, &d.eks, &mut group); pvss_decrypt_own_share::( @@ -283,7 +274,7 @@ fn pvss_verify( }); } -fn pvss_nonaggregate_verify( +fn pvss_nonaggregate_serialize( sc: &T::SecretSharingConfig, pp: &T::PublicParameters, ssks: &[T::SigningSecretKey], @@ -295,7 +286,7 @@ fn pvss_nonaggregate_verify( let mut rng = thread_rng(); - g.bench_function(format!("verify/{}", sc), move |b| { + g.bench_function(format!("serialize/{}", sc), move |b| { b.iter_with_setup( || { let s = T::InputSecret::generate(&mut rng); @@ -311,6 +302,45 @@ fn pvss_nonaggregate_verify( &mut rng, ) }, + |trs| { + let bytes = trs.to_bytes(); + black_box(&bytes); + }, + ) + }); +} + +fn pvss_nonaggregate_verify( + sc: &T::SecretSharingConfig, + pp: &T::PublicParameters, + ssks: &[T::SigningSecretKey], + spks: &[T::SigningPubKey], + eks: &[T::EncryptPubKey], + g: &mut BenchmarkGroup, +) { + g.throughput(Throughput::Elements(sc.get_total_num_shares() as u64)); + + let mut rng = thread_rng(); + + g.bench_function(format!("verify/{}", sc), move |b| { + b.iter_with_setup( + || { + let s = T::InputSecret::generate(&mut rng); + let trs = T::deal( + &sc, + &pp, + &ssks[0], + &spks[0], + &eks, + &s, + &NoAux, + &sc.get_player(0), + &mut rng, + ); + T::try_from(trs.to_bytes().as_slice()) + .expect("serialized transcript should deserialize correctly") + // we have to serialize and deserialize because otherwise verify gets a transcript with "non-normalised" projective group elements + }, |trx| { trx.verify(&sc, &pp, &[spks[0].clone()], &eks, &NoAux) .expect("PVSS transcript verification should succeed"); diff --git a/crates/aptos-dkg/benches/range_proof.rs b/crates/aptos-dkg/benches/range_proof.rs index 0922b3ca4e065..b27bec744b3c9 100644 --- a/crates/aptos-dkg/benches/range_proof.rs +++ b/crates/aptos-dkg/benches/range_proof.rs @@ -21,6 +21,7 @@ use rand::thread_rng; /// `crates/aptos-crypto/README.md` rely on it. 
const BROKEN_DEKART_RS_SCHEME_NAME: &str = "dekart-rs-broken"; const DEKART_RS_SCHEME_NAME: &str = "dekart-rs"; +//const DEKART_MULTIVARIATE_SCHEME_NAME: &str = "dekart-multivar"; const BN254: &str = "bn254"; const BLS12_381: &str = "bls12-381"; diff --git a/crates/aptos-dkg/benches/serialization.rs b/crates/aptos-dkg/benches/serialization.rs new file mode 100644 index 0000000000000..a01440f2f3dab --- /dev/null +++ b/crates/aptos-dkg/benches/serialization.rs @@ -0,0 +1,431 @@ +// Copyright (c) Aptos Foundation +// Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE + +use aptos_crypto::{ + arkworks::random::{sample_field_element, sample_field_elements, unsafe_random_points_group}, + blstrs::{ + random::{insecure_random_g1_points, insecure_random_g2_points, random_scalars}, + random_scalar, + }, +}; +use aptos_dkg::pvss::{ + chunky::chunked_elgamal::num_chunks_per_scalar, test_utils::BENCHMARK_CONFIGS, +}; +use ark_bn254::{ + Fr as ArkFr, G1Affine as ArkG1Affine, G1Projective as ArkG1Projective, G2Affine as ArkG2Affine, + G2Projective as ArkG2Projective, +}; +use ark_ec::CurveGroup; +use ark_serialize::CanonicalSerialize; +use blstrs::{ + G1Affine as BlstrsG1Affine, G1Projective as BlstrsG1Projective, G2Affine as BlstrsG2Affine, + G2Projective as BlstrsG2Projective, Scalar as BlstrsFr, +}; +use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion}; +use ff::Field; +use group::{prime::PrimeCurveAffine, Group}; +use serde::Serialize; + +// The goal of this file is +// (i) To test how much speed is gained serializing projective group elements in `arkworks` and `blstrs` manually using a `normalize_batch()` type approach +// (ii) Whilst we're at it, see how arkworks and blstrs differ speedwise by timing the serialisation of the same amount of elts (note that we are NOT using the same curve atm) +// blstrs doesn't have a proper normalize_batch() function implemented, so we've commented out those benches for now + +// One of the results is that arkworks seems significantly slower than blstrs, at least with our settings. TODO: what happens if `Compress` and `Validate` are disabled? 
+ +// `blstrs` seems to be missing a `normalize_batch()` implementation, so let's do that first +fn blstrs_normalize_batch_g1(v: &[BlstrsG1Projective]) -> Vec { + // Collect all z coordinates + let mut zs: Vec<_> = v.iter().map(|p| p.z()).collect(); + + // Do batch inversion + //zs.iter_mut().batch_invert(); + batch_inversion(&mut zs); + + // Convert to affine + v.iter() + .zip(zs) + .map(|(p, z_inv)| { + if bool::from(p.is_identity()) { + BlstrsG1Affine::identity() + } else { + let x = p.x() * z_inv; + let y = p.y() * z_inv; + // SAFETY: this assumes x,y is a valid affine point + BlstrsG1Affine::from_raw_unchecked(x, y, true) + } + }) + .collect() +} + +fn blstrs_normalize_batch_g2(v: &[BlstrsG2Projective]) -> Vec { + // Collect all z coordinates + let mut zs: Vec<_> = v.iter().map(|p| p.z()).collect(); + + // Do batch inversion + //zs.iter_mut().batch_invert(); + batch_inversion(&mut zs); + + // Convert to affine + v.iter() + .zip(zs) + .map(|(p, z_inv)| { + if bool::from(p.is_identity()) { + BlstrsG2Affine::identity() + } else { + let x = p.x() * z_inv; + let y = p.y() * z_inv; + // SAFETY: this assumes x,y is a valid affine point + BlstrsG2Affine::from_raw_unchecked(x, y, true) + } + }) + .collect() +} + +// Furthermore, this function seems to be slightly faster than the built-in `batch_invert()`, probably +// because it's not constant-time, but that's irrelevant for serialization +fn batch_inversion(v: &mut [F]) { + let mut acc = F::ONE; + // prefix products + let mut prod = Vec::with_capacity(v.len()); + for x in v.iter() { + prod.push(acc); + acc *= x; + } + // invert the total product + acc = acc.invert().unwrap(); // shouldn't happen, the only element with zero z-coordinate in the Weierstrass model is the identity (0 : 1 : 0) + // propagate inverses backwards + for (x, p) in v.iter_mut().rev().zip(prod.into_iter().rev()) { + let tmp = acc * *x; + *x = acc * p; + acc = tmp; + } +} + +const N: usize = 10_000; + +fn ark_g1_projective_1k( + rng: &mut R, +) -> Vec { + random_projective_vec_ark::(N, rng) +} + +fn blstrs_g1_projective_1k( + rng: &mut R, +) -> Vec { + random_projective_g1_vec_blstrs(N, rng) +} + +fn bench_arkworks_projective_1k(c: &mut Criterion) { + let mut rng = rand::thread_rng(); + + c.bench_function("arkworks serialize 1k G1 projective", |b| { + b.iter_batched( + || ark_g1_projective_1k(&mut rng), + |v| { + let mut bytes = Vec::new(); + v.serialize_compressed(&mut bytes).unwrap(); + black_box(bytes); + }, + BatchSize::SmallInput, + ) + }); +} + +fn bench_arkworks_affine_1k(c: &mut Criterion) { + let mut rng = rand::thread_rng(); + + c.bench_function("arkworks serialize 1k G1 affine", |b| { + b.iter_batched( + || ark_g1_projective_1k(&mut rng), + |proj| { + let mut bytes = Vec::new(); + let v = ArkG1Projective::normalize_batch(&proj); + v.serialize_compressed(&mut bytes).unwrap(); + black_box(bytes); + }, + BatchSize::SmallInput, + ) + }); +} + +fn bench_blstrs_projective_1k(c: &mut Criterion) { + let mut rng = rand::thread_rng(); + + c.bench_function("blstrs serialize 1k G1 projective (BCS)", |b| { + b.iter_batched( + || blstrs_g1_projective_1k(&mut rng), + |v| { + let bytes = bcs::to_bytes(&v).unwrap(); + black_box(bytes); + }, + BatchSize::SmallInput, + ) + }); +} + +fn bench_blstrs_affine_1k(c: &mut Criterion) { + let mut rng = rand::thread_rng(); + + c.bench_function("blstrs serialize 1k G1 affine (BCS)", |b| { + b.iter_batched( + || blstrs_g1_projective_1k(&mut rng), + |proj| { + //let mut aff = vec![BlstrsG1Affine::generator(); N]; + let aff = 
blstrs_normalize_batch_g1(&proj); + let bytes = bcs::to_bytes(&aff).unwrap(); + black_box(bytes); + }, + BatchSize::SmallInput, + ) + }); +} + +// Naive serialization of a transcript of arkworks elements +#[derive(CanonicalSerialize)] +#[allow(non_snake_case)] +struct ArkTranscriptSimulatorProj { + G1_vec: Vec, + G2_vec: Vec, + scalar_vec: Vec, // Only including this to make the benchmarks more "realistic" by making the numbers similar to chunky +} + +// Serialization of a transcript of affine arkworks elements +#[derive(CanonicalSerialize)] +#[allow(non_snake_case)] +struct ArkTranscriptSimulatorAffine { + G1_vec: Vec, + G2_vec: Vec, + scalar_vec: Vec, +} + +struct ChunkySizes { + g1: usize, + g2: usize, + scalars: usize, +} + +// TODO: Benchmarks are slightly off so there's probably a small bug in these numbers. Not very important +fn chunky_sizes() -> ChunkySizes { + let (_, n) = BENCHMARK_CONFIGS[0]; + let max_weight = 7; + let ell = 16; + + let num_chunks = num_chunks_per_scalar::(ell) as usize; + + let g2 = 2 * n + 1 + 1 + n * num_chunks + num_chunks * max_weight; + let g1 = 2 * (n * num_chunks + num_chunks * max_weight) + ell as usize + 2 + 2 + 2; + let scalars = ell as usize + 2 + 2; + + ChunkySizes { g1, g2, scalars } +} + +#[derive(Serialize)] +#[allow(non_snake_case)] +struct BlstrsTranscriptSimulatorProj { + G1_vec: Vec, + G2_vec: Vec, + scalar_vec: Vec, +} + +#[derive(Serialize)] +#[allow(non_snake_case)] +struct BlstrsTranscriptSimulatorAffine { + G1_vec: Vec, + G2_vec: Vec, + scalar_vec: Vec, +} + +struct DasSizes { + g1: usize, + g2: usize, + scalars: usize, +} + +// We ignore the SoKs +fn das_sizes() -> DasSizes { + let (_, n) = BENCHMARK_CONFIGS[0]; + + #[allow(clippy::identity_op)] + let g1 = (n + n + 1 + n) * 1; // The `* 1` is for experimenting + #[allow(clippy::identity_op)] + let g2 = (n + n + 1) * 1; + + DasSizes { g1, g2, scalars: 0 } +} + +fn random_projective_vec_ark(n: usize, rng: &mut R) -> Vec +where + R: rand_core::RngCore + rand_core::CryptoRng, +{ + unsafe_random_points_group::(n, rng) + .into_iter() + .map(|p| p * sample_field_element::(rng)) + .collect() +} + +fn random_projective_g1_vec_blstrs(n: usize, rng: &mut R) -> Vec +where + R: rand_core::RngCore + rand_core::CryptoRng, +{ + insecure_random_g1_points(n, rng) + .into_iter() + .map(|p| p * random_scalar(rng)) + .collect() +} + +fn random_projective_g2_vec_blstrs(n: usize, rng: &mut R) -> Vec +where + R: rand_core::RngCore + rand_core::CryptoRng, +{ + insecure_random_g2_points(n, rng) + .into_iter() + .map(|p| p * random_scalar(rng)) + .collect() +} + +fn random_projective_chunky_transcript( + sizes: &ChunkySizes, + rng: &mut R, +) -> ArkTranscriptSimulatorProj +where + R: rand_core::RngCore + rand_core::CryptoRng, +{ + ArkTranscriptSimulatorProj { + G1_vec: random_projective_vec_ark::(sizes.g1, rng), + G2_vec: random_projective_vec_ark::(sizes.g2, rng), + scalar_vec: sample_field_elements(sizes.scalars, rng), + } +} + +fn random_projective_das_transcript( + sizes: &DasSizes, + rng: &mut R, +) -> BlstrsTranscriptSimulatorProj +where + R: rand_core::RngCore + rand_core::CryptoRng, +{ + BlstrsTranscriptSimulatorProj { + G1_vec: random_projective_g1_vec_blstrs(sizes.g1, rng), + G2_vec: random_projective_g2_vec_blstrs(sizes.g2, rng), + scalar_vec: random_scalars(sizes.scalars, rng), + } +} + +fn affine_chunky_transcript_from_projective_transcript( + projective_transcript: &ArkTranscriptSimulatorProj, +) -> ArkTranscriptSimulatorAffine { + ArkTranscriptSimulatorAffine { + G1_vec: 
ArkG1Projective::normalize_batch(&projective_transcript.G1_vec), + G2_vec: ArkG2Projective::normalize_batch(&projective_transcript.G2_vec), + scalar_vec: projective_transcript.scalar_vec.clone(), + } +} + +#[allow(non_snake_case)] +fn affine_das_transcript_from_projective_transcript( + projective_transcript: &BlstrsTranscriptSimulatorProj, +) -> BlstrsTranscriptSimulatorAffine { + // let mut G1_vec = vec![BlstrsG1Affine::generator(); projective_transcript.G1_vec.len()]; + // let mut G2_vec = vec![BlstrsG2Affine::generator(); projective_transcript.G2_vec.len()]; + let G1_vec = blstrs_normalize_batch_g1(&projective_transcript.G1_vec); + let G2_vec = blstrs_normalize_batch_g2(&projective_transcript.G2_vec); + + BlstrsTranscriptSimulatorAffine { + G1_vec, + G2_vec, + scalar_vec: projective_transcript.scalar_vec.clone(), + } +} + +fn bench_projective_serialization_arkworks(c: &mut Criterion) { + let mut rng = rand::thread_rng(); + let sizes = chunky_sizes(); + + c.bench_function( + "serialize chunky transcript (projective, compressed)", + |b| { + b.iter_batched( + || random_projective_chunky_transcript(&sizes, &mut rng), + |transcript| { + let mut bytes = Vec::new(); + transcript.serialize_compressed(&mut bytes).unwrap(); + black_box(bytes); + }, + BatchSize::SmallInput, + ) + }, + ); +} + +fn bench_affine_serialization_arkworks(c: &mut Criterion) { + let mut rng = rand::thread_rng(); + let sizes = chunky_sizes(); + + c.bench_function("serialize chunky transcript (affine, compressed)", |b| { + b.iter_batched( + || random_projective_chunky_transcript(&sizes, &mut rng), + |projective_transcript| { + let mut bytes = Vec::new(); + + let affine_transcript = + affine_chunky_transcript_from_projective_transcript(&projective_transcript); + + affine_transcript.serialize_compressed(&mut bytes).unwrap(); + black_box(bytes); + }, + BatchSize::SmallInput, + ) + }); +} + +fn bench_projective_serialization_blstrs(c: &mut Criterion) { + let mut rng = rand::thread_rng(); + let sizes = das_sizes(); + + c.bench_function("serialize das transcript (projective, compressed)", |b| { + b.iter_batched( + || random_projective_das_transcript(&sizes, &mut rng), + |transcript| { + let bytes = bcs::to_bytes(&transcript).unwrap(); + black_box(bytes); + }, + BatchSize::SmallInput, + ) + }); +} + +fn bench_affine_serialization_blstrs(c: &mut Criterion) { + let mut rng = rand::thread_rng(); + let sizes = das_sizes(); + + c.bench_function("serialize das transcript (affine, compressed)", |b| { + b.iter_batched( + || random_projective_das_transcript(&sizes, &mut rng), + |projective_transcript| { + let affine_transcript = + affine_das_transcript_from_projective_transcript(&projective_transcript); + + let bytes = bcs::to_bytes(&affine_transcript).unwrap(); + black_box(bytes); + }, + BatchSize::SmallInput, + ) + }); +} + +criterion_group!( + name = benches; + config = Criterion::default().sample_size(10); + targets = + bench_arkworks_projective_1k, + bench_arkworks_affine_1k, + bench_blstrs_projective_1k, + bench_blstrs_affine_1k, + bench_projective_serialization_arkworks, + bench_affine_serialization_arkworks, + bench_projective_serialization_blstrs, + bench_affine_serialization_blstrs +); + +criterion_main!(benches); diff --git a/crates/aptos-dkg/src/fiat_shamir.rs b/crates/aptos-dkg/src/fiat_shamir.rs index f04e22cb8ac8f..735bc38423fdd 100644 --- a/crates/aptos-dkg/src/fiat_shamir.rs +++ b/crates/aptos-dkg/src/fiat_shamir.rs @@ -2,17 +2,18 @@ // Licensed pursuant to the Innovation-Enabling Source Code License, available at 
https://github.com/aptos-labs/aptos-core/blob/main/LICENSE //! For what it's worth, I don't understand why the `merlin` library wants the user to first define -//! a trait with their 'append' operations and then implement that trait on `merlin::Transcript`. +//! a trait with their 'append' operations and then implement that trait on `Transcript`. //! I also don't understand how that doesn't break the orphan rule in Rust. //! I suspect the reason they want the developer to do things these ways is to force them to cleanly //! define all the things that are appended to the transcript. use crate::{ - range_proofs::traits::BatchedRangeProof, sigma_protocol, sigma_protocol::homomorphism, Scalar, + range_proofs::traits::BatchedRangeProof, sigma_protocol, sigma_protocol::homomorphism, }; -use ark_ec::pairing::Pairing; +use ark_ec::{pairing::Pairing, CurveGroup}; use ark_ff::PrimeField; use ark_serialize::CanonicalSerialize; +use merlin::Transcript; use serde::Serialize; /// Helper trait for deriving random scalars from a transcript. @@ -24,35 +25,33 @@ use serde::Serialize; /// ⚠️ This trait is intentionally private: functions like `challenge_scalars` /// should **only** be used internally to ensure properly /// labelled scalar generation across Fiat-Shamir protocols. -// -// TODO: Again, seems that ideally Scalar should become Scalar instead trait ScalarProtocol { - fn challenge_full_scalars(&mut self, label: &[u8], num_scalars: usize) -> Vec>; + fn challenge_full_scalars(&mut self, label: &[u8], num_scalars: usize) -> Vec; - fn challenge_full_scalar(&mut self, label: &[u8]) -> Scalar { + fn challenge_full_scalar(&mut self, label: &[u8]) -> F { self.challenge_full_scalars(label, 1)[0] } - fn challenge_128bit_scalars(&mut self, label: &[u8], num_scalars: usize) -> Vec>; + fn challenge_128bit_scalars(&mut self, label: &[u8], num_scalars: usize) -> Vec; } -impl ScalarProtocol for merlin::Transcript { - fn challenge_full_scalars(&mut self, label: &[u8], num_scalars: usize) -> Vec> { +impl ScalarProtocol for Transcript { + fn challenge_full_scalars(&mut self, label: &[u8], num_scalars: usize) -> Vec { let byte_size = (F::MODULUS_BIT_SIZE as usize) / 8; let mut buf = vec![0u8; 2 * num_scalars * byte_size]; self.challenge_bytes(label, &mut buf); buf.chunks(2 * byte_size) - .map(|chunk| Scalar(F::from_le_bytes_mod_order(chunk))) + .map(|chunk| F::from_le_bytes_mod_order(chunk)) .collect() } - fn challenge_128bit_scalars(&mut self, label: &[u8], num_scalars: usize) -> Vec> { + fn challenge_128bit_scalars(&mut self, label: &[u8], num_scalars: usize) -> Vec { let mut buf = vec![0u8; num_scalars * 16]; self.challenge_bytes(label, &mut buf); buf.chunks(16) - .map(|chunk| Scalar(F::from_le_bytes_mod_order(chunk.try_into().unwrap()))) + .map(|chunk| F::from_le_bytes_mod_order(chunk.try_into().unwrap())) .collect() } } @@ -97,8 +96,17 @@ pub trait SigmaProtocol: ScalarProtocol F; } +// These may or may not need a pairing, so for we're moving the generic parameters to the methods +pub trait PolynomialCommitmentScheme { + fn append_sep(&mut self, dst: &[u8]); + + fn append_point(&mut self, point: &C); + + fn challenge_scalar(&mut self) -> F; +} + #[allow(non_snake_case)] -impl> RangeProof for merlin::Transcript { +impl> RangeProof for Transcript { fn append_sep(&mut self, dst: &[u8]) { self.append_message(b"dom-sep", dst); } @@ -123,7 +131,7 @@ impl> RangeProof for merlin::Transcrip commitment .serialize_compressed(&mut commitment_bytes) .expect("hat_f_commitment serialization should succeed"); - 
self.append_message(b"hat_f_commitment", commitment_bytes.as_slice()); + self.append_message(b"hat-f-commitment", commitment_bytes.as_slice()); } fn append_sigma_proof(&mut self, sigma_proof: &A) { @@ -131,7 +139,7 @@ impl> RangeProof for merlin::Transcrip sigma_proof .serialize_compressed(&mut sigma_proof_bytes) .expect("sigma proof serialization should succeed"); - self.append_message(b"sigma_proof_commitment", sigma_proof_bytes.as_slice()); + self.append_message(b"sigma-proof-commitment", sigma_proof_bytes.as_slice()); } fn append_f_j_commitments(&mut self, f_j_commitments: &A) { @@ -139,7 +147,7 @@ impl> RangeProof for merlin::Transcrip f_j_commitments .serialize_compressed(&mut f_j_commitments_bytes) .expect("f_j_commitments serialization should succeed"); - self.append_message(b"f_j_commitments", f_j_commitments_bytes.as_slice()); + self.append_message(b"f-j-commitments", f_j_commitments_bytes.as_slice()); } fn append_h_commitment(&mut self, commitment: &A) { @@ -147,42 +155,34 @@ impl> RangeProof for merlin::Transcrip commitment .serialize_compressed(&mut commitment_bytes) .expect("h_commitment serialization should succeed"); - self.append_message(b"h_commitment", commitment_bytes.as_slice()); + self.append_message(b"h-commitment", commitment_bytes.as_slice()); } fn challenges_for_quotient_polynomials(&mut self, ell: usize) -> Vec { - let challenges = - >::challenge_128bit_scalars( - self, - b"challenge_for_quotient_polynomials", - ell + 1, - ); - - Scalar::::vec_into_inner(challenges) + >::challenge_128bit_scalars( + self, + b"challenge-for-quotient-polynomials", + ell + 1, + ) } fn challenges_for_linear_combination(&mut self, num: usize) -> Vec { - let challenges = - >::challenge_128bit_scalars( - self, - b"challenge_for_linear_combination", - num, - ); - - Scalar::::vec_into_inner(challenges) + >::challenge_128bit_scalars( + self, + b"challenge-for-linear-combination", + num, + ) } fn challenge_from_verifier(&mut self) -> E::ScalarField { - >::challenge_full_scalar( + >::challenge_full_scalar( self, - b"verifier_challenge_for_linear_combination", + b"verifier-challenge-for-linear-combination", ) - .0 } } -impl SigmaProtocol - for merlin::Transcript +impl SigmaProtocol for Transcript where H::Domain: sigma_protocol::Witness, H::Codomain: sigma_protocol::Statement, @@ -219,10 +219,27 @@ where } fn challenge_for_sigma_protocol(&mut self) -> F { - >::challenge_full_scalar( + >::challenge_full_scalar( self, - b"challenge_sigma_protocol", + b"challenge-for-sigma-protocol", ) - .0 + } +} + +impl PolynomialCommitmentScheme for Transcript { + fn append_sep(&mut self, dst: &[u8]) { + self.append_message(b"dom-sep", dst); + } + + fn append_point(&mut self, point: &C) { + let mut buf = Vec::new(); + point + .serialize_compressed(&mut buf) + .expect("Point serialization failed"); + self.append_message(b"point", &buf); + } + + fn challenge_scalar(&mut self) -> F { + >::challenge_full_scalar(self, b"challenge-for-pcs") } } diff --git a/crates/aptos-dkg/src/lib.rs b/crates/aptos-dkg/src/lib.rs index d22ac06f220bc..bb1a91081d01a 100644 --- a/crates/aptos-dkg/src/lib.rs +++ b/crates/aptos-dkg/src/lib.rs @@ -38,6 +38,7 @@ pub mod pcs; pub mod pvss; pub mod range_proofs; pub mod sigma_protocol; +//pub mod sumcheck; pub mod utils; pub mod weighted_vuf; use ark_ff::PrimeField; diff --git a/crates/aptos-dkg/src/pcs/mod.rs b/crates/aptos-dkg/src/pcs/mod.rs index d57e0ffb5d3b0..8d190fc870f28 100644 --- a/crates/aptos-dkg/src/pcs/mod.rs +++ b/crates/aptos-dkg/src/pcs/mod.rs @@ -1,5 +1,8 @@ // Copyright 
(c) Aptos Foundation // Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE +pub mod traits; pub mod univariate_hiding_kzg; pub mod univariate_kzg; +pub mod zeromorph; +//pub mod zk_samaritan; diff --git a/crates/aptos-dkg/src/pcs/traits.rs b/crates/aptos-dkg/src/pcs/traits.rs new file mode 100644 index 0000000000000..fb64d142a66df --- /dev/null +++ b/crates/aptos-dkg/src/pcs/traits.rs @@ -0,0 +1,110 @@ +// Copyright (c) Aptos Foundation +// Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE + +// Some of this is derived from: https://www.ietf.org/archive/id/draft-zkproof-polycommit-00.html + +// TODO: This trait is still very much a work in progress + +use rand::{CryptoRng, RngCore}; + +pub trait PolynomialCommitmentScheme { + type CommitmentKey: Clone; + type VerificationKey: Clone; + type Polynomial: Clone; + type WitnessField: Clone + From; // So the domain of a polynomial is a Vec + // For small fields, add ChallengeField here, which should probably have a from-WitnessField-property + type Commitment: Clone; + type Proof: Clone; + + fn setup( + // security_bits: usize, // make this an Option ?? + degree_bounds: Vec, + rng: &mut R, + ) -> (Self::CommitmentKey, Self::VerificationKey); + + fn commit( + ck: &Self::CommitmentKey, + poly: Self::Polynomial, + r: Option, + ) -> Self::Commitment; + + fn open( + ck: &Self::CommitmentKey, + poly: Self::Polynomial, + // com: Self::Commitment, + //com_state: CommitmentState, + challenge: Vec, + // Might want to put `eval` here + r: Option, + rng: &mut R, + trs: &mut merlin::Transcript, + ) -> Self::Proof; + + fn batch_open( + ck: Self::CommitmentKey, + polys: Vec, + // coms: Vec, + challenge: Vec, + rs: Option>, + rng: &mut R, + trs: &mut merlin::Transcript, + ) -> Self::Proof; + + fn verify( + vk: &Self::VerificationKey, + com: Self::Commitment, + challenge: Vec, + eval: Self::WitnessField, + proof: Self::Proof, + trs: &mut merlin::Transcript, + ) -> anyhow::Result<()>; + + fn random_witness( + rng: &mut R, + ) -> Self::WitnessField; + + fn polynomial_from_vec(vec: Vec) -> Self::Polynomial; + + fn evaluate_point( + poly: &Self::Polynomial, + point: &Vec, + ) -> Self::WitnessField; + + fn scheme_name() -> &'static [u8]; +} + +/// Generate a random polynomial from a set of size `len` consisting of values of bit-length `ell`. +/// +/// - `len` controls the number of values used to generate the polynomial. +/// - `ell` controls the bit-length of each value (should be at most 64). +pub fn random_poly( + rng: &mut R, + len: u32, // limited to u32 only because higher wouldn't be too slow for most commitment schemes + ell: u8, +) -> CS::Polynomial { + // Sample `len` field elements, each constructed from an `ell`-bit integer + let ell_bit_values: Vec = (0..len) + .map(|_| { + // Mask to `ell` bits by shifting away higher bits + let val = rng.next_u64() >> (64 - ell); + CS::WitnessField::from(val) + }) + .collect(); + + // Convert the value vector into a polynomial representation + CS::polynomial_from_vec(ell_bit_values) +} + +/// Generate a random evaluation point in FF^n. +/// +/// This corresponds to sampling a point at which the polynomial will be opened. +/// The dimension `num_vars` should be log2 of the polynomial length. +pub fn random_point< + CS: PolynomialCommitmentScheme, + R: rand_core::RngCore + rand_core::CryptoRng, +>( + rng: &mut R, + num_vars: u32, // i.e. 
this is `n` if the point lies in `FF^n` +) -> Vec { + (0..num_vars).map(|_| CS::random_witness(rng)).collect() +} diff --git a/crates/aptos-dkg/src/pcs/univariate_hiding_kzg.rs b/crates/aptos-dkg/src/pcs/univariate_hiding_kzg.rs index 49d0b339f7b6d..77d4708a9b6d0 100644 --- a/crates/aptos-dkg/src/pcs/univariate_hiding_kzg.rs +++ b/crates/aptos-dkg/src/pcs/univariate_hiding_kzg.rs @@ -17,6 +17,7 @@ use aptos_crypto::{ arkworks::{ msm::{IsMsmInput, MsmInput}, random::{sample_field_element, unsafe_random_point}, + srs::{lagrange_basis, powers_of_tau, SrsBasis, SrsType}, GroupGenerators, }, utils, @@ -27,11 +28,13 @@ use ark_ec::{ AdditiveGroup, CurveGroup, VariableBaseMSM, }; use ark_ff::{Field, PrimeField}; -use ark_poly::EvaluationDomain; +use ark_poly::{ + polynomial::univariate::DensePolynomial, univariate::DenseOrSparsePolynomial, EvaluationDomain, +}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use rand::{CryptoRng, RngCore}; use sigma_protocol::homomorphism::TrivialShape as CodomainShape; -use std::fmt::Debug; +use std::{borrow::Cow, fmt::Debug}; pub type Commitment = CodomainShape<::G1>; @@ -48,33 +51,41 @@ impl OpeningProof { /// Useful for testing and benchmarking. TODO: might be able to derive this through macros etc pub fn generate(rng: &mut R) -> Self { Self { - pi_1: sigma_protocol::homomorphism::TrivialShape(unsafe_random_point(rng)), - pi_2: unsafe_random_point(rng), + pi_1: sigma_protocol::homomorphism::TrivialShape( + unsafe_random_point::(rng).into(), + ), + pi_2: unsafe_random_point::(rng).into(), } } } -#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq)] +#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Copy, Debug, PartialEq, Eq)] pub struct VerificationKey { pub xi_2: E::G2Affine, pub tau_2: E::G2Affine, pub group_generators: GroupGenerators, } +// For Zeromorph one also need powers of tau in g2 +#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq)] +pub struct VerificationKeyExtra { + pub vk: VerificationKey, + pub g2_powers: Vec, +} + #[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq)] pub struct CommitmentKey { pub xi_1: E::G1Affine, pub tau_1: E::G1Affine, - pub lagr_g1: Vec, + pub msm_basis: SrsBasis, pub eval_dom: ark_poly::Radix2EvaluationDomain, pub roots_of_unity_in_eval_dom: Vec, - pub one_1: E::G1Affine, + pub g1: E::G1Affine, pub m_inv: E::ScalarField, } #[derive(CanonicalSerialize, Debug, Clone)] pub struct Trapdoor { - // Not sure this is the ideal location for tau... 
pub xi: E::ScalarField, pub tau: E::ScalarField, } @@ -88,49 +99,33 @@ impl Trapdoor { } } -pub fn lagrange_basis( - n: usize, - g1: E::G1Affine, - eval_dom: ark_poly::Radix2EvaluationDomain, - tau: E::ScalarField, -) -> Vec { - let powers_of_tau = utils::powers(tau, n); - let lagr_basis_scalars = eval_dom.ifft(&powers_of_tau); - debug_assert!(lagr_basis_scalars.iter().sum::() == E::ScalarField::ONE); - - let lagr_g1_proj: Vec = lagr_basis_scalars.iter().map(|s| g1 * s).collect(); - E::G1::normalize_batch(&lagr_g1_proj) -} - -pub fn setup( +pub fn setup( m: usize, + basis_type: SrsType, group_generators: GroupGenerators, trapdoor: Trapdoor, - _rng: &mut R, ) -> (VerificationKey, CommitmentKey) { - assert!( - m.is_power_of_two(), - "Parameter m must be a power of 2, but got {}", - m - ); + utils::assert_power_of_two(m); - let GroupGenerators { - g1: one_1, - g2: one_2, - } = group_generators; + let GroupGenerators { g1, g2 } = group_generators; let Trapdoor { xi, tau } = trapdoor; - let xi_1 = (one_1 * xi).into_affine(); - let tau_1 = (one_1 * tau).into_affine(); - - let xi_2 = (one_2 * xi).into_affine(); - let tau_2 = (one_2 * tau).into_affine(); + let (xi_1, tau_1) = ((g1 * xi).into_affine(), (g1 * tau).into_affine()); + let (xi_2, tau_2) = ((g2 * xi).into_affine(), (g2 * tau).into_affine()); let eval_dom = ark_poly::Radix2EvaluationDomain::::new(m) .expect("Could not construct evaluation domain"); - let lagr_g1 = lagrange_basis::(m, one_1, eval_dom, tau); - let roots_of_unity_in_eval_dom: Vec = eval_dom.elements().collect(); + let msm_basis = match basis_type { + SrsType::Lagrange => SrsBasis::Lagrange { + lagr: lagrange_basis::(g1.into(), tau, m, eval_dom), + }, + SrsType::PowersOfTau => SrsBasis::PowersOfTau { + tau_powers: powers_of_tau::(g1.into(), tau, m), + }, + }; + + let roots_of_unity_in_eval_dom = eval_dom.elements().collect(); let m_inv = E::ScalarField::from(m as u64).inverse().unwrap(); ( @@ -142,28 +137,58 @@ pub fn setup( CommitmentKey { xi_1, tau_1, - lagr_g1, + msm_basis, eval_dom, roots_of_unity_in_eval_dom, - one_1, + g1, m_inv, }, ) } +pub fn setup_extra( + m: usize, + basis_type: SrsType, + group_generators: GroupGenerators, + trapdoor: Trapdoor, +) -> (VerificationKeyExtra, CommitmentKey) { + let tau = trapdoor.tau; + + let (vk, ck) = setup(m, basis_type, group_generators, trapdoor); + + let g2_powers = powers_of_tau::(vk.group_generators.g2.into(), tau, m); + + (VerificationKeyExtra { vk, g2_powers }, ck) +} + pub fn commit_with_randomness( ck: &CommitmentKey, values: &[E::ScalarField], r: &CommitmentRandomness, ) -> Commitment { + commit_with_randomness_and_offset(ck, values, r, 0) +} + +pub fn commit_with_randomness_and_offset( + ck: &CommitmentKey, + values: &[E::ScalarField], + r: &CommitmentRandomness, + offset: usize, +) -> Commitment { + let msm_basis: &[E::G1Affine] = match &ck.msm_basis { + SrsBasis::Lagrange { lagr: lagr_g1 } => &lagr_g1[offset..], + SrsBasis::PowersOfTau { + tau_powers: tau_powers_g1, + } => &tau_powers_g1[offset..], + }; let commitment_hom: CommitmentHomomorphism<'_, E> = CommitmentHomomorphism { - lagr_g1: &ck.lagr_g1, + msm_basis, xi_1: ck.xi_1, }; let input = Witness { hiding_randomness: r.clone(), - values: Scalar::vec_from_inner_slice(values), + values: Scalar::vec_from_inner_slice(&values[offset..]), }; commitment_hom.apply(&input) @@ -172,22 +197,45 @@ pub fn commit_with_randomness( impl<'a, E: Pairing> CommitmentHomomorphism<'a, E> { pub fn open( ck: &CommitmentKey, - f_evals: Vec, + f_vals: Vec, // needs to be evaluations of a 
polynomial f OR its coefficients, depending on `ck.msm_basis` rho: E::ScalarField, x: E::ScalarField, y: E::ScalarField, s: &CommitmentRandomness, ) -> OpeningProof { - if ck.roots_of_unity_in_eval_dom.contains(&x) { - panic!("x is not allowed to be a root of unity"); - } - let q_evals = - polynomials::quotient_evaluations_batch(&f_evals, &ck.roots_of_unity_in_eval_dom, x, y); + let q_vals = match &ck.msm_basis { + SrsBasis::Lagrange { .. } => { + // Lagrange basis expects f_vals to be evaluations, and we return q_vals with evaluations + // The `quotient_evaluations_batch()` function divides over `(theta_i - x)` for `theta_i` an m-th root of unity, hence: + if ck.roots_of_unity_in_eval_dom.contains(&x) { + panic!("x is not allowed to be a root of unity"); + } + polynomials::quotient_evaluations_batch( + &f_vals, + &ck.roots_of_unity_in_eval_dom, + x, + y, + ) + }, + SrsBasis::PowersOfTau { .. } => { + // Powers-of-Tau expects f_vals to be coefficients, and we return q_vals with coefficients + // For some reason arkworks only implemented `divide_with_q_and_r()` for `DenseOrSparsePolynomial` + let f_dense = DensePolynomial { coeffs: f_vals }; + let f = DenseOrSparsePolynomial::DPolynomial(Cow::Owned(f_dense)); + let divisor_dense = DensePolynomial { + coeffs: vec![-x, E::ScalarField::ONE], + }; + let divisor = DenseOrSparsePolynomial::DPolynomial(Cow::Owned(divisor_dense)); + + let (q, _) = f.divide_with_q_and_r(&divisor).expect("Could not divide polynomial, but that shouldn't happen because the divisor is nonzero"); + q.coeffs + }, + }; - let pi_1 = commit_with_randomness(ck, &q_evals, s); + let pi_1 = commit_with_randomness(ck, &q_vals, s); // For this small MSM, the direct approach seems to be faster than using `E::G1::msm()` - let pi_2 = (ck.one_1 * rho) - (ck.tau_1 - ck.one_1 * x) * s.0; + let pi_2 = (ck.g1 * rho) - (ck.tau_1 - ck.g1 * x) * s.0; OpeningProof { pi_1, pi_2 } } @@ -269,7 +317,7 @@ impl<'a, E: Pairing> CommitmentHomomorphism<'a, E> { /// TODO: Since this code is quite similar to that of ordinary KZG, it may be possible to reduce it a bit #[derive(CanonicalSerialize, Debug, Clone, PartialEq, Eq)] pub struct CommitmentHomomorphism<'a, E: Pairing> { - pub lagr_g1: &'a [E::G1Affine], + pub msm_basis: &'a [E::G1Affine], pub xi_1: E::G1Affine, } @@ -286,6 +334,7 @@ impl homomorphism::Trait for CommitmentHomomorphism<'_, E> { type Domain = Witness; fn apply(&self, input: &Self::Domain) -> Self::Codomain { + // CommitmentHomomorphism::<'_, E>::normalize_output(self.apply_msm(self.msm_terms(input))) self.apply_msm(self.msm_terms(input)) } } @@ -301,10 +350,10 @@ impl fixed_base_msms::Trait for CommitmentHomomorphism<'_, E> { fn msm_terms(&self, input: &Self::Domain) -> Self::CodomainShape { assert!( - self.lagr_g1.len() >= input.values.len(), + self.msm_basis.len() >= input.values.len(), "Not enough Lagrange basis elements for univariate hiding KZG: required {}, got {}", input.values.len(), - self.lagr_g1.len() + self.msm_basis.len() ); let mut scalars = Vec::with_capacity(input.values.len() + 1); @@ -313,7 +362,7 @@ impl fixed_base_msms::Trait for CommitmentHomomorphism<'_, E> { let mut bases = Vec::with_capacity(input.values.len() + 1); bases.push(self.xi_1); - bases.extend(&self.lagr_g1[..input.values.len()]); + bases.extend(&self.msm_basis[..input.values.len()]); CodomainShape(MsmInput { bases, scalars }) } @@ -348,7 +397,7 @@ mod tests { let m = 64; let xi = sample_field_element(&mut rng); let tau = sample_field_element(&mut rng); - let (vk, ck) = setup::(m, group_data, 
Trapdoor { xi, tau }, &mut rng); + let (vk, ck) = setup::(m, SrsType::Lagrange, group_data, Trapdoor { xi, tau }); let f_coeffs: Vec> = sample_field_elements(m, &mut rng); let poly = DensePolynomial::> { coeffs: f_coeffs }; diff --git a/crates/aptos-dkg/src/pcs/zeromorph.rs b/crates/aptos-dkg/src/pcs/zeromorph.rs new file mode 100644 index 0000000000000..b2768e91dba2d --- /dev/null +++ b/crates/aptos-dkg/src/pcs/zeromorph.rs @@ -0,0 +1,590 @@ +// Copyright (c) Aptos Foundation +// Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE + +// A lot of this code is copy-pasted from `jolt-core`. TODO: benchmark them against each other + +// THIS CODE HAS NOT YET BEEN VETTED, ONLY USE FOR BENCHMARKING PURPOSES!!!!! + +use crate::{ + fiat_shamir::PolynomialCommitmentScheme as _, + pcs::{ + traits::PolynomialCommitmentScheme, + univariate_hiding_kzg::{self, CommitmentRandomness}, + }, + Scalar, +}; +use aptos_crypto::{ + arkworks::{ + random::{sample_field_element, sample_field_elements}, + srs::SrsType, + GroupGenerators, + }, + utils::powers, +}; +use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup, VariableBaseMSM}; +use ark_ff::batch_inversion; +use ark_poly::{ + evaluations::multivariate::multilinear::DenseMultilinearExtension, + polynomial::univariate::DensePolynomial as UniPoly, DenseUVPolynomial, MultilinearExtension, + Polynomial, +}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::{One, Zero}; +use core::fmt::Debug; +use itertools::izip; +use rand_core::{CryptoRng, RngCore}; +use rayon::prelude::*; +use std::{iter, marker::PhantomData}; + +#[derive(Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct ZeromorphProverKey { + pub commit_pp: univariate_hiding_kzg::CommitmentKey
<P>, + pub open_pp: univariate_hiding_kzg::CommitmentKey<P>, // get rid of this? +} + +#[allow(non_snake_case)] +#[derive(Copy, Clone, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct ZeromorphVerifierKey<P: Pairing> { + pub kzg_vk: univariate_hiding_kzg::VerificationKey<P>, + pub tau_N_max_sub_2_N: P::G2Affine, +} + +#[derive(Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize, Clone)] +pub struct ZeromorphCommitment<P: Pairing>(P::G1); + +impl<P: Pairing> Default for ZeromorphCommitment<P>
{ + fn default() -> Self { + Self(P::G1::zero()) + } +} + +#[derive(Clone, CanonicalSerialize, CanonicalDeserialize, Debug)] +pub struct ZeromorphProof<P: Pairing> { + pub pi: univariate_hiding_kzg::OpeningProof<P>, + pub q_hat_com: univariate_hiding_kzg::Commitment<P>
, // KZG commitment to the batched, lifted-degree poly constructed out of the q_k + pub q_k_com: Vec>, // Vec, // vector of KZG commitments for the q_k +} + +/// Computes the multilinear quotient polynomials for a given polynomial and evaluation point. +/// +/// Given a multilinear polynomial `poly` over `n` variables and a point `point = [x_0, ..., x_{n-1}]`, +/// this function returns `(quotients, eval)`, where: +/// +/// - `quotients` is a vector of univariate polynomials `[q_0, q_1, ..., q_{n-1}]`, each representing +/// the quotient along one variable such that: +/// +/// poly(X) - poly(point) = sum_{k=0}^{n-1} (X_k - point_k) * q_k(X_0, ..., X_{k-1}) +/// +/// - `eval` is the polynomial evaluated at the point, i.e., `poly(point)`. +fn compute_multilinear_quotients( + poly: &DenseMultilinearExtension, + point: &[P::ScalarField], +) -> (Vec>, P::ScalarField) { + let num_vars = poly.num_vars; + assert_eq!(num_vars, point.len()); + + let mut remainder = poly.to_evaluations(); + let mut quotients: Vec<_> = point + .iter() + .enumerate() + .map(|(i, x_i)| { + let (remainder_lo, remainder_hi) = remainder.split_at_mut(1 << (num_vars - 1 - i)); + let mut quotient = vec![P::ScalarField::zero(); remainder_lo.len()]; + + quotient + .par_iter_mut() + .zip(&*remainder_lo) + .zip(&*remainder_hi) + .for_each(|((q, r_lo), r_hi)| { + *q = *r_hi - *r_lo; + }); + + remainder_lo + .par_iter_mut() + .zip(remainder_hi) + .for_each(|(r_lo, r_hi)| { + *r_lo += (*r_hi - *r_lo) * *x_i; + }); + + remainder.truncate(1 << (num_vars - 1 - i)); + + UniPoly::from_coefficients_vec(quotient) + }) + .collect(); + quotients.reverse(); + (quotients, remainder[0]) +} + +/// Compute the batched, lifted-degree quotient `\hat{q}` +/// +/// Example: +/// num_vars = 3 +/// N = 1 << num_vars = 8 +/// +/// q_hat has 8 coefficients: +/// indices: 0 1 2 3 4 5 6 7 +/// q_hat: [0 0 0 0 0 0 0 0] +/// +/// q0 = [a] +/// q1 = [b0, b1] +/// q2 = [c0, c1, c2, c3] +/// +/// indices: 0 1 2 3 4 5 6 7 +/// q_hat: [0 0 0 0 y²*c0 y²*c1 y*b0 + y²*c2 a + y*b1 + y²*c3] +fn compute_batched_lifted_degree_quotient( + quotients: &[UniPoly], + y_challenge: &P::ScalarField, +) -> (UniPoly, usize) { + let num_vars = quotients.len(); + + // Compute \hat{q} = \sum_k y^k * X^{N - d_k - 1} * q_k + let mut scalar = P::ScalarField::one(); // y^k + + // Rather than explicitly computing the shifts of q_k by N - d_k - 1 (i.e. multiplying q_k by X^{N - d_k - 1}) + // then accumulating them, we simply accumulate y^k*q_k into \hat{q} at the index offset N - d_k - 1 + let q_hat = quotients.iter().enumerate().fold( + vec![P::ScalarField::zero(); 1 << num_vars], // the coefficient vector + |mut q_hat, (k, q)| { + let q_hat_iter = q_hat[(1 << num_vars) - (1 << k)..].par_iter_mut(); + q_hat_iter.zip(&q.coeffs).for_each(|(q_hat, q)| { + *q_hat += scalar * *q; + }); + scalar *= *y_challenge; + q_hat + }, + ); + + (UniPoly::from_coefficients_vec(q_hat), 1 << (num_vars - 1)) +} + +fn eval_and_quotient_scalars( + y_challenge: P::ScalarField, + x_challenge: P::ScalarField, + z_challenge: P::ScalarField, + challenges: &[P::ScalarField], +) -> (P::ScalarField, (Vec, Vec)) { + let num_vars = challenges.len(); + + // squares of x = [x, x^2, .. x^{2^k}, .. 
x^{2^num_vars}] + let squares_of_x: Vec<_> = std::iter::successors(Some(x_challenge), |&x| Some(x * x)) + .take(num_vars + 1) + .collect(); + + // - These are cumulative products of powers of `x` in reverse order: + // ```text + // offsets_of_x[k] = Π_{j=k+1}^{n-1} x^{2^j} + // ``` + // - Example: let `num_vars = 3` and `x_challenge = x`. Then + // ```text + // squares_of_x = [x, x^2, x^4, x^8] + // offsets_of_x = [x^7, x^6, x^4] + let offsets_of_x = { + let mut offsets_of_x = squares_of_x + .iter() + .rev() + .skip(1) + .scan(P::ScalarField::one(), |acc, pow_x| { + *acc *= *pow_x; + Some(*acc) + }) + .collect::>(); + offsets_of_x.reverse(); + offsets_of_x + }; + + // vs[i] = (x^{2^n} - 1)/(x^{2^i} - 1) + let vs = { + let v_numer = squares_of_x[num_vars] - P::ScalarField::one(); + let mut v_denoms = squares_of_x + .iter() + .map(|squares_of_x| *squares_of_x - P::ScalarField::one()) + .collect::>(); + batch_inversion(&mut v_denoms); + v_denoms + .iter() + .map(|v_denom| v_numer * *v_denom) + .collect::>() + }; + + let q_scalars = izip!( + iter::successors(Some(P::ScalarField::one()), |acc| Some(*acc * y_challenge)) + .take(num_vars), + offsets_of_x, + squares_of_x, + &vs, + &vs[1..], + challenges.iter().rev() + ) + .map(|(power_of_y, offset_of_x, square_of_x, v_i, v_j, u_i)| { + ( + -(power_of_y * offset_of_x), + -(z_challenge * (square_of_x * *v_j - *u_i * *v_i)), + ) + }) + .unzip(); + // -vs[0] * z = -z * (x^(2^num_vars) - 1) / (x - 1) = -z Φ_n(x) + (-vs[0] * z_challenge, q_scalars) +} + +#[derive(Clone)] +pub struct Zeromorph { + _phantom: PhantomData
<P>, +} + +impl<P> Zeromorph<P>
+where + P: Pairing, +{ + pub fn protocol_name() -> &'static [u8] { + b"Zeromorph" + } + + // Commits to the evaluations on the hypercube + pub fn commit( + pp: &ZeromorphProverKey
<P>, + poly: &DenseMultilinearExtension<P::ScalarField>, + r: P::ScalarField, + ) -> ZeromorphCommitment<P>
{ + // TODO: PUT THIS BACK IN + // if pp.commit_pp.g1_powers().len() < poly.len() { + // return Err(ProofVerifyError::KeyLengthError( + // pp.commit_pp.g1_powers().len(), + // poly.len(), + // )); + // } + ZeromorphCommitment( + univariate_hiding_kzg::commit_with_randomness( + &pp.commit_pp, + &poly.to_evaluations(), + &Scalar(r), + ) + .0, + ) + } + + pub fn open( + pp: &ZeromorphProverKey
<P>, + poly: &DenseMultilinearExtension<P::ScalarField>, + point: &[P::ScalarField], + eval: P::ScalarField, // Can be calculated + s: CommitmentRandomness, + rng: &mut R, + transcript: &mut merlin::Transcript, + ) -> ZeromorphProof<P>
{ + transcript.append_sep(Self::protocol_name()); + + // TODO: PUT THIS BACK IN + // if pp.commit_pp.msm_basis.len() < poly.len() { + // return Err(ProofVerifyError::KeyLengthError( + // pp.commit_pp.g1_powers().len(), + // poly.len(), + // )); + // } + + // assert_eq!(poly.evaluate(point), *eval); + + let (quotients, _): (Vec>, P::ScalarField) = + compute_multilinear_quotients::
<P>
(poly, point); + assert_eq!(quotients.len(), poly.num_vars); + // assert_eq!(remainder, *eval); TODO: put back in? + + // Step 1: commit to all of the q_k + let rs: Vec> = + sample_field_elements::(quotients.len(), rng) + .into_iter() + .map(Scalar) + .collect(); + //let r = Scalar(sample_field_element::(rng)); + let q_k_com: Vec> = quotients + .iter() + .zip(rs.iter()) + .map(|(quotient, r)| { + univariate_hiding_kzg::commit_with_randomness(&pp.commit_pp, "ient.coeffs, r) + }) + .collect(); + + // Step 2: verifier challenge to aggregate degree bound proofs + q_k_com.iter().for_each(|c| transcript.append_point(&c.0)); + let y_challenge: P::ScalarField = transcript.challenge_scalar(); + + // Step 3: Aggregate shifted q_k into \hat{q} and compute commitment + + // Compute the batched, lifted-degree quotient `\hat{q}` + // qq_hat = ∑_{i=0}^{num_vars-1} y^i * X^(2^num_vars - d_k - 1) * q_i(x) + let (q_hat, offset) = compute_batched_lifted_degree_quotient::
<P>
("ients, &y_challenge); + + // Compute and absorb the commitment C_q = [\hat{q}] + let r = Scalar(sample_field_element::(rng)); + let q_hat_com = univariate_hiding_kzg::commit_with_randomness_and_offset( + &pp.commit_pp, + &q_hat, + &r, + offset, + ); + transcript.append_point(&q_hat_com.0); + + // Step 4/6: Obtain x challenge to evaluate the polynomial, and z challenge to aggregate two challenges + let x_challenge = transcript.challenge_scalar(); + let z_challenge = transcript.challenge_scalar(); + + // Step 5/7: Compute this batched poly + + // Compute batched degree and ZM-identity quotient polynomial pi + let (eval_scalar, (degree_check_q_scalars, zmpoly_q_scalars)): ( + P::ScalarField, + (Vec, Vec), + ) = eval_and_quotient_scalars::
<P>
(y_challenge, x_challenge, z_challenge, point); + // f = z * poly.Z + q_hat + (-z * Φ_n(x) * e) + ∑_k (q_scalars_k * q_k) hmm why no sign for the q_hat???? + let mut f = UniPoly::from_coefficients_vec(poly.to_evaluations()); + f = f * z_challenge; // TODO: add MulAssign to arkworks so you can write f *= z_challenge? + f += &q_hat; + f[0] += eval_scalar * eval; + quotients + .into_iter() + .zip(degree_check_q_scalars) + .zip(zmpoly_q_scalars) + .for_each(|((mut q, degree_check_scalar), zm_poly_scalar)| { + q = q * (degree_check_scalar + zm_poly_scalar); + f += &q; + }); + //debug_assert_eq!(f.evaluate(&x_challenge), P::ScalarField::zero()); + + // Compute and send proof commitment pi + let rho = sample_field_element::(rng); + + let pi = univariate_hiding_kzg::CommitmentHomomorphism::open( + &pp.open_pp, + f.coeffs, + rho, + x_challenge, + P::ScalarField::zero(), + &s, + ); + + ZeromorphProof { + pi, + q_hat_com, + q_k_com, + } + } + + pub fn verify( + vk: &ZeromorphVerifierKey
<P>, + comm: &ZeromorphCommitment<P>, + point: &[P::ScalarField], + eval: &P::ScalarField, + proof: &ZeromorphProof<P>
, + transcript: &mut merlin::Transcript, + ) -> anyhow::Result<()> { + transcript.append_sep(Self::protocol_name()); + + //let q_comms: Vec = proof.q_k_com.iter().map(|c| c.into_group()).collect(); + proof + .q_k_com + .iter() + .for_each(|c| transcript.append_point(&c.0)); + + // Challenge y + let y_challenge: P::ScalarField = transcript.challenge_scalar(); + + // Receive commitment C_q_hat + transcript.append_point(&proof.q_hat_com.0); + + // Get x and z challenges + let x_challenge = transcript.challenge_scalar(); + let z_challenge = transcript.challenge_scalar(); + + // Compute batched degree and ZM-identity quotient polynomial pi + let (eval_scalar, (mut q_scalars, zmpoly_q_scalars)): ( + P::ScalarField, + (Vec, Vec), + ) = eval_and_quotient_scalars::
<P>
(y_challenge, x_challenge, z_challenge, point); + q_scalars + .iter_mut() + .zip(zmpoly_q_scalars) + .for_each(|(scalar, zm_poly_q_scalar)| { + *scalar += zm_poly_q_scalar; + }); + let scalars = [ + vec![P::ScalarField::one(), z_challenge, eval_scalar * *eval], + q_scalars, + ] + .concat(); + + let mut bases_proj = Vec::with_capacity(3 + proof.q_k_com.len()); + + bases_proj.push(proof.q_hat_com.0); + bases_proj.push(comm.0); + bases_proj.push(vk.kzg_vk.group_generators.g1.into_group()); // Not so ideal to include this in `normalize_batch` but the effect should be negligible + bases_proj.extend(proof.q_k_com.iter().map(|w| w.0)); + + let bases = P::G1::normalize_batch(&bases_proj); + + let zeta_z_com = ::msm(&bases, &scalars) + .expect("MSM failed in ZeroMorph") + .into_affine(); + + // e(pi, [tau]_2 - x * [1]_2) == e(C_{\zeta,Z}, -[X^(N_max - 2^n - 1)]_2) <==> e(C_{\zeta,Z} - x * pi, [X^{N_max - 2^n - 1}]_2) * e(-pi, [tau_2]) == 1 + let pairing = P::multi_pairing( + [ + zeta_z_com, + proof.pi.pi_1.0.into_affine(), + proof.pi.pi_2.into_affine(), + ], + [ + (-vk.tau_N_max_sub_2_N.into_group()).into_affine(), + (vk.kzg_vk.tau_2.into_group() - (vk.kzg_vk.group_generators.g2 * x_challenge)) + .into(), + vk.kzg_vk.xi_2, + ], + ); + if !pairing.is_zero() { + return Err(anyhow::anyhow!("Expected zero during multi-pairing check")); + } + + Ok(()) + } +} + +impl
<P> PolynomialCommitmentScheme for Zeromorph<P>
+where + P: Pairing, +{ + type Commitment = ZeromorphCommitment
<P>; + type CommitmentKey = ZeromorphProverKey<P>; + type Polynomial = DenseMultilinearExtension<P::ScalarField>; + type Proof = ZeromorphProof<P>; + type VerificationKey = ZeromorphVerifierKey<P>
; + type WitnessField = P::ScalarField; + + fn polynomial_from_vec(vec: Vec) -> Self::Polynomial { + let len = vec.len(); + let next_pow2 = len.next_power_of_two(); + let mut vec2 = vec.clone(); + + // Pad with zeros if needed + if len < next_pow2 { + vec2.resize(next_pow2, Self::WitnessField::zero()); + } + + let num_vars = next_pow2.ilog2() as usize; + + DenseMultilinearExtension::from_evaluations_vec(num_vars, vec2) + } + + // TODO: use a batch_mul algorith, like in ZK Samaritan + fn setup( + degree_bounds: Vec, + rng: &mut R, + ) -> (Self::CommitmentKey, Self::VerificationKey) { + let number_of_coefficients = degree_bounds + .iter() + .map(|&x| x + 1) + .product::() + .next_power_of_two(); + + let trapdoor = univariate_hiding_kzg::Trapdoor::
<P>
::rand(rng); + let (kzg_vk_pp, kzg_commit_pp) = univariate_hiding_kzg::setup_extra( + number_of_coefficients + 1, + SrsType::PowersOfTau, + GroupGenerators::default(), + trapdoor, + ); + //let open_pp = commit_pp; + + let prover_key = ZeromorphProverKey { + commit_pp: kzg_commit_pp.clone(), + open_pp: kzg_commit_pp, + }; + + // Derive verification key + let vk = ZeromorphVerifierKey { + kzg_vk: kzg_vk_pp.vk, + tau_N_max_sub_2_N: kzg_vk_pp.g2_powers[number_of_coefficients], + }; + + (prover_key, vk) + } + + fn commit( + ck: &Self::CommitmentKey, + poly: Self::Polynomial, + r: Option, + ) -> Self::Commitment { + let r = r.expect("Should not be empty"); + Zeromorph::commit(&ck, &poly, r) + } + + fn open( + ck: &Self::CommitmentKey, + poly: Self::Polynomial, + challenge: Vec, + r: Option, + rng: &mut R, + trs: &mut merlin::Transcript, + ) -> Self::Proof { + let s = Scalar(r.expect("open(): expected randomness r, got None")); + + let eval = Self::evaluate_point(&poly, &challenge); + Zeromorph::open(&ck, &poly, &challenge, eval, s, rng, trs) + } + + // TODO: also implement this in dekart_univariate_v2... hmm or defer to hiding KZG? + fn batch_open( + ck: Self::CommitmentKey, + polys: Vec, + // coms: Vec, + challenge: Vec, + rs: Option>, + rng: &mut R, + trs: &mut merlin::Transcript, + ) -> Self::Proof { + let rs = rs.expect("rs must be present"); + + let gamma = trs.challenge_scalar(); + let gammas = powers(gamma, polys.len()); + + let combined_poly = polys + .iter() + .zip(gammas.iter()) + .fold(Self::Polynomial::zero(), |acc, (poly, gamma_i)| { + acc + poly * gamma_i + }); + let eval = Self::evaluate_point(&combined_poly, &challenge); + + let s = rs + .iter() + .zip(gammas.iter()) + .fold(Self::WitnessField::zero(), |acc, (r, gamma_i)| { + acc + (*r * gamma_i) + }); + + Zeromorph::open(&ck, &combined_poly, &challenge, eval, Scalar(s), rng, trs) + } + + fn verify( + vk: &Self::VerificationKey, + com: Self::Commitment, + challenge: Vec, + eval: Self::WitnessField, + proof: Self::Proof, + trs: &mut merlin::Transcript, + ) -> anyhow::Result<()> { + Zeromorph::verify(&vk, &com, &challenge, &eval, &proof, trs) + } + + fn random_witness(rng: &mut R) -> Self::WitnessField { + sample_field_element(rng) + } + + fn evaluate_point( + poly: &Self::Polynomial, + point: &Vec, + ) -> Self::WitnessField { + poly.evaluate(point) + } + + fn scheme_name() -> &'static [u8] { + b"Zeromorph" + } +} diff --git a/crates/aptos-dkg/src/pvss/chunky/chunked_elgamal.rs b/crates/aptos-dkg/src/pvss/chunky/chunked_elgamal.rs index c0031967c8ffe..2b26e98b227f6 100644 --- a/crates/aptos-dkg/src/pvss/chunky/chunked_elgamal.rs +++ b/crates/aptos-dkg/src/pvss/chunky/chunked_elgamal.rs @@ -2,6 +2,8 @@ // Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE use crate::{ + dlog::bsgs, + pvss::chunky::chunks, sigma_protocol, sigma_protocol::homomorphism::{self, fixed_base_msms, fixed_base_msms::Trait, EntrywiseMap}, Scalar, @@ -12,55 +14,21 @@ use aptos_crypto::arkworks::{ random::sample_field_element, }; use aptos_crypto_derive::SigmaProtocolWitness; -use ark_ec::{pairing::Pairing, AffineRepr, VariableBaseMSM}; +use ark_ec::{AffineRepr, CurveGroup}; use ark_ff::PrimeField; use ark_serialize::{ CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError, Write, }; use ark_std::fmt::Debug; +use std::collections::HashMap; pub const DST: &[u8; 35] = b"APTOS_CHUNKED_ELGAMAL_GENERATOR_DST"; // This is used to create public parameters, see 
`default()` below -// TODO: Change this to PublicParameters. Would first require changing Scalar to Scalar, which would be a bit of work -#[derive(CanonicalSerialize, CanonicalDeserialize, PartialEq, Clone, Eq, Debug)] -#[allow(non_snake_case)] -pub struct PublicParameters { - /// A group element $G$ that is raised to the encrypted message - pub G: E::G1Affine, - /// A group element $H$ that is used to exponentiate both - /// (1) the ciphertext randomness and (2) the DSK when computing its EK. - pub H: E::G1Affine, -} - -#[allow(non_snake_case)] -impl PublicParameters { - pub fn new(G: E::G1Affine, H: E::G1Affine) -> Self { - Self { G, H } - } - - pub fn message_base(&self) -> &E::G1Affine { - &self.G - } - - pub fn pubkey_base(&self) -> &E::G1Affine { - &self.H - } - - pub fn default() -> Self { - let G = hashing::unsafe_hash_to_affine(b"G", DST); - // Chunky's encryption pubkey base must match up with the blst base, since validators - // reuse their consensus keypairs as encryption keypairs - let H = E::G1Affine::generator(); - debug_assert_ne!(G, H); - Self { G, H } - } -} - /// Formally, given: /// - `G_1, H_1` ∈ G₁ (group generators) /// - `ek_i` ∈ G₁ (encryption keys) -/// - `z_i,j` ∈ Scalar (plaintext scalars z_i, chunked into z_i,j) -/// - `r_j` ∈ Scalar (randomness for each `column` of chunks z_i,j) +/// - `z_i,j` ∈ Scalar (from plaintext scalars `z_i`, each chunked into a vector z_i,j) +/// - `r_j` ∈ Scalar (randomness for `j` in a vector of chunks z_i,j) /// /// The homomorphism maps input `[z_i,j]` and randomness `[r_j]` to /// the following codomain elements: @@ -74,47 +42,50 @@ impl PublicParameters { /// and `R_j` carry the corresponding randomness contributions. #[derive(Debug, Clone, PartialEq, Eq)] #[allow(non_snake_case)] -pub struct Homomorphism<'a, E: Pairing> { - pub pp: &'a PublicParameters, // This is small so could clone it here, then no custom `CanonicalSerialize` is needed - pub eks: &'a [E::G1Affine], +pub struct WeightedHomomorphism<'a, C: CurveGroup> { + pub pp: &'a PublicParameters, // These are small so no harm in copying them here + pub eks: &'a [C::Affine], // TODO: capitalize to EKs ? } -// Identical to the previous struct as the bases are identical, but the witness will be different -#[derive(Debug, Clone, PartialEq, Eq)] #[allow(non_snake_case)] -pub struct WeightedHomomorphism<'a, E: Pairing> { - pub pp: &'a PublicParameters, - pub eks: &'a [E::G1Affine], +#[derive(CanonicalSerialize, CanonicalDeserialize, PartialEq, Clone, Eq, Debug)] +pub struct PublicParameters { + /// A group element $G$ that is raised to the encrypted message + pub G: C::Affine, + /// A group element $H$ that is used to exponentiate both + /// (1) the ciphertext randomness and (2) the DSK when computing its EK. 
+ pub H: C::Affine, } -// Need to manually implement `CanonicalSerialize` because `Homomorphism` has references instead of owned values -impl<'a, E: Pairing> CanonicalSerialize for Homomorphism<'a, E> { - fn serialize_with_mode( - &self, - mut writer: W, - compress: Compress, - ) -> Result<(), SerializationError> { - self.pp.G.serialize_with_mode(&mut writer, compress)?; - self.pp.H.serialize_with_mode(&mut writer, compress)?; - for ek in self.eks { - ek.serialize_with_mode(&mut writer, compress)?; - } - Ok(()) +#[allow(non_snake_case)] +impl PublicParameters { + pub fn new(G: C::Affine, H: C::Affine) -> Self { + Self { G, H } } - fn serialized_size(&self, compress: Compress) -> usize { - self.pp.G.serialized_size(compress) - + self.pp.H.serialized_size(compress) - + self - .eks - .iter() - .map(|ek| ek.serialized_size(compress)) - .sum::() + pub fn message_base(&self) -> &C::Affine { + &self.G + } + + pub fn pubkey_base(&self) -> &C::Affine { + &self.H + } +} + +#[allow(non_snake_case)] +impl Default for PublicParameters { + fn default() -> Self { + let G = hashing::unsafe_hash_to_affine(b"G", DST); + // Chunky's encryption pubkey base must match up with the blst base, since validators + // reuse their consensus keypairs as encryption keypairs + let H = C::Affine::generator(); + debug_assert_ne!(G, H); + Self { G, H } } } -// TODO: get rid of this copy-paste with a marker... -impl<'a, E: Pairing> CanonicalSerialize for WeightedHomomorphism<'a, E> { +// Need to manually implement `CanonicalSerialize` because `Homomorphism` has references instead of owned values +impl<'a, C: CurveGroup> CanonicalSerialize for WeightedHomomorphism<'a, C> { fn serialize_with_mode( &self, mut writer: W, @@ -140,12 +111,6 @@ impl<'a, E: Pairing> CanonicalSerialize for WeightedHomomorphism<'a, E> { } /// This struct is used as `CodomainShape`, but the same layout also applies to the `Witness` type. 
-#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq)] -pub struct CodomainShape { - pub chunks: Vec>, // Depending on T these can be chunked ciphertexts, or their MSM representations - pub randomness: Vec, // Same story, depending on T -} - #[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq)] pub struct WeightedCodomainShape { pub chunks: Vec>>, // Depending on T these can be chunked ciphertexts, or their MSM representations @@ -155,14 +120,6 @@ pub struct WeightedCodomainShape>` would later require deriving SigmaProtocolWitness for CodomainShape // (and would be overkill anyway), but this leads to issues as it expects `T` to be a Pairing, so we'll simply redefine it: -#[derive( - SigmaProtocolWitness, CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq, -)] -pub struct Witness { - pub plaintext_chunks: Vec>>, - pub plaintext_randomness: Vec>, // PlaintextRandomness, -} - #[derive( SigmaProtocolWitness, CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq, )] @@ -171,60 +128,24 @@ pub struct WeightedWitness { pub plaintext_randomness: Vec>>, // For at most max_weight, there needs to be a vector of randomness to encrypt a vector of chunks } -// type PlayerPlaintextChunks = Vec>>; -// type PlaintextRandomness = Vec>; - -impl homomorphism::Trait for Homomorphism<'_, E> { - type Codomain = CodomainShape; - type Domain = Witness; - - fn apply(&self, input: &Self::Domain) -> Self::Codomain { - self.apply_msm(self.msm_terms(input)) - } -} - -impl homomorphism::Trait for WeightedHomomorphism<'_, E> { - type Codomain = WeightedCodomainShape; - type Domain = WeightedWitness; +impl homomorphism::Trait for WeightedHomomorphism<'_, C> { + type Codomain = WeightedCodomainShape; + type Domain = WeightedWitness; fn apply(&self, input: &Self::Domain) -> Self::Codomain { self.apply_msm(self.msm_terms(input)) } } -// TODO: Can problably do EntrywiseMap with another derive macro -impl EntrywiseMap - for CodomainShape -{ - type Output = - CodomainShape; - - fn map(self, f: F) -> Self::Output - where - F: Fn(T) -> U, - U: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq, - { - let chunks = self - .chunks - .into_iter() - .map(|row| row.into_iter().map(&f).collect()) - .collect(); - - let randomness = self.randomness.into_iter().map(f).collect(); - - CodomainShape { chunks, randomness } - } -} - impl EntrywiseMap for WeightedCodomainShape { type Output = WeightedCodomainShape; - fn map(self, f: F) -> Self::Output + fn map(self, mut f: F) -> Self::Output where - F: Fn(T) -> U, + F: FnMut(T) -> U, U: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq, { let chunks = self @@ -232,7 +153,7 @@ impl Entrywis .into_iter() .map(|row| { row.into_iter() - .map(|inner_row| inner_row.into_iter().map(&f).collect::>()) + .map(|inner_row| inner_row.into_iter().map(&mut f).collect::>()) .collect::>() }) .collect(); @@ -240,25 +161,13 @@ impl Entrywis let randomness = self .randomness .into_iter() - .map(|inner_vec| inner_vec.into_iter().map(&f).collect::>()) + .map(|inner_vec| inner_vec.into_iter().map(&mut f).collect::>()) .collect(); WeightedCodomainShape { chunks, randomness } } } -// TODO: Use a derive macro? 
-impl IntoIterator for CodomainShape { - type IntoIter = std::vec::IntoIter; - type Item = T; - - fn into_iter(self) -> Self::IntoIter { - let mut combined: Vec = self.chunks.into_iter().flatten().collect(); // Temporary Vec can probably be avoided, but might require unstable Rust or a lot of lines - combined.extend(self.randomness); - combined.into_iter() - } -} - impl IntoIterator for WeightedCodomainShape { @@ -272,57 +181,14 @@ impl IntoIterator } } -#[allow(non_snake_case)] -impl<'a, E: Pairing> fixed_base_msms::Trait for Homomorphism<'a, E> { - type CodomainShape - = CodomainShape - where - T: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq; - type MsmInput = MsmInput; - type MsmOutput = E::G1; - type Scalar = E::ScalarField; - - fn msm_terms(&self, input: &Self::Domain) -> Self::CodomainShape { - // C_{i,j} = z_{i,j} * G_1 + r_j * ek[i] - let Cs = input - .plaintext_chunks - .iter() - .enumerate() - .map(|(i, z_i)| { - // here i is the player's id - chunks_msm_terms(self.pp, self.eks[i], z_i, &input.plaintext_randomness) - }) - .collect(); - - // R_j = r_j * H_1 - let Rs = input - .plaintext_randomness - .iter() - .map(|&r_j| MsmInput { - bases: vec![self.pp.H], - scalars: vec![r_j.0], - }) - .collect(); - - CodomainShape { - chunks: Cs, - randomness: Rs, - } - } - - fn msm_eval(input: Self::MsmInput) -> Self::MsmOutput { - E::G1::msm(input.bases(), input.scalars()).expect("MSM failed in ChunkedElgamal") - } -} - // Given a chunked scalar [z_j] and vector of randomness [r_j], returns a vector of MSM terms // of the vector C_j = z_j * G_1 + r_j * ek, so a vector with entries [(G_1, ek), (z_j, r_j)]_j -fn chunks_msm_terms( - pp: &PublicParameters, - ek: ::G1Affine, - chunks: &[Scalar], - correlated_randomness: &[Scalar], -) -> Vec> { +fn chunks_msm_terms( + pp: &PublicParameters, + ek: C::Affine, + chunks: &[Scalar], + correlated_randomness: &[Scalar], +) -> Vec> { chunks .iter() .zip(correlated_randomness.iter()) @@ -335,30 +201,30 @@ fn chunks_msm_terms( // Given a vector of chunked scalar [[z_j]] and vector of randomness [[r_j]], returns a vector of // vector of MSM terms. 
This is used for the weighted PVSS, where each player gets a vector of chunks -pub fn chunks_vec_msm_terms( - pp: &PublicParameters, - ek: ::G1Affine, - chunks_vec: &[Vec>], - correlated_randomness_vec: &[Vec>], -) -> Vec>> { +pub fn chunks_vec_msm_terms( + pp: &PublicParameters, + ek: C::Affine, + chunks_vec: &[Vec>], + correlated_randomness_vec: &[Vec>], +) -> Vec>> { chunks_vec .iter() .zip(correlated_randomness_vec.iter()) .map(|(chunks, correlated_randomness)| { - chunks_msm_terms(pp, ek, chunks, correlated_randomness) + chunks_msm_terms::(pp, ek, chunks, correlated_randomness) }) .collect() } #[allow(non_snake_case)] -impl<'a, E: Pairing> fixed_base_msms::Trait for WeightedHomomorphism<'a, E> { +impl<'a, C: CurveGroup> fixed_base_msms::Trait for WeightedHomomorphism<'a, C> { type CodomainShape = WeightedCodomainShape where T: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq; - type MsmInput = MsmInput; - type MsmOutput = E::G1; - type Scalar = E::ScalarField; + type MsmInput = MsmInput; + type MsmOutput = C; + type Scalar = C::ScalarField; fn msm_terms(&self, input: &Self::Domain) -> Self::CodomainShape { // C_{i,j} = z_{i,j} * G_1 + r_j * ek[i] @@ -368,7 +234,7 @@ impl<'a, E: Pairing> fixed_base_msms::Trait for WeightedHomomorphism<'a, E> { .enumerate() .map(|(i, z_i)| { // here `i` is the player's id - chunks_vec_msm_terms(self.pp, self.eks[i], z_i, &input.plaintext_randomness) + chunks_vec_msm_terms::(self.pp, self.eks[i], z_i, &input.plaintext_randomness) }) .collect(); @@ -394,17 +260,11 @@ impl<'a, E: Pairing> fixed_base_msms::Trait for WeightedHomomorphism<'a, E> { } fn msm_eval(input: Self::MsmInput) -> Self::MsmOutput { - E::G1::msm(input.bases(), input.scalars()).expect("MSM failed in ChunkedElgamal") - } -} - -impl<'a, E: Pairing> sigma_protocol::Trait for Homomorphism<'a, E> { - fn dst(&self) -> Vec { - DST.to_vec() + C::msm(input.bases(), input.scalars()).expect("MSM failed in ChunkedElgamal") } } -impl<'a, E: Pairing> sigma_protocol::Trait for WeightedHomomorphism<'a, E> { +impl<'a, C: CurveGroup> sigma_protocol::Trait for WeightedHomomorphism<'a, C> { fn dst(&self) -> Vec { let mut result = b"WEIGHTED_".to_vec(); result.extend(DST); @@ -412,159 +272,189 @@ impl<'a, E: Pairing> sigma_protocol::Trait for WeightedHomomorphism<'a, E } } -pub(crate) fn correlated_randomness(rng: &mut R, radix: u64, num_chunks: u32) -> Vec +pub fn correlated_randomness( + rng: &mut R, + radix: u64, + num_chunks: u32, + target_sum: &F, +) -> Vec where - F: ark_ff::PrimeField, + F: PrimeField, // need `PrimeField` here because of `sample_field_element()` R: rand_core::RngCore + rand_core::CryptoRng, { - let mut r_vals = Vec::with_capacity(num_chunks as usize); - r_vals.push(F::zero()); // placeholder for r_0 - let mut remainder = F::zero(); - - // Precompute radix as F once + let mut r_vals = vec![F::zero(); num_chunks as usize]; + let mut remaining = *target_sum; let radix_f = F::from(radix); let mut cur_base = radix_f; - // Fill r_1 .. r_{num_chunks-1} randomly - for _ in 1..num_chunks { - let r = sample_field_element(rng); - r_vals.push(r); - remainder -= r * cur_base; + for i in 1..(num_chunks as usize) { + r_vals[i] = sample_field_element(rng); + remaining -= r_vals[i] * cur_base; cur_base *= radix_f; } - - r_vals[0] = remainder; + r_vals[0] = remaining; r_vals } -pub(crate) fn num_chunks_per_scalar(ell: u8) -> u32 { +pub fn num_chunks_per_scalar(ell: u8) -> u32 { F::MODULUS_BIT_SIZE.div_ceil(ell as u32) // Maybe add `as usize` here? 
} +/// Decrypt a vector of chunked ciphertexts using the corresponding committed randomness and decryption keys +/// +/// # Arguments +/// - `Cs_rows`: slice of vectors, each inner vector contains chunks for one scalar. +/// - `Rs_rows`: slice of vectors, same shape as `Cs_rows`, contains corresponding committed randomness/keys. +/// - `dk`: decryption key for the player. +/// - `pp`: public parameters (provides group generator). +/// - `table`: precomputed BSGS table for discrete log. +/// - `radix_exponent`: exponent used to split/reconstruct chunks. +/// +/// # Returns +/// - Vec of decrypted scalars. +#[allow(non_snake_case)] +pub fn decrypt_chunked_scalars( + Cs_rows: &[Vec], + Rs_rows: &[Vec], + dk: &C::ScalarField, + pp: &PublicParameters, + table: &HashMap, u32>, + radix_exponent: u8, +) -> Vec { + let mut decrypted_scalars = Vec::with_capacity(Cs_rows.len()); + + for (row, Rs_row) in Cs_rows.iter().zip(Rs_rows.iter()) { + // Compute C - d_k * R for each chunk + let exp_chunks: Vec = row + .iter() + .zip(Rs_row.iter()) + .map(|(C_ij, &R_j)| C_ij.sub(R_j * *dk)) + .collect(); + + // Recover plaintext chunks + let chunk_values: Vec<_> = + bsgs::dlog_vec(pp.G.into_group(), &exp_chunks, &table, 1 << radix_exponent) + .expect("dlog_vec failed") + .into_iter() + .map(|x| C::ScalarField::from(x)) + .collect(); + + // Convert chunks back to scalar + let recovered = chunks::le_chunks_to_scalar(radix_exponent, &chunk_values); + + decrypted_scalars.push(recovered); + } + + decrypted_scalars +} + #[cfg(test)] mod tests { use super::*; - use crate::{dlog, dlog::bsgs, pvss::chunky::chunks, sigma_protocol::homomorphism::Trait as _}; + use crate::{dlog, pvss::chunky::chunks, sigma_protocol::homomorphism::Trait as _}; use aptos_crypto::{ - arkworks::random::{sample_field_elements, unsafe_random_points}, - utils, + arkworks::{random::sample_field_elements, shamir::ShamirThresholdConfig}, + weighted_config::WeightedConfig, }; - use ark_ec::{AffineRepr, CurveGroup}; + use ark_ec::CurveGroup; use rand::thread_rng; - use std::ops::Sub; - fn prepare_chunked_witness( - num_values: usize, + fn test_correlated_randomness_generic() { + let mut rng = thread_rng(); + let target_sum = F::one(); + let radix: u64 = 4; + let num_chunks: u8 = 8; + + let coefs = correlated_randomness(&mut rng, radix, num_chunks as u32, &target_sum); + + // Compute actual sum: Σ coef[i] * radix^i + let actual_sum: F = (0..num_chunks) + .map(|i| coefs[i as usize] * F::from(radix.pow(i as u32))) + .sum(); + + assert_eq!(target_sum, actual_sum); + } + + #[test] + fn test_correlated_randomness_bn254() { + use ark_bn254::Fr; + test_correlated_randomness_generic::(); + } + + fn prepare_chunked_witness( + sc: WeightedConfig>, ell: u8, - ) -> (Vec, Witness, u8, u32) { + ) -> (Vec, WeightedWitness, u8, u32) { let mut rng = thread_rng(); // 1. Generate random values - let zs = sample_field_elements(num_values, &mut rng); + let zs = sample_field_elements(sc.get_total_weight(), &mut rng); // 2. Compute number of chunks - let number_of_chunks = num_chunks_per_scalar::(ell); + let number_of_chunks = num_chunks_per_scalar::(ell); // 3. Generate correlated randomness - let rs: Vec = correlated_randomness(&mut rng, 1 << ell, number_of_chunks); + let rs: Vec> = (0..sc.get_max_weight()) + .map(|_| correlated_randomness(&mut rng, 1 << ell, number_of_chunks, &F::ZERO)) + .collect(); // 4. 
Convert values into little-endian chunks - let chunked_values: Vec> = zs + let chunked_values: Vec> = zs .iter() .map(|z| chunks::scalar_to_le_chunks(ell, z)) .collect(); // 5. Build witness - let witness = Witness { - plaintext_chunks: Scalar::::vecvec_from_inner(chunked_values), - plaintext_randomness: Scalar::vec_from_inner(rs), + let witness = WeightedWitness { + plaintext_chunks: sc.group_by_player(&Scalar::vecvec_from_inner(chunked_values)), + plaintext_randomness: Scalar::vecvec_from_inner(rs), }; (zs, witness, ell, number_of_chunks) } #[allow(non_snake_case)] - fn test_reconstruct_ciphertexts() { - let (zs, witness, radix_exponent, _num_chunks) = prepare_chunked_witness::(2, 16); + fn test_decrypt_roundtrip() { + // 2-out-of-3, weights 2 1 + let sc = + WeightedConfig::>::new(2, vec![2, 1]).unwrap(); - // 6. Initialize the homomorphism - let pp: PublicParameters = PublicParameters::default(); - - let hom = Homomorphism { - pp: &pp, - eks: &E::G1::normalize_batch(&unsafe_random_points(2, &mut thread_rng())), // Randomly generate encryption keys, we won't use them - }; - - // 7. Apply homomorphism to obtain chunked ciphertexts - let CodomainShape { chunks: Cs, .. } = hom.apply(&witness); - - // 8. Reconstruct original values from the chunked ciphertexts - for (i, &orig_val) in zs.iter().enumerate() { - let powers_of_radix: Vec = - utils::powers(E::ScalarField::from(1u64 << radix_exponent), Cs[i].len()); - - // perform the MSM to reconstruct the encryption of z_i - let reconstructed = E::G1::msm(&E::G1::normalize_batch(&Cs[i]), &powers_of_radix) - .expect("MSM reconstruction failed"); - - let expected = *pp.message_base() * orig_val; - assert_eq!( - reconstructed, expected, - "Reconstructed value {} does not match original", - i - ); - } - } - - // This is essentially a more advanced version of the previous test... so remove that one? - #[allow(non_snake_case)] - fn test_decrypt_roundtrip() { - let (zs, witness, radix_exponent, _num_chunks) = prepare_chunked_witness::(2, 16); + let (zs, witness, radix_exponent, _num_chunks) = + prepare_chunked_witness::(sc, 16); // 6. Initialize the homomorphism - let pp: PublicParameters = PublicParameters::default(); - let dks: Vec = sample_field_elements(2, &mut thread_rng()); + let pp: PublicParameters = PublicParameters::default(); + let dks: Vec = sample_field_elements(2, &mut thread_rng()); - let hom = Homomorphism { + let hom = WeightedHomomorphism:: { pp: &pp, - eks: &E::G1::normalize_batch(&[pp.H * dks[0], pp.H * dks[1]]), + eks: &C::normalize_batch(&[pp.H * dks[0], pp.H * dks[1]]), // 2 players }; // 7. Apply homomorphism to obtain chunked ciphertexts - let CodomainShape:: { + let WeightedCodomainShape:: { chunks: Cs, randomness: Rs, } = hom.apply(&witness); // 8. Build a baby-step giant-step table for computing discrete logs - let table = dlog::table::build::(pp.G.into(), 1u32 << (radix_exponent / 2)); + let table = dlog::table::build::(pp.G.into(), 1u32 << (radix_exponent / 2)); // 9. 
Perform decryption of each ciphertext and reconstruct plaintexts + // TODO: call some built-in function for this instead let mut decrypted_scalars = Vec::new(); - for i in 0..2 { - // Compute C - d_k * R for all chunks - let exponentiated_chunks: Vec = Cs[i] - .iter() - .zip(Rs.iter()) - .map(|(C_ij, &R_j)| C_ij.sub(R_j * dks[i])) - .collect(); - - // Recover plaintext chunk values - let chunks: Vec<_> = bsgs::dlog_vec( - pp.G.into_group(), - &exponentiated_chunks, + for player_id in 0..Cs.len() { + let decrypted_for_player = decrypt_chunked_scalars( + &Cs[player_id], + &Rs, + &dks[player_id], + &pp, &table, - 1 << radix_exponent, - ) - .expect("dlog_vec failed") - .into_iter() - .map(|x| E::ScalarField::from(x)) - .collect(); + radix_exponent, + ); - // Convert chunks back to scalar - let recovered = chunks::le_chunks_to_scalar(radix_exponent, &chunks); - decrypted_scalars.push(recovered); + decrypted_scalars.extend(decrypted_for_player); } // 10. Compare decrypted scalars to original plaintexts @@ -577,13 +467,8 @@ mod tests { } } - #[test] - fn test_reconstruct_ciphertexts_bn254() { - test_reconstruct_ciphertexts::(); - } - #[test] fn test_decrypt_roundtrip_bn254() { - test_decrypt_roundtrip::(); + test_decrypt_roundtrip::(); } } diff --git a/crates/aptos-dkg/src/pvss/chunky/chunked_scalar_mul.rs b/crates/aptos-dkg/src/pvss/chunky/chunked_scalar_mul.rs new file mode 100644 index 0000000000000..49cdc75773458 --- /dev/null +++ b/crates/aptos-dkg/src/pvss/chunky/chunked_scalar_mul.rs @@ -0,0 +1,190 @@ +// Copyright (c) Aptos Foundation +// Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE + +use crate::{ + pvss::chunky::chunks::le_chunks_to_scalar, + sigma_protocol, + sigma_protocol::{ + homomorphism, + homomorphism::{fixed_base_msms, fixed_base_msms::Trait, EntrywiseMap}, + }, + Scalar, +}; +use aptos_crypto::arkworks::msm::{IsMsmInput, MsmInput}; +use aptos_crypto_derive::SigmaProtocolWitness; +use ark_ec::CurveGroup; +use ark_ff::PrimeField; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use std::fmt::Debug; + +pub const DST: &[u8; 34] = b"APTOS_CHUNKED_COMMIT_HOM_SIGMA_DST"; + +// TODO: arrange things by player... +/// In this file we set up the following "commitment" homomorphism: +/// Commit to chunked scalars by unchunking them and multiplying a base group element (in affine representation) +/// with each unchunked scalar. +/// +/// Equivalent to `[base * unchunk(chunk) for chunks in chunked_scalars]`. 
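+///
+/// For intuition, a small worked example (hypothetical values; per `le_chunks_to_scalar`,
+/// chunks are little-endian `ell`-bit limbs): with `ell = 16` and chunks `[c_0, c_1, c_2]`,
+/// the unchunked scalar is `z = c_0 + c_1 * 2^16 + c_2 * 2^32`, and this homomorphism maps
+/// that entry to `base * z`.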
+#[derive(CanonicalSerialize, Debug, Clone, PartialEq, Eq)] +pub struct Homomorphism { + pub base: C::Affine, + pub ell: u8, +} + +// pub type CodomainShape = VectorShape; +#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq)] +pub struct CodomainShape(pub Vec>); + +impl EntrywiseMap for CodomainShape +where + T: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq, +{ + type Output + = CodomainShape + where + U: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq; + + fn map(self, mut f: F) -> Self::Output + where + F: FnMut(T) -> U, + U: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq, + { + CodomainShape( + self.0 + .into_iter() + .map(|row| row.into_iter().map(&mut f).collect()) + .collect(), + ) + } +} + +impl IntoIterator for CodomainShape +where + T: CanonicalSerialize + CanonicalDeserialize + Clone, +{ + type IntoIter = std::vec::IntoIter; + type Item = T; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter().flatten().collect::>().into_iter() + } +} + +#[derive( + SigmaProtocolWitness, CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq, +)] +pub struct Witness { + pub chunked_values: Vec>>>, +} + +impl homomorphism::Trait for Homomorphism { + type Codomain = CodomainShape; + type Domain = Witness; + + fn apply(&self, input: &Self::Domain) -> Self::Codomain { + self.apply_msm(self.msm_terms(input)) + } +} + +impl fixed_base_msms::Trait for Homomorphism { + type CodomainShape + = CodomainShape + where + T: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq; + type MsmInput = MsmInput; + type MsmOutput = C; + type Scalar = C::ScalarField; + + fn msm_terms(&self, input: &Self::Domain) -> Self::CodomainShape { + let rows: Vec> = input + .chunked_values + .iter() + .map(|row| { + row.iter() + .map(|chunks| MsmInput { + bases: vec![self.base.clone()], + scalars: vec![le_chunks_to_scalar( + self.ell, + &Scalar::slice_as_inner(chunks), + )], + }) + .collect() + }) + .collect(); + + CodomainShape(rows) + } + + fn msm_eval(input: Self::MsmInput) -> Self::MsmOutput { + C::msm(input.bases(), input.scalars()).expect("MSM failed in Schnorr") // TODO: custom MSM here, because only length 1 MSM except during verification + } +} + +impl sigma_protocol::Trait for Homomorphism { + fn dst(&self) -> Vec { + DST.to_vec() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + pvss::chunky::chunks::{le_chunks_to_scalar, scalar_to_le_chunks}, + sigma_protocol::homomorphism::Trait as _, + }; + use aptos_crypto::arkworks::random::{sample_field_elements, unsafe_random_point}; + use ark_bls12_381::G1Projective; + use rand::thread_rng; + + #[test] + #[allow(non_snake_case)] + fn test_chunked_homomorphism_ell_16() { + let mut rng = thread_rng(); + + // Parameters + let ell: u8 = 16; + let num_scalars = 8; + + // Random base + let base = unsafe_random_point::(&mut rng); + + // Create random scalars + let scalars = sample_field_elements(num_scalars, &mut rng); + + // Chunk each scalar into little-endian chunks of size `ell` + let chunked_values: Vec>>> = scalars + .iter() + .map(|s| { + vec![scalar_to_le_chunks(ell, s) + .into_iter() + .map(|chunk| Scalar(chunk)) + .collect::>()] + }) + .collect(); + + let witness = Witness { + chunked_values: chunked_values.clone(), + }; + + let hom = Homomorphism:: { base, ell }; + + // Apply the homomorphism + let CodomainShape(outputs) = hom.apply(&witness); + + // Check correctness: + // base * unchunk(chunks) == output + for (player_chunks, player_Vs) in 
chunked_values.iter().zip(outputs.iter()) { + for (scalar_chunks, V) in player_chunks.iter().zip(player_Vs.iter()) { + let reconstructed = + le_chunks_to_scalar(ell, &Scalar::slice_as_inner(scalar_chunks)); + + let expected = base * reconstructed; + assert_eq!( + *V, expected, + "Homomorphism output does not match expected base * scalar" + ); + } + } + } +} diff --git a/crates/aptos-dkg/src/pvss/chunky/chunks.rs b/crates/aptos-dkg/src/pvss/chunky/chunks.rs index f0265253a60ab..706d02890d778 100644 --- a/crates/aptos-dkg/src/pvss/chunky/chunks.rs +++ b/crates/aptos-dkg/src/pvss/chunky/chunks.rs @@ -3,8 +3,8 @@ use ark_ff::{BigInteger, PrimeField}; -/// Converts a field element into little-endian chunks of `num_bits` bits. -pub(crate) fn scalar_to_le_chunks(num_bits: u8, scalar: &F) -> Vec { +/// Converts a field element into little-endian chunks of `num_bits` bits. Made `pub` for tests +pub fn scalar_to_le_chunks(num_bits: u8, scalar: &F) -> Vec { assert!( num_bits.is_multiple_of(8) && num_bits > 0 && num_bits <= 64, "Invalid chunk size" @@ -28,8 +28,8 @@ pub(crate) fn scalar_to_le_chunks(num_bits: u8, scalar: &F) -> Ve chunks } -/// Reconstructs a field element from `num_bits`-bit chunks (little-endian order). -pub(crate) fn le_chunks_to_scalar(num_bits: u8, chunks: &[F]) -> F { +/// Reconstructs a field element from `num_bits`-bit chunks (little-endian order). Made `pub` for tests +pub fn le_chunks_to_scalar(num_bits: u8, chunks: &[F]) -> F { assert!( num_bits.is_multiple_of(8) && num_bits > 0 && num_bits <= 64, // TODO: so make num_bits a u8? "Invalid chunk size" diff --git a/crates/aptos-dkg/src/pvss/chunky/hkzg_chunked_elgamal.rs b/crates/aptos-dkg/src/pvss/chunky/hkzg_chunked_elgamal.rs index 6924a033fe825..c0154c7bbe5e2 100644 --- a/crates/aptos-dkg/src/pvss/chunky/hkzg_chunked_elgamal.rs +++ b/crates/aptos-dkg/src/pvss/chunky/hkzg_chunked_elgamal.rs @@ -16,14 +16,14 @@ use crate::{ }; use aptos_crypto::{ arkworks::random::{ - sample_field_element, sample_field_elements, unsafe_random_point, unsafe_random_points, - UniformRand, + sample_field_element, sample_field_elements, unsafe_random_point_group, + unsafe_random_points_group, UniformRand, }, weighted_config::WeightedConfigArkworks, SecretSharingConfig, }; use aptos_crypto_derive::SigmaProtocolWitness; -use ark_ec::{pairing::Pairing, AdditiveGroup}; +use ark_ec::{pairing::Pairing, AdditiveGroup, AffineRepr, CurveGroup}; use ark_ff::PrimeField; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; @@ -41,15 +41,6 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; /// - the HKZG randomness, /// - the chunked plaintexts, and /// - the ElGamal randomness. -#[derive( - SigmaProtocolWitness, CanonicalSerialize, CanonicalDeserialize, Debug, Clone, PartialEq, Eq, -)] -pub struct HkzgElgamalWitness { - pub hkzg_randomness: univariate_hiding_kzg::CommitmentRandomness, - pub chunked_plaintexts: Vec>>, // For each plaintext z_i, a chunk z_{i,j} - pub elgamal_randomness: Vec>, // For each chunk, a blinding factor -} - #[derive( SigmaProtocolWitness, CanonicalSerialize, CanonicalDeserialize, Debug, Clone, PartialEq, Eq, )] @@ -70,22 +61,13 @@ pub struct HkzgWeightedElgamalWitness { /// two components: in each case, the witness omits (or “ignores”) one of its three fields, then applies /// a homomorphism. Thus, the overall homomorphism of the Σ-protocol can be viewed as a tuple of two /// *lifted* homomorphisms. 
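+/// Roughly (an informal sketch, not the exact trait machinery): each lift stores an inner
+/// homomorphism `hom` together with a `projection` out of the larger witness type, and evaluates as
+///
+///     lifted.hom.apply(&(lifted.projection)(witness))
+///
+/// while the tuple homomorphism applies its two components to the same witness `w` and returns
+/// the pair `(hom1.apply(w), hom2.apply(w))`, so a single Σ-protocol covers both statements at once.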
-type LiftedHkzg<'a, E> = LiftHomomorphism< - univariate_hiding_kzg::CommitmentHomomorphism<'a, E>, - HkzgElgamalWitness<::ScalarField>, ->; -type LiftedChunkedElgamal<'a, E> = LiftHomomorphism< - chunked_elgamal::Homomorphism<'a, E>, - HkzgElgamalWitness<::ScalarField>, ->; - type LiftedHkzgWeighted<'a, E> = LiftHomomorphism< univariate_hiding_kzg::CommitmentHomomorphism<'a, E>, HkzgWeightedElgamalWitness<::ScalarField>, >; -type LiftedWeightedChunkedElgamal<'a, E> = LiftHomomorphism< - chunked_elgamal::WeightedHomomorphism<'a, E>, - HkzgWeightedElgamalWitness<::ScalarField>, +type LiftedWeightedChunkedElgamal<'a, C> = LiftHomomorphism< + chunked_elgamal::WeightedHomomorphism<'a, C>, + HkzgWeightedElgamalWitness<<::Affine as AffineRepr>::ScalarField>, >; // ┌───────────────────────────────┐ @@ -138,67 +120,40 @@ type LiftedWeightedChunkedElgamal<'a, E> = LiftHomomorphism< // TODO: note here that we had to put a zero before z_{i,j}, because that's what DeKARTv2 is doing. So maybe // it would make more sense to say this is a tuple homomorphism consisting of (lifts of) the // DeKARTv2::commitment_homomorphism together with the chunked_elgamal::homomorphism. -pub type Homomorphism<'a, E> = TupleHomomorphism, LiftedChunkedElgamal<'a, E>>; -pub type WeightedHomomorphism<'a, E> = - TupleHomomorphism, LiftedWeightedChunkedElgamal<'a, E>>; +//pub type Homomorphism<'a, E> = TupleHomomorphism, LiftedChunkedElgamal<'a, ::G1>>; +pub type WeightedHomomorphism<'a, E> = TupleHomomorphism< + LiftedHkzgWeighted<'a, E>, + LiftedWeightedChunkedElgamal<'a, ::G1>, +>; -pub type Proof<'a, E> = sigma_protocol::Proof<::ScalarField, Homomorphism<'a, E>>; pub type WeightedProof<'a, E> = sigma_protocol::Proof<::ScalarField, WeightedHomomorphism<'a, E>>; -impl<'a, E: Pairing> Proof<'a, E> { - /// Generates a random looking proof (but not a valid one). - /// Useful for testing and benchmarking. - pub fn generate( - n: usize, - number_of_chunks: usize, - rng: &mut R, - ) -> Self { - // or should number_of_chunks be a const? - Self { - first_proof_item: FirstProofItem::Commitment(TupleCodomainShape( - TrivialShape(unsafe_random_point(rng)), // because TrivialShape is the codomain of univariate_hiding_kzg::CommitmentHomomorphism. TODO: develop generate() methods there? Maybe make it part of sigma_protocol::Trait ? - chunked_elgamal::CodomainShape { - chunks: vec![vec![unsafe_random_point(rng); number_of_chunks]; n], - randomness: vec![unsafe_random_point(rng); number_of_chunks], - }, - )), - z: HkzgElgamalWitness { - hkzg_randomness: - univariate_hiding_kzg::CommitmentRandomness::::rand(rng), - chunked_plaintexts: vec![ - vec![Scalar(sample_field_element(rng)); number_of_chunks]; - n - ], - elgamal_randomness: vec![Scalar(sample_field_element(rng)); number_of_chunks], - }, - } - } -} - impl<'a, E: Pairing> WeightedProof<'a, E> { /// Generates a random looking proof (but not a valid one). /// Useful for testing and benchmarking. pub fn generate( sc: &WeightedConfigArkworks, - number_of_chunks: usize, + number_of_chunks_per_share: usize, rng: &mut R, ) -> Self { - // or should number_of_chunks be a const? + // or should number_of_chunks_per_share be a const? Self { first_proof_item: FirstProofItem::Commitment(TupleCodomainShape( - TrivialShape(unsafe_random_point(rng)), // because TrivialShape is the codomain of univariate_hiding_kzg::CommitmentHomomorphism. TODO: develop generate() methods there? Maybe make it part of sigma_protocol::Trait ? 
+ TrivialShape(unsafe_random_point_group(rng)), // because TrivialShape is the codomain of univariate_hiding_kzg::CommitmentHomomorphism. TODO: develop generate() methods there? Maybe make it part of sigma_protocol::Trait ? chunked_elgamal::WeightedCodomainShape { chunks: (0..sc.get_total_num_players()) .map(|i| { let w = sc.get_player_weight(&sc.get_player(i)); // TODO: combine these functions... (0..w) - .map(|_| unsafe_random_points(number_of_chunks, rng)) + .map(|_| { + unsafe_random_points_group(number_of_chunks_per_share, rng) + }) .collect() }) .collect(), randomness: vec![ - unsafe_random_points(number_of_chunks, rng); + unsafe_random_points_group(number_of_chunks_per_share, rng); sc.get_max_weight() ], }, @@ -211,13 +166,19 @@ impl<'a, E: Pairing> WeightedProof<'a, E> { let w = sc.get_player_weight(&sc.get_player(i)); // TODO: combine these functions... (0..w) .map(|_| { - Scalar::vec_from_inner(sample_field_elements(number_of_chunks, rng)) + Scalar::vec_from_inner(sample_field_elements( + number_of_chunks_per_share, + rng, + )) }) .collect() }) .collect(), elgamal_randomness: vec![ - vec![Scalar(sample_field_element(rng)); number_of_chunks]; + vec![ + Scalar(sample_field_element(rng)); + number_of_chunks_per_share + ]; sc.get_max_weight() ], }, @@ -225,70 +186,20 @@ impl<'a, E: Pairing> WeightedProof<'a, E> { } } -#[allow(non_snake_case)] -impl<'a, E: Pairing> Homomorphism<'a, E> { - pub fn new( - lagr_g1: &'a [E::G1Affine], - xi_1: E::G1Affine, - pp: &'a chunked_elgamal::PublicParameters, - eks: &'a [E::G1Affine], - ) -> Self { - // Set up the HKZG homomorphism, and use a projection map to lift it to HkzgElgamalWitness - let lifted_hkzg = LiftedHkzg:: { - hom: univariate_hiding_kzg::CommitmentHomomorphism { lagr_g1, xi_1 }, - // The projection map ignores the `elgamal_randomness` component, and flattens the vector of chunked plaintexts after adding a zero - projection: |dom: &HkzgElgamalWitness| { - let HkzgElgamalWitness { - hkzg_randomness, - chunked_plaintexts, - .. - } = dom; - let flattened_chunked_plaintexts: Vec> = - std::iter::once(Scalar(E::ScalarField::ZERO)) - .chain(chunked_plaintexts.iter().flatten().cloned()) - .collect(); - univariate_hiding_kzg::Witness:: { - hiding_randomness: hkzg_randomness.clone(), - values: flattened_chunked_plaintexts, - } - }, - }; - // Set up the chunked_elgamal homomorphism, and use a projection map to lift it to HkzgElgamalWitness - let lifted_chunked_elgamal = LiftedChunkedElgamal:: { - hom: chunked_elgamal::Homomorphism { pp, eks }, - // The projection map simply ignores the `hkzg_randomness` component - projection: |dom: &HkzgElgamalWitness| { - let HkzgElgamalWitness { - chunked_plaintexts, - elgamal_randomness, - .. 
- } = dom; - chunked_elgamal::Witness { - plaintext_chunks: chunked_plaintexts.clone(), - plaintext_randomness: elgamal_randomness.clone(), - } - }, - }; - - // Combine the two lifted homomorphisms just constructed, into the required TupleHomomorphism - Self { - hom1: lifted_hkzg, - hom2: lifted_chunked_elgamal, - } - } -} - #[allow(non_snake_case)] impl<'a, E: Pairing> WeightedHomomorphism<'a, E> { pub fn new( lagr_g1: &'a [E::G1Affine], xi_1: E::G1Affine, - pp: &'a chunked_elgamal::PublicParameters, + pp: &'a chunked_elgamal::PublicParameters, eks: &'a [E::G1Affine], ) -> Self { // Set up the HKZG homomorphism, and use a projection map to lift it to HkzgElgamalWitness let lifted_hkzg = LiftedHkzgWeighted:: { - hom: univariate_hiding_kzg::CommitmentHomomorphism { lagr_g1, xi_1 }, + hom: univariate_hiding_kzg::CommitmentHomomorphism { + msm_basis: lagr_g1, + xi_1, + }, // The projection map ignores the `elgamal_randomness` component, and flattens the vector of chunked plaintexts after adding a zero projection: |dom: &HkzgWeightedElgamalWitness| { let HkzgWeightedElgamalWitness { @@ -307,7 +218,7 @@ impl<'a, E: Pairing> WeightedHomomorphism<'a, E> { }, }; // Set up the chunked_elgamal homomorphism, and use a projection map to lift it to HkzgElgamalWitness - let lifted_chunked_elgamal = LiftedWeightedChunkedElgamal:: { + let lifted_chunked_elgamal = LiftedWeightedChunkedElgamal:: { hom: chunked_elgamal::WeightedHomomorphism { pp, eks }, // The projection map simply ignores the `hkzg_randomness` component projection: |dom: &HkzgWeightedElgamalWitness| { diff --git a/crates/aptos-dkg/src/pvss/chunky/hkzg_chunked_elgamal_commit.rs b/crates/aptos-dkg/src/pvss/chunky/hkzg_chunked_elgamal_commit.rs new file mode 100644 index 0000000000000..833d783511090 --- /dev/null +++ b/crates/aptos-dkg/src/pvss/chunky/hkzg_chunked_elgamal_commit.rs @@ -0,0 +1,106 @@ +// Copyright (c) Aptos Foundation +// Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE + +use crate::{ + pvss::chunky::{ + chunked_elgamal, chunked_scalar_mul, hkzg_chunked_elgamal, + hkzg_chunked_elgamal::HkzgWeightedElgamalWitness, + }, + sigma_protocol, + sigma_protocol::{ + homomorphism::{ + tuple::{PairingTupleHomomorphism, TupleCodomainShape}, + LiftHomomorphism, + }, + traits::FirstProofItem, + }, +}; +use aptos_crypto::{ + arkworks::random::unsafe_random_points_group, weighted_config::WeightedConfigArkworks, + SecretSharingConfig, +}; +use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; + +type HkzgElgamalHomomorphism<'a, E> = hkzg_chunked_elgamal::WeightedHomomorphism<'a, E>; +type LiftedCommitHomomorphism = LiftHomomorphism< + chunked_scalar_mul::Homomorphism, + HkzgWeightedElgamalWitness<<::Affine as AffineRepr>::ScalarField>, +>; + +pub type Homomorphism<'a, E> = PairingTupleHomomorphism< + E, + HkzgElgamalHomomorphism<'a, E>, + LiftedCommitHomomorphism<::G2>, +>; +pub type Proof<'a, E> = sigma_protocol::Proof<::ScalarField, Homomorphism<'a, E>>; + +impl<'a, E: Pairing> Proof<'a, E> { + /// Generates a random looking proof (but not a valid one). + /// Useful for testing and benchmarking. + pub fn generate( + sc: &WeightedConfigArkworks, + number_of_chunks_per_share: usize, + rng: &mut R, + ) -> Self { + // or should number_of_chunks_per_share be a const? 
+ let hkzg_chunked_elgamal::WeightedProof:: { + first_proof_item, + z, + } = hkzg_chunked_elgamal::WeightedProof::generate(sc, number_of_chunks_per_share, rng); + match first_proof_item { + FirstProofItem::Commitment(first_proof_item_inner) => { + Self { + first_proof_item: FirstProofItem::Commitment(TupleCodomainShape( + first_proof_item_inner, + chunked_scalar_mul::CodomainShape::( + (0..sc.get_total_num_players()) // TODO: make this stuff less complicated!!! + .map(|i| { + let w = sc.get_player_weight(&sc.get_player(i)); // TODO: combine these functions... + unsafe_random_points_group(w, rng) + }) + .collect(), + ), + )), + z, + } + }, + FirstProofItem::Challenge(_) => { + panic!("Unexpected Challenge variant!"); + }, + } + } +} + +#[allow(non_snake_case)] +impl<'a, E: Pairing> Homomorphism<'a, E> { + pub fn new( + lagr_g1: &'a [E::G1Affine], + xi_1: E::G1Affine, + pp: &'a chunked_elgamal::PublicParameters, + eks: &'a [E::G1Affine], + base: E::G2Affine, + ell: u8, + ) -> Self { + // Set up the HKZG-EG homomorphism, and use a projection map to lift it to HkzgElgamalCommitWitness + let hkzg_el_hom = + hkzg_chunked_elgamal::WeightedHomomorphism::::new(lagr_g1, xi_1, pp, eks); + + // Set up the lifted commit homomorphism + let lifted_commit_hom = LiftedCommitHomomorphism:: { + hom: chunked_scalar_mul::Homomorphism { base, ell }, + // The projection map simply unchunks the chunks + projection: |dom: &HkzgWeightedElgamalWitness| { + chunked_scalar_mul::Witness { + chunked_values: dom.chunked_plaintexts.clone(), + } + }, + }; + + // Combine the two lifted homomorphisms just constructed, into the required `TupleHomomorphism` + Self { + hom1: hkzg_el_hom, + hom2: lifted_commit_hom, + _pairing: std::marker::PhantomData, + } + } +} diff --git a/crates/aptos-dkg/src/pvss/chunky/keys.rs b/crates/aptos-dkg/src/pvss/chunky/keys.rs index c1be2ce4db6de..3b1419d28cbfe 100644 --- a/crates/aptos-dkg/src/pvss/chunky/keys.rs +++ b/crates/aptos-dkg/src/pvss/chunky/keys.rs @@ -71,11 +71,11 @@ impl Uniform for DecryptPrivKey { } } -impl traits::Convert, chunked_elgamal::PublicParameters> +impl traits::Convert, chunked_elgamal::PublicParameters> for DecryptPrivKey { /// Given a decryption key $dk$, computes its associated encryption key $H^{dk}$ - fn to(&self, pp_elgamal: &chunked_elgamal::PublicParameters) -> EncryptPubKey { + fn to(&self, pp_elgamal: &chunked_elgamal::PublicParameters) -> EncryptPubKey { EncryptPubKey:: { ek: pp_elgamal.pubkey_base().mul(self.dk).into_affine(), } diff --git a/crates/aptos-dkg/src/pvss/chunky/mod.rs b/crates/aptos-dkg/src/pvss/chunky/mod.rs index 66735a0e72581..401728ccb0401 100644 --- a/crates/aptos-dkg/src/pvss/chunky/mod.rs +++ b/crates/aptos-dkg/src/pvss/chunky/mod.rs @@ -4,22 +4,25 @@ use crate::pvss::signed::GenericSigning; use ark_ec::pairing::Pairing; -mod chunked_elgamal; -mod chunks; +pub mod chunked_elgamal; +pub mod chunked_scalar_mul; // needs to be `pub` for tests +pub mod chunks; mod hkzg_chunked_elgamal; +mod hkzg_chunked_elgamal_commit; mod input_secret; mod keys; -mod public_parameters; -mod transcript; +pub mod public_parameters; mod weighted_transcript; +mod weighted_transcriptv2; pub use public_parameters::DEFAULT_ELL_FOR_TESTING; -pub use transcript::{ - SubTranscript as UnweightedSubtranscript, Transcript as UnsignedUnweightedTranscript, -}; pub use weighted_transcript::{ - SubTranscript as WeightedSubtranscript, Transcript as UnsignedWeightedTranscript, + Subtranscript as WeightedSubtranscript, Transcript as UnsignedWeightedTranscript, +}; +pub use 
weighted_transcriptv2::{ + Subtranscript as WeightedSubtranscriptv2, Transcript as UnsignedWeightedTranscriptv2, }; - #[allow(type_alias_bounds)] pub type SignedWeightedTranscript = GenericSigning>; +#[allow(type_alias_bounds)] +pub type SignedWeightedTranscriptv2 = GenericSigning>; diff --git a/crates/aptos-dkg/src/pvss/chunky/public_parameters.rs b/crates/aptos-dkg/src/pvss/chunky/public_parameters.rs index 28c6694e4336f..f80632d2b095b 100644 --- a/crates/aptos-dkg/src/pvss/chunky/public_parameters.rs +++ b/crates/aptos-dkg/src/pvss/chunky/public_parameters.rs @@ -44,7 +44,7 @@ fn compute_powers_of_radix(ell: u8) -> Vec { #[allow(non_snake_case)] pub struct PublicParameters { #[serde(serialize_with = "ark_se")] - pub pp_elgamal: chunked_elgamal::PublicParameters, // TODO: make this or instead of ? + pub pp_elgamal: chunked_elgamal::PublicParameters, #[serde(serialize_with = "ark_se")] pub pk_range_proof: dekart_univariate_v2::ProverKey, @@ -74,7 +74,7 @@ impl<'de, E: Pairing> Deserialize<'de> for PublicParameters { #[derive(Deserialize)] struct SerializedFields { #[serde(deserialize_with = "ark_de")] - pp_elgamal: chunked_elgamal::PublicParameters, + pp_elgamal: chunked_elgamal::PublicParameters, #[serde(deserialize_with = "ark_de")] pk_range_proof: dekart_univariate_v2::ProverKey, #[serde(deserialize_with = "ark_de")] @@ -124,7 +124,7 @@ impl Valid for PublicParameters { } impl traits::HasEncryptionPublicParams for PublicParameters { - type EncryptionPublicParameters = chunked_elgamal::PublicParameters; + type EncryptionPublicParameters = chunked_elgamal::PublicParameters; fn get_encryption_public_params(&self) -> &Self::EncryptionPublicParameters { &self.pp_elgamal diff --git a/crates/aptos-dkg/src/pvss/chunky/transcript.rs b/crates/aptos-dkg/src/pvss/chunky/transcript.rs deleted file mode 100644 index 5d2d1cdc70ff6..0000000000000 --- a/crates/aptos-dkg/src/pvss/chunky/transcript.rs +++ /dev/null @@ -1,752 +0,0 @@ -// Copyright (c) Aptos Foundation -// Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE - -use crate::{ - dlog::bsgs, - pcs::univariate_hiding_kzg, - pvss::{ - chunky::{ - chunked_elgamal::{self, num_chunks_per_scalar}, - chunks, - hkzg_chunked_elgamal::{self, HkzgElgamalWitness}, - input_secret::InputSecret, - keys, - public_parameters::PublicParameters, - }, - traits::{ - self, - transcript::{Aggregatable, MalleableTranscript}, - HasEncryptionPublicParams, - }, - Player, - }, - range_proofs::{dekart_univariate_v2, traits::BatchedRangeProof}, - sigma_protocol::{ - self, - homomorphism::{tuple::TupleCodomainShape, Trait as _}, - traits::Trait as _, - }, - traits::transcript::HasAggregatableSubtranscript, - Scalar, -}; -use anyhow::bail; -use aptos_crypto::{ - arkworks::{ - self, - random::{ - sample_field_element, sample_field_elements, unsafe_random_point, unsafe_random_points, - UniformRand, - }, - scrape::LowDegreeTest, - serialization::{ark_de, ark_se}, - shamir::ShamirThresholdConfig, - }, - bls12381::{self, PrivateKey}, - utils, CryptoMaterialError, SecretSharingConfig as _, Signature, SigningKey, Uniform, - ValidCryptoMaterial, -}; -use ark_ec::{ - pairing::{Pairing, PairingOutput}, - AffineRepr, CurveGroup, VariableBaseMSM, -}; -use ark_ff::{AdditiveGroup, Fp, FpConfig}; -use ark_poly::EvaluationDomain; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use serde::{Deserialize, Serialize}; -use std::ops::{Mul, Sub}; - -/// Domain-separation tag (DST) used to ensure 
that all cryptographic hashes and -/// transcript operations within the protocol are uniquely namespaced -pub const DST: &[u8; 30] = b"APTOS_CHUNKY_FIELD_PVSS_FS_DST"; - -#[allow(non_snake_case)] -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] -pub struct Transcript { - #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] - utrs: UnsignedTranscript, - sgn: bls12381::Signature, -} - -#[allow(non_snake_case)] -#[derive( - CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize, Clone, Debug, PartialEq, Eq, -)] -pub struct UnsignedTranscript { - dealer: Player, - #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] - pub subtranscript: SubTranscript, - /// Proof (of knowledge) showing that the s_{i,j}'s in C are base-B representations (of the s_i's in V, but this is not part of the proof), and that the r_j's in R are used in C - #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] - pub sharing_proof: SharingProof, -} - -#[allow(non_snake_case)] -#[derive( - CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize, Clone, Debug, PartialEq, Eq, -)] -pub struct SubTranscript { - #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] - pub Vs: Vec, - /// First chunked ElGamal component: C[i][j] = s_{i,j} * G + r_j * ek_i. Here s_i = \sum_j s_{i,j} * B^j // TODO: change notation because B is not a group element? - #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] - pub Cs: Vec>, // TODO: maybe make this and the other fields affine? The verifier will have to do it anyway... and we are trying to speed that up - /// Second chunked ElGamal component: R[j] = r_j * H - #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] - pub Rs: Vec, -} - -impl ValidCryptoMaterial for SubTranscript { - const AIP_80_PREFIX: &'static str = ""; - - fn to_bytes(&self) -> Vec { - // TODO: using `Result>` and `.map_err(|_| CryptoMaterialError::DeserializationError)` would be more consistent here? 
- bcs::to_bytes(&self).expect("Unexpected error during PVSS transcript serialization") - } -} - -impl TryFrom<&[u8]> for SubTranscript { - type Error = CryptoMaterialError; - - fn try_from(bytes: &[u8]) -> Result { - bcs::from_bytes::>(bytes) - .map_err(|_| CryptoMaterialError::DeserializationError) - } -} - -// TODO: Copy-paste ewww -impl, E: Pairing>> traits::Subtranscript - for SubTranscript -{ - type DealtPubKey = keys::DealtPubKey; - type DealtPubKeyShare = keys::DealtPubKeyShare; - type DealtSecretKey = keys::DealtSecretKey; - type DealtSecretKeyShare = keys::DealtSecretKeyShare; - type DecryptPrivKey = keys::DecryptPrivKey; - type EncryptPubKey = keys::EncryptPubKey; - type PublicParameters = PublicParameters; - type SecretSharingConfig = SecretSharingConfig; - - fn get_public_key_share( - &self, - _sc: &Self::SecretSharingConfig, - player: &Player, - ) -> Self::DealtPubKeyShare { - Self::DealtPubKeyShare::new(Self::DealtPubKey::new(self.Vs[player.id].into_affine())) - } - - fn get_dealt_public_key(&self) -> Self::DealtPubKey { - Self::DealtPubKey::new(self.Vs.last().expect("V is empty somehow").into_affine()) - } - - #[allow(non_snake_case)] - fn decrypt_own_share( - &self, - _sc: &Self::SecretSharingConfig, - player: &Player, - dk: &Self::DecryptPrivKey, - pp: &Self::PublicParameters, - ) -> (Self::DealtSecretKeyShare, Self::DealtPubKeyShare) { - let C_i = &self.Cs[player.id]; // where in notation `C_i`, `i` denotes `player.id` - - let ephemeral_keys: Vec<_> = self.Rs.iter().map(|R_i| R_i.mul(dk.dk)).collect(); - assert_eq!( - ephemeral_keys.len(), - C_i.len(), - "Number of ephemeral keys does not match the number of ciphertext chunks" - ); - let dealt_encrypted_secret_key_share_chunks: Vec<_> = C_i - .iter() - .zip(ephemeral_keys.iter()) - .map(|(C_ij, ephemeral_key)| C_ij.sub(ephemeral_key)) - .collect(); - - let dealt_chunked_secret_key_share = bsgs::dlog_vec( - pp.pp_elgamal.G.into_group(), - &dealt_encrypted_secret_key_share_chunks, - &pp.table, - pp.get_dlog_range_bound(), - ) - .expect("BSGS dlog failed"); - - let dealt_chunked_secret_key_share_fr: Vec = dealt_chunked_secret_key_share - .iter() - .map(|&x| E::ScalarField::from(x)) - .collect(); - - let dealt_secret_key_share = - chunks::le_chunks_to_scalar(pp.ell, &dealt_chunked_secret_key_share_fr); - - let dealt_pub_key_share = self.Vs[player.id].into_affine(); // G_2^{f(\omega^i}) - - ( - Scalar(dealt_secret_key_share), - Self::DealtPubKeyShare::new(Self::DealtPubKey::new(dealt_pub_key_share)), // TODO: review this formalism - ) - } -} - -impl, E: Pairing>> - HasAggregatableSubtranscript for Transcript -{ - type Subtranscript = SubTranscript; - - fn get_subtranscript(&self) -> Self::Subtranscript { - self.utrs.subtranscript.clone() - } - - #[allow(non_snake_case)] - fn verify( - &self, - sc: &::SecretSharingConfig, - pp: &::PublicParameters, - spks: &[::SigningPubKey], - eks: &[::EncryptPubKey], - sid: &A, - ) -> anyhow::Result<()> { - if eks.len() != sc.n { - bail!("Expected {} encryption keys, but got {}", sc.n, eks.len()); - } - if self.utrs.subtranscript.Cs.len() != sc.n { - bail!( - "Expected {} arrays of chunked ciphertexts, but got {}", - sc.n, - self.utrs.subtranscript.Cs.len() - ); - } - if self.utrs.subtranscript.Vs.len() != sc.n + 1 { - bail!( - "Expected {} commitment elements, but got {}", - sc.n + 1, - self.utrs.subtranscript.Vs.len() - ); - } - - // Initialize the PVSS Fiat-Shamir context - let sok_ctxt = ( - *sc, - &spks[self.utrs.dealer.id], - sid, - self.utrs.dealer.id, - DST.to_vec(), - ); // This is a 
bit hacky; also get rid of DST here and use self.dst? - - // Verify the transcript signature - self.sgn.verify(&self.utrs, &spks[self.utrs.dealer.id])?; - - { - // Verify the PoK - let eks_inner: Vec<_> = eks.iter().map(|ek| ek.ek).collect(); - let hom = hkzg_chunked_elgamal::Homomorphism::new( - &pp.pk_range_proof.ck_S.lagr_g1, - pp.pk_range_proof.ck_S.xi_1, - &pp.pp_elgamal, - &eks_inner, - ); - if let Err(err) = hom.verify( - &TupleCodomainShape( - self.utrs.sharing_proof.range_proof_commitment.clone(), - chunked_elgamal::CodomainShape { - chunks: self.utrs.subtranscript.Cs.clone(), - randomness: self.utrs.subtranscript.Rs.clone(), - }, - ), - &self.utrs.sharing_proof.PoK, - &sok_ctxt, - ) { - bail!("PoK verification failed: {:?}", err); - } - - // Verify the range proof - if let Err(err) = self.utrs.sharing_proof.range_proof.verify( - &pp.pk_range_proof.vk, - sc.n * num_chunks_per_scalar::(pp.ell) as usize, - pp.ell as usize, - &self.utrs.sharing_proof.range_proof_commitment, - ) { - bail!("Range proof batch verification failed: {:?}", err); - } - } - - let mut rng = rand::thread_rng(); // TODO: make `rng` a parameter of fn verify()? - - // Do the SCRAPE LDT - let ldt = LowDegreeTest::random(&mut rng, sc.t, sc.n + 1, true, &sc.domain); // includes_zero is true here means it includes a commitment to f(0), which is in V[n] - ldt.low_degree_test_group(&self.utrs.subtranscript.Vs)?; - - // Now compute the final MSM // TODO: merge this multi_exp with the PoK verification, as in YOLO YOSO? - let mut base_vec = Vec::new(); - let mut exp_vec = Vec::new(); - - let beta = sample_field_element(&mut rng); - let powers_of_beta = utils::powers(beta, self.utrs.subtranscript.Cs.len() + 1); - - for i in 0..self.utrs.subtranscript.Cs.len() { - for j in 0..self.utrs.subtranscript.Cs[i].len() { - let base = self.utrs.subtranscript.Cs[i][j]; - let exp = pp.powers_of_radix[j] * powers_of_beta[i]; - base_vec.push(base); - exp_vec.push(exp); - } - } - - let weighted_Cs = E::G1::msm(&E::G1::normalize_batch(&base_vec), &exp_vec) - .expect("Failed to compute MSM of Cs in chunky"); - - let weighted_Vs = E::G2::msm( - &E::G2::normalize_batch( - &self.utrs.subtranscript.Vs[..self.utrs.subtranscript.Cs.len()], - ), - &powers_of_beta[..self.utrs.subtranscript.Cs.len()], - ) - .expect("Failed to compute MSM of Vs in chunky"); - - let res = E::multi_pairing( - [ - weighted_Cs.into_affine(), - *pp.get_encryption_public_params().message_base(), - ], - [pp.get_commitment_base(), (-weighted_Vs).into_affine()], - ); // Making things affine here rather than converting the two bases to group elements, since that's probably what they would be converted to anyway: https://github.com/arkworks-rs/algebra/blob/c1f4f5665504154a9de2345f464b0b3da72c28ec/ec/src/models/bls12/g1.rs#L14 - - if PairingOutput::::ZERO != res { - return Err(anyhow::anyhow!("Expected zero during multi-pairing check")); - } - - Ok(()) - } -} - -impl, E: Pairing>> Transcript { - // why are N and P needed? 
TODO: maybe integrate into deal() - #[allow(non_snake_case)] - pub fn encrypt_chunked_shares< - 'a, - A: Serialize + Clone, - R: rand_core::RngCore + rand_core::CryptoRng, - >( - f_evals: &[E::ScalarField], - eks: &[keys::EncryptPubKey], - _sc: &ShamirThresholdConfig, - pp: &PublicParameters, - sok_ctxt: SokContext<'a, A, E>, - rng: &mut R, - ) -> (Vec>, Vec, SharingProof) { - let sc = sok_ctxt.0; - - // Generate the required randomness - let hkzg_randomness = univariate_hiding_kzg::CommitmentRandomness::rand(rng); - - let elgamal_randomness = Scalar::vec_from_inner(chunked_elgamal::correlated_randomness( - rng, - 1 << pp.ell as u64, - num_chunks_per_scalar::(pp.ell), - )); - - // Chunk and flatten the shares - let f_evals_chunked: Vec> = f_evals - .iter() - .map(|f_eval| chunks::scalar_to_le_chunks(pp.ell, f_eval)) - .collect(); - // Flatten it now before `f_evals_chunked` is consumed in the next step - let f_evals_chunked_flat: Vec = - f_evals_chunked.iter().flatten().copied().collect(); - - // Now generate the encrypted shares and range proof commitment, together with its PoK, so: - // (1) Set up the witness - let witness = HkzgElgamalWitness { - hkzg_randomness, - chunked_plaintexts: Scalar::vecvec_from_inner(f_evals_chunked), - elgamal_randomness, - }; - // (2) Compute its image under the corresponding homomorphism, and prove knowledge of an inverse - // (2a) Set up the tuple homomorphism - let eks_inner: Vec<_> = eks.iter().map(|ek| ek.ek).collect(); // TODO: this is a bit ugly - let hom = hkzg_chunked_elgamal::Homomorphism::new( - &pp.pk_range_proof.ck_S.lagr_g1, - pp.pk_range_proof.ck_S.xi_1, - &pp.pp_elgamal, - &eks_inner, - ); - // (2b) Compute its image (the public statement), so the range proof commitment and chunked_elgamal encryptions - let statement = hom.apply(&witness); - // (2c) Prove knowledge of its inverse - let PoK = hom - .prove(&witness, &statement, &sok_ctxt, rng) - .change_lifetime(); // Make sure the lifetime of the proof is not coupled to `hom` which has references - // TODO: don't do &mut but just pass it - - // Destructure the "public statement" of the above sigma protocol - let TupleCodomainShape( - range_proof_commitment, - chunked_elgamal::CodomainShape { - chunks: Cs, - randomness: Rs, - }, - ) = statement; - - debug_assert_eq!( - Cs.len(), - sc.n, - "Number of encrypted chunks must equal number of players" - ); - - // Generate the batch range proof, given the `range_proof_commitment` produced in the PoK - let range_proof = dekart_univariate_v2::Proof::prove( - &pp.pk_range_proof, - &f_evals_chunked_flat, - pp.ell as usize, - &range_proof_commitment, - &hkzg_randomness, - rng, - ); // TODO: don't do &mut fs_t but just pass it - - // Assemble the sharing proof - let sharing_proof = SharingProof { - PoK, - range_proof, - range_proof_commitment, - }; - - (Cs, Rs, sharing_proof) - } -} - -impl Aggregatable for SubTranscript { - type SecretSharingConfig = SecretSharingConfig; - - fn aggregate_with( - &mut self, - sc: &SecretSharingConfig, - other: &Self, - ) -> anyhow::Result<()> { - debug_assert_eq!(self.Cs.len(), sc.n); - debug_assert_eq!(self.Vs.len(), sc.n + 1); - debug_assert_eq!(self.Cs.len(), other.Cs.len()); - debug_assert_eq!(self.Rs.len(), other.Rs.len()); - debug_assert_eq!(self.Vs.len(), other.Vs.len()); - - for i in 0..sc.n { - self.Vs[i] += other.Vs[i]; - for j in 0..self.Cs[i].len() { - self.Cs[i][j] += other.Cs[i][j]; - } - } - self.Vs[sc.n] += other.Vs[sc.n]; - - for (r_self, r_other) in self.Rs.iter_mut().zip(&other.Rs) { - *r_self += r_other; 
- } - - Ok(()) - } -} - -// ================================================================ -// IMPLEMENTATION OF UNSIGNED TRANSCRIPT HASHER -// ================================================================ - -/// Cryptographic hasher for an BCS-serializable UnsignedTranscript -#[derive(Clone)] -pub struct UnsignedTranscriptHasher(aptos_crypto::hash::DefaultHasher); - -impl UnsignedTranscriptHasher { - fn new() -> Self { - const DOMAIN: &[u8] = b"UnsignedTranscript"; - - UnsignedTranscriptHasher(aptos_crypto::hash::DefaultHasher::new(DOMAIN)) - } -} - -static UNSIGNED_TRANSCRIPT_HASHER: aptos_crypto::_once_cell::sync::Lazy = - aptos_crypto::_once_cell::sync::Lazy::new(|| UnsignedTranscriptHasher::new()); - -impl Default for UnsignedTranscriptHasher { - fn default() -> Self { - UNSIGNED_TRANSCRIPT_HASHER.clone() - } -} - -impl aptos_crypto::hash::CryptoHasher for UnsignedTranscriptHasher { - fn seed() -> &'static [u8; 32] { - // Directly compute a fixed seed from the domain string. - const DOMAIN: &[u8] = b"UnsignedTranscript"; - - // Compute once and leak to get 'static - Box::leak(Box::new(aptos_crypto::hash::DefaultHasher::prefixed_hash( - DOMAIN, - ))) - } - - fn update(&mut self, bytes: &[u8]) { - self.0.update(bytes); - } - - fn finish(self) -> aptos_crypto::hash::HashValue { - self.0.finish() - } -} - -impl std::io::Write for UnsignedTranscriptHasher { - fn write(&mut self, bytes: &[u8]) -> std::io::Result { - self.0.update(bytes); - Ok(bytes.len()) - } - - fn flush(&mut self) -> std::io::Result<()> { - Ok(()) - } -} - -// ================================================================ -// END IMPLEMENTATION OF UNSIGNED TRANSCRIPT HASHER -// ================================================================ - -// ================================================================ -// IMPLEMENTATION OF UNSIGNED TRANSCRIPT BCS HASH -// ================================================================ - -use aptos_crypto::hash::{CryptoHash, HashValue}; - -/// Manual implementation of `BCSCryptoHash` for `UnsignedTranscript` -impl CryptoHash for UnsignedTranscript -where - UnsignedTranscript: Serialize, -{ - type Hasher = UnsignedTranscriptHasher; - - fn hash(&self) -> HashValue { - use aptos_crypto::hash::CryptoHasher; - - let mut state = Self::Hasher::default(); - // If BCS serialization fails, this is a programmer error - bcs::serialize_into(&mut state, &self) - .expect("BCS serialization of UnsignedTranscript should not fail"); - state.finish() - } -} - -// ================================================================ -// END IMPLEMENTATION OF UNSIGNED TRANSCRIPT BCS HASH -// ================================================================ - -#[allow(non_snake_case)] -#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq)] -pub struct SharingProof { - /// Proof showing knowledge of `witnesses` s_{i,j} yielding the commitment and the C and the R - pub PoK: sigma_protocol::Proof>, // static because we don't want the lifetime of the Proof to depend on the Homomorphism TODO: try removing it? 
- /// A batched range proof showing that all committed values s_{i,j} lie in some range - pub range_proof: dekart_univariate_v2::Proof, - /// A KZG-style commitment to the values s_{i,j} going into the range proof - pub range_proof_commitment: - as BatchedRangeProof>::Commitment, -} - -impl ValidCryptoMaterial for Transcript { - const AIP_80_PREFIX: &'static str = ""; - - fn to_bytes(&self) -> Vec { - // TODO: using `Result>` and `.map_err(|_| CryptoMaterialError::DeserializationError)` would be more consistent here? - bcs::to_bytes(&self).expect("Unexpected error during PVSS transcript serialization") - } -} - -impl TryFrom<&[u8]> for Transcript { - type Error = CryptoMaterialError; - - fn try_from(bytes: &[u8]) -> Result { - bcs::from_bytes::>(bytes) - .map_err(|_| CryptoMaterialError::DeserializationError) - } -} - -// Temporary hack, will deal with this at some point -#[allow(type_alias_bounds)] -type SokContext<'a, A: Serialize + Clone, E: Pairing> = ( - ShamirThresholdConfig, - bls12381::PublicKey, - &'a A, - usize, - Vec, -); - -// Not sure this alias is very useful -type SecretSharingConfig = ShamirThresholdConfig; - -impl, E: Pairing>> traits::Transcript - for Transcript -{ - type DealtPubKey = keys::DealtPubKey; - type DealtPubKeyShare = keys::DealtPubKeyShare; - type DealtSecretKey = keys::DealtSecretKey; - type DealtSecretKeyShare = keys::DealtSecretKeyShare; - type DecryptPrivKey = keys::DecryptPrivKey; - type EncryptPubKey = keys::EncryptPubKey; - type InputSecret = InputSecret; - type PublicParameters = PublicParameters; - type SecretSharingConfig = SecretSharingConfig; - type SigningPubKey = bls12381::PublicKey; - type SigningSecretKey = bls12381::PrivateKey; - - fn scheme_name() -> String { - "chunky_pvss".to_string() - } - - fn dst() -> Vec { - DST.to_vec() - } - - #[allow(non_snake_case)] - fn deal( - sc: &Self::SecretSharingConfig, - pp: &Self::PublicParameters, - ssk: &Self::SigningSecretKey, - spk: &Self::SigningPubKey, - eks: &[Self::EncryptPubKey], - s: &Self::InputSecret, - session_id: &A, - dealer: &Player, - rng: &mut R, - ) -> Self { - debug_assert_eq!( - eks.len(), - sc.n, - "Number of encryption keys must equal number of players" - ); - - // Initialize the PVSS Fiat-Shamir context - let sok_ctxt = (*sc, spk.clone(), session_id, dealer.id, DST.to_vec()); // This is a bit hacky; also get rid of DST here and use self.dst? 
- - // Generate the Shamir secret sharing polynomial - let mut f = vec![*s.get_secret_a()]; // constant term of polynomial - f.extend(sample_field_elements::( - sc.get_threshold() - 1, - rng, - )); // these are the remaining coefficients; total degree is `t - 1` - - // Generate its `n` evaluations (shares) by doing an FFT over the whole domain, then truncating - let mut f_evals = sc.domain.fft(&f); - f_evals.truncate(sc.n); - debug_assert_eq!(f_evals.len(), sc.n); - - // Encrypt the chunked shares and generate the sharing proof - let (Cs, Rs, sharing_proof) = - Self::encrypt_chunked_shares(&f_evals, eks, sc, pp, sok_ctxt, rng); - - // Add constant term for the `\mathbb{G}_2` commitment (we're doing this - // **after** the previous step because we're now mutating `f_evals` by enlarging it; this is a silly - // technicality however, it has no impact on computational complexity whatsoever as we could simply - // modify the `commit_to_scalars()` function to take another input) - f_evals.push(f[0]); // or *s.get_secret_a() - - // Commit to polynomial evaluations + constant term - let G_2 = pp.get_commitment_base(); - let Vs = arkworks::commit_to_scalars(&G_2, &f_evals); - debug_assert_eq!(Vs.len(), sc.n + 1); - - // Construct the **unsigned** transcript struct with all computed values - let utrs = UnsignedTranscript { - dealer: *dealer, - subtranscript: SubTranscript { Vs, Cs, Rs }, - sharing_proof, - }; - - // Now sign this transcript - let sgn = ssk - .sign(&utrs) - .expect("signing of `chunky` PVSS transcript failed"); - - Transcript { utrs, sgn } - } - - fn get_dealers(&self) -> Vec { - vec![self.utrs.dealer] - } - - #[allow(non_snake_case)] - fn generate(sc: &Self::SecretSharingConfig, pp: &Self::PublicParameters, rng: &mut R) -> Self - where - R: rand_core::RngCore + rand_core::CryptoRng, - { - let num_chunks_per_share = num_chunks_per_scalar::(pp.ell) as usize; - let utrs = UnsignedTranscript { - dealer: sc.get_player(0), - subtranscript: SubTranscript { - Vs: unsafe_random_points::(sc.n + 1, rng), - Cs: (0..sc.n) - .map(|_| unsafe_random_points(num_chunks_per_share, rng)) - .collect::>(), // TODO: would this become faster if generated in one batch and flattened? 
- Rs: unsafe_random_points(num_chunks_per_share, rng), - }, - sharing_proof: SharingProof { - range_proof_commitment: sigma_protocol::homomorphism::TrivialShape( - unsafe_random_point(rng), - ), - PoK: hkzg_chunked_elgamal::Proof::generate( - (sc.n - 1).next_power_of_two() - 1, - num_chunks_per_share, - rng, - ), - range_proof: dekart_univariate_v2::Proof::generate(pp.ell, rng), - }, - }; - - let ssk = PrivateKey::generate(rng); - - let sgn = ssk - .sign(&utrs) - .expect("signing of PVSS transcript should have succeeded"); - - Transcript { utrs, sgn } - } - - fn get_public_key_share( - &self, - _sc: &Self::SecretSharingConfig, - player: &Player, - ) -> Self::DealtPubKeyShare { - // local use here since we have a `SubTranscript` struct in this file - use traits::Subtranscript; - self.utrs.subtranscript.get_public_key_share(_sc, &player) - } - - fn get_dealt_public_key(&self) -> Self::DealtPubKey { - // local use here since we have a `SubTranscript` struct in this file - use traits::Subtranscript; - self.utrs.subtranscript.get_dealt_public_key() - } - - #[allow(non_snake_case)] - fn decrypt_own_share( - &self, - _sc: &Self::SecretSharingConfig, - player: &Player, - dk: &Self::DecryptPrivKey, - pp: &Self::PublicParameters, - ) -> (Self::DealtSecretKeyShare, Self::DealtPubKeyShare) { - // local use here since we have a `SubTranscript` struct in this file - use traits::Subtranscript; - self.utrs - .subtranscript - .decrypt_own_share(_sc, player, dk, pp) - } -} - -impl, E: Pairing>> MalleableTranscript - for Transcript -{ - fn maul_signature( - &mut self, - _ssk: &Self::SigningSecretKey, - _aux: &A, - _player: &Player, - ) { - // TODO: We're not using this but it probably fails if we don't; but that would probably mean recomputing almost the entire transcript... 
but then that would require eks and pp - panic!("Doesn't work for this PVSS, at least for now"); - // self.utrs.dealer = *player; - - // let sgn = ssk - // .sign(&self.utrs) - // .expect("signing of `chunky` PVSS transcript failed"); - // self.sgn = sgn; - } -} diff --git a/crates/aptos-dkg/src/pvss/chunky/weighted_transcript.rs b/crates/aptos-dkg/src/pvss/chunky/weighted_transcript.rs index 1b0b315d86be7..c93cd703bcb5b 100644 --- a/crates/aptos-dkg/src/pvss/chunky/weighted_transcript.rs +++ b/crates/aptos-dkg/src/pvss/chunky/weighted_transcript.rs @@ -33,11 +33,12 @@ use aptos_crypto::{ arkworks::{ self, random::{ - sample_field_element, sample_field_elements, unsafe_random_point, unsafe_random_points, - UniformRand, + sample_field_element, sample_field_elements, unsafe_random_point_group, + unsafe_random_points_group, UniformRand, }, scrape::LowDegreeTest, serialization::{ark_de, ark_se}, + srs::SrsBasis, }, bls12381::{self}, utils, @@ -64,7 +65,7 @@ pub struct Transcript { dealer: Player, #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] /// This is the aggregatable subtranscript - pub subtrs: SubTranscript, + pub subtrs: Subtranscript, /// Proof (of knowledge) showing that the s_{i,j}'s in C are base-B representations (of the s_i's in V, but this is not part of the proof), and that the r_j's in R are used in C #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] pub sharing_proof: SharingProof, @@ -74,7 +75,7 @@ pub struct Transcript { #[derive( CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize, Clone, Debug, PartialEq, Eq, )] -pub struct SubTranscript { +pub struct Subtranscript { // The dealt public key #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] pub V0: E::G2, @@ -89,7 +90,7 @@ pub struct SubTranscript { pub Rs: Vec>, } -impl ValidCryptoMaterial for SubTranscript { +impl ValidCryptoMaterial for Subtranscript { const AIP_80_PREFIX: &'static str = ""; fn to_bytes(&self) -> Vec { @@ -98,11 +99,11 @@ impl ValidCryptoMaterial for SubTranscript { } } -impl TryFrom<&[u8]> for SubTranscript { +impl TryFrom<&[u8]> for Subtranscript { type Error = CryptoMaterialError; fn try_from(bytes: &[u8]) -> Result { - bcs::from_bytes::>(bytes) + bcs::from_bytes::>(bytes) .map_err(|_| CryptoMaterialError::DeserializationError) } } @@ -114,7 +115,7 @@ type SecretSharingConfig = WeightedConfigArkworks; impl, E: Pairing>> HasAggregatableSubtranscript for Transcript { - type Subtranscript = SubTranscript; + type Subtranscript = Subtranscript; fn get_subtranscript(&self) -> Self::Subtranscript { self.subtrs.clone() @@ -162,8 +163,14 @@ impl, E: Pairing>> { // Verify the PoK let eks_inner: Vec<_> = eks.iter().map(|ek| ek.ek).collect(); - let hom = hkzg_chunked_elgamal::WeightedHomomorphism::new( - &pp.pk_range_proof.ck_S.lagr_g1, + let lagr_g1: &[E::G1Affine] = match &pp.pk_range_proof.ck_S.msm_basis { + SrsBasis::Lagrange { lagr: lagr_g1 } => lagr_g1, + SrsBasis::PowersOfTau { .. 
} => { + bail!("Expected a Lagrange basis, received powers of tau basis instead") + }, + }; + let hom = hkzg_chunked_elgamal::WeightedHomomorphism::::new( + lagr_g1, pp.pk_range_proof.ck_S.xi_1, &pp.pp_elgamal, &eks_inner, @@ -280,7 +287,7 @@ impl, E: Pairing>> } impl, E: Pairing>> traits::Subtranscript - for SubTranscript + for Subtranscript { type DealtPubKey = keys::DealtPubKey; type DealtPubKeyShare = Vec>; @@ -373,7 +380,7 @@ impl, E: Pairing>> traits: } } -impl Aggregatable for SubTranscript { +impl Aggregatable for Subtranscript { type SecretSharingConfig = SecretSharingConfig; #[allow(non_snake_case)] @@ -451,6 +458,8 @@ type SokContext<'a, A: Serialize + Clone> = ( Vec, // This is for the DST ); +use crate::pvss::chunky::chunked_elgamal::decrypt_chunked_scalars; + impl, E: Pairing>> traits::Transcript for Transcript { @@ -467,7 +476,7 @@ impl, E: Pairing>> traits: type SigningSecretKey = bls12381::PrivateKey; fn scheme_name() -> String { - "chunky_pvss".to_string() + "chunky_v1".to_string() } /// Fetches the domain-separation tag (DST) @@ -528,7 +537,7 @@ impl, E: Pairing>> traits: Transcript { dealer: *dealer, - subtrs: SubTranscript { V0, Vs, Cs, Rs }, + subtrs: Subtranscript { V0, Vs, Cs, Rs }, sharing_proof, } } @@ -565,61 +574,33 @@ impl, E: Pairing>> traits: dk: &Self::DecryptPrivKey, pp: &Self::PublicParameters, ) -> (Self::DealtSecretKeyShare, Self::DealtPubKeyShare) { - let weight = sc.get_player_weight(player); - let Cs = &self.subtrs.Cs[player.id]; - - // TODO: put an assert here saying that len(Cs) = weight - - let ephemeral_keys: Vec<_> = self - .subtrs - .Rs - .iter() - .take(weight) - .map(|R_i_vec| R_i_vec.iter().map(|R_i| R_i.mul(dk.dk)).collect::>()) - .collect(); - - if let Some(first_key) = ephemeral_keys.first() { - debug_assert_eq!( - first_key.len(), - Cs[0].len(), - "Number of ephemeral keys does not match the number of ciphertext chunks" - ); + debug_assert_eq!(Cs.len(), sc.get_player_weight(player)); + + if !Cs.is_empty() { + if let Some(first_key) = self.subtrs.Rs.first() { + debug_assert_eq!( + first_key.len(), + Cs[0].len(), + "Number of ephemeral keys does not match the number of ciphertext chunks" + ); + } } - let mut sk_shares: Vec> = Vec::with_capacity(weight); let pk_shares = self.get_public_key_share(sc, player); - for i in 0..weight { - // TODO: should really put this in a separate function - let dealt_encrypted_secret_key_share_chunks: Vec<_> = Cs[i] - .iter() - .zip(ephemeral_keys[i].iter()) - .map(|(C_ij, ephemeral_key)| C_ij.sub(ephemeral_key)) - .collect(); - - let dealt_chunked_secret_key_share = bsgs::dlog_vec( - pp.pp_elgamal.G.into_group(), - &dealt_encrypted_secret_key_share_chunks, - &pp.table, - pp.get_dlog_range_bound(), - ) - .expect("BSGS dlog failed"); - - let dealt_chunked_secret_key_share_fr: Vec = - dealt_chunked_secret_key_share - .iter() - .map(|&x| E::ScalarField::from(x)) - .collect(); - - let dealt_secret_key_share = - chunks::le_chunks_to_scalar(pp.ell, &dealt_chunked_secret_key_share_fr); - - sk_shares.push(Scalar(dealt_secret_key_share)); - } + let sk_shares: Vec<_> = decrypt_chunked_scalars( + &Cs, + &self.subtrs.Rs, + &dk.dk, + &pp.pp_elgamal, + &pp.table, + pp.ell, + ); ( - sk_shares, pk_shares, // TODO: review this formalism... wh ydo we need this here? + Scalar::vec_from_inner(sk_shares), + pk_shares, // TODO: review this formalism... why do we need this here? 
) } @@ -632,9 +613,9 @@ impl, E: Pairing>> traits: Transcript { dealer: sc.get_player(0), - subtrs: SubTranscript { - V0: unsafe_random_point::(rng), - Vs: sc.group_by_player(&unsafe_random_points::( + subtrs: Subtranscript { + V0: unsafe_random_point_group::(rng), + Vs: sc.group_by_player(&unsafe_random_points_group::( sc.get_total_weight(), rng, )), @@ -642,17 +623,17 @@ impl, E: Pairing>> traits: .map(|i| { let w = sc.get_player_weight(&sc.get_player(i)); // TODO: combine these functions... (0..w) - .map(|_| unsafe_random_points(num_chunks_per_share, rng)) + .map(|_| unsafe_random_points_group(num_chunks_per_share, rng)) .collect() // todo: use vec![vec![]]... like in the generate functions }) .collect(), Rs: (0..sc.get_max_weight()) - .map(|_| unsafe_random_points(num_chunks_per_share, rng)) + .map(|_| unsafe_random_points_group(num_chunks_per_share, rng)) .collect(), }, sharing_proof: SharingProof { range_proof_commitment: sigma_protocol::homomorphism::TrivialShape( - unsafe_random_point(rng), + unsafe_random_point_group(rng), ), SoK: hkzg_chunked_elgamal::WeightedProof::generate(sc, num_chunks_per_share, rng), range_proof: dekart_univariate_v2::Proof::generate(pp.ell, rng), @@ -685,6 +666,7 @@ impl, E: Pairing>> Transcr rng, 1 << pp.ell as u64, num_chunks_per_scalar::(pp.ell), + &E::ScalarField::ZERO, ) }) .collect(), @@ -711,8 +693,14 @@ impl, E: Pairing>> Transcr // (2) Compute its image under the corresponding homomorphism, and produce an SoK // (2a) Set up the tuple homomorphism let eks_inner: Vec<_> = eks.iter().map(|ek| ek.ek).collect(); // TODO: this is a bit ugly - let hom = hkzg_chunked_elgamal::WeightedHomomorphism::new( - &pp.pk_range_proof.ck_S.lagr_g1, + let lagr_g1: &[E::G1Affine] = match &pp.pk_range_proof.ck_S.msm_basis { + SrsBasis::Lagrange { lagr: lagr_g1 } => lagr_g1, + SrsBasis::PowersOfTau { .. 
} => { + panic!("Expected a Lagrange basis, received powers of tau basis instead") + }, + }; + let hom = hkzg_chunked_elgamal::WeightedHomomorphism::::new( + lagr_g1, pp.pk_range_proof.ck_S.xi_1, &pp.pp_elgamal, &eks_inner, diff --git a/crates/aptos-dkg/src/pvss/chunky/weighted_transcriptv2.rs b/crates/aptos-dkg/src/pvss/chunky/weighted_transcriptv2.rs new file mode 100644 index 0000000000000..939e3d74742c4 --- /dev/null +++ b/crates/aptos-dkg/src/pvss/chunky/weighted_transcriptv2.rs @@ -0,0 +1,1070 @@ +// Copyright (c) Aptos Foundation +// Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE + +use crate::{ + dlog::bsgs, + pcs::univariate_hiding_kzg, + pvss::{ + chunky::{ + chunked_elgamal::{self, num_chunks_per_scalar}, + chunked_scalar_mul, chunks, + hkzg_chunked_elgamal::HkzgWeightedElgamalWitness, + hkzg_chunked_elgamal_commit, + input_secret::InputSecret, + keys, + public_parameters::PublicParameters, + }, + traits::{ + self, + transcript::{Aggregatable, HasAggregatableSubtranscript, MalleableTranscript}, + }, + Player, + }, + range_proofs::{dekart_univariate_v2, traits::BatchedRangeProof}, + sigma_protocol::{ + self, + homomorphism::{tuple::TupleCodomainShape, Trait as _}, + }, + Scalar, +}; +use anyhow::bail; +use aptos_crypto::{ + arkworks::{ + random::{ + sample_field_elements, unsafe_random_point_group, unsafe_random_points_group, + UniformRand, + }, + scrape::LowDegreeTest, + serialization::{ark_de, ark_se, BatchSerializable}, + srs::SrsBasis, + }, + bls12381::{self}, + weighted_config::WeightedConfigArkworks, + CryptoMaterialError, SecretSharingConfig as _, ValidCryptoMaterial, +}; +use ark_ec::{pairing::Pairing, AffineRepr, CurveGroup}; +use ark_ff::{AdditiveGroup, Fp, FpConfig}; +use ark_poly::EvaluationDomain; +use ark_serialize::{ + CanonicalDeserialize, CanonicalSerialize, Compress, Read, SerializationError, Valid, Validate, + Write, +}; +use serde::{Deserialize, Serialize}; +use std::ops::{Mul, Sub}; + +/// Domain-separation tag (DST) used to ensure that all cryptographic hashes and +/// transcript operations within the protocol are uniquely namespaced +pub const DST: &[u8; 42] = b"APTOS_WEIGHTED_CHUNKY_FIELD_PVSS_v2_FS_DST"; + +#[allow(non_snake_case)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +pub struct Transcript { + dealer: Player, + #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] + /// This is the aggregatable subtranscript + pub subtrs: Subtranscript, + /// Proof (of knowledge) showing that the s_{i,j}'s in C are base-B representations (of the s_i's in V, but this is not part of the proof), and that the r_j's in R are used in C + #[serde(serialize_with = "ark_se", deserialize_with = "ark_de")] + pub sharing_proof: SharingProof, +} + +#[allow(non_snake_case)] +#[derive(Deserialize, Clone, Debug, PartialEq, Eq)] +pub struct Subtranscript { + // The dealt public key + #[serde(deserialize_with = "ark_de")] + pub V0: E::G2, + // The dealt public key shares + #[serde(deserialize_with = "ark_de")] + pub Vs: Vec>, + /// First chunked ElGamal component: C[i][j] = s_{i,j} * G + r_j * ek_i. Here s_i = \sum_j s_{i,j} * B^j // TODO: change notation because B is not a group element? + #[serde(deserialize_with = "ark_de")] + pub Cs: Vec>>, // TODO: maybe make this and the other fields affine? The verifier will have to do it anyway... 
and we are trying to speed that up + /// Second chunked ElGamal component: R[j] = r_j * H + #[serde(deserialize_with = "ark_de")] + pub Rs: Vec>, +} + +#[allow(non_snake_case)] +impl BatchSerializable for Subtranscript { + fn collect_points(&self, g1: &mut Vec, g2: &mut Vec) { + g2.push(self.V0); + + for player_Vs in &self.Vs { + g2.extend(player_Vs.iter().copied()); + } + + for player_Cs in &self.Cs { + for chunks in player_Cs { + g1.extend(chunks.iter().copied()); + } + } + + for weight_Rs in &self.Rs { + g1.extend(weight_Rs.iter().copied()); + } + } + + fn serialize_from_affine( + &self, + mut writer: &mut W, + compress: Compress, + g1_iter: &mut impl Iterator, + g2_iter: &mut impl Iterator, + ) -> Result<(), SerializationError> { + // + // 1. Reconstruct nested affine structures + // + + // V0 + let V0_affine = g2_iter.next().unwrap(); + + // Vs + let Vs_affine: Vec> = self + .Vs + .iter() + .map(|row| row.iter().map(|_| g2_iter.next().unwrap()).collect()) + .collect(); + + // Cs + let Cs_affine: Vec>> = self + .Cs + .iter() + .map(|mat| { + mat.iter() + .map(|row| row.iter().map(|_| g1_iter.next().unwrap()).collect()) + .collect() + }) + .collect(); + + // Rs + let Rs_affine: Vec> = self + .Rs + .iter() + .map(|row| row.iter().map(|_| g1_iter.next().unwrap()).collect()) + .collect(); + + // + // 2. Serialize using canonical implementations + // + V0_affine.serialize_with_mode(&mut writer, compress)?; + Vs_affine.serialize_with_mode(&mut writer, compress)?; + Cs_affine.serialize_with_mode(&mut writer, compress)?; + Rs_affine.serialize_with_mode(&mut writer, compress)?; + + Ok(()) + } +} + +impl CanonicalSerialize for Subtranscript { + fn serialize_with_mode( + &self, + mut writer: W, + compress: Compress, + ) -> Result<(), SerializationError> { + let mut g1 = Vec::new(); + let mut g2 = Vec::new(); + + self.collect_points(&mut g1, &mut g2); + + let g1_affine = E::G1::normalize_batch(&g1); + let g2_affine = E::G2::normalize_batch(&g2); + + let mut g1_iter = g1_affine.into_iter(); + let mut g2_iter = g2_affine.into_iter(); + + >::serialize_from_affine( + self, + &mut writer, + compress, + &mut g1_iter, + &mut g2_iter, + )?; + + debug_assert!(g1_iter.next().is_none()); + debug_assert!(g2_iter.next().is_none()); + + Ok(()) + } + + fn serialized_size(&self, compress: Compress) -> usize { + // 1. V0 + let mut size = ::Affine::zero().serialized_size(compress); + + // 2. Vs (Vec>) + // Outer length + size += 4; + for row in &self.Vs { + size += 4; // inner row length + size += row.len() * ::Affine::zero().serialized_size(compress); + // this is the weight of player i + } + + // 3. Cs (Vec>>) + size += 4; // outer length + for mat in &self.Cs { + size += 4; // inner matrix length + for row in mat { + size += 4; // row length + size += row.len() * ::Affine::zero().serialized_size(compress); + // this can be done simpler + } + } + + // 4. 
Rs (Vec>) + size += 4; // outer length + for row in &self.Rs { + size += 4; // row length + size += row.len() * ::Affine::zero().serialized_size(compress); + // same, something like 4 + Rs.len() * (4 + Rs[0].len() * zero().serialized_size(compress)) + } + + size + } +} + +impl Serialize for Subtranscript { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut bytes = Vec::with_capacity(self.serialized_size(Compress::Yes)); + self.serialize_with_mode(&mut bytes, Compress::Yes) + .map_err(serde::ser::Error::custom)?; + + serializer.serialize_bytes(&bytes) + } +} + +// #[allow(non_snake_case)] +// impl CanonicalSerialize for Subtranscript { +// fn serialize_with_mode( +// &self, +// mut writer: W, +// compress: Compress, +// ) -> Result<(), SerializationError> { +// // +// // 1. Collect all G2 and G1 elements for batch normalization +// // +// let mut g2_elems = Vec::with_capacity(1 + self.Vs.iter().map(|r| r.len()).sum::()); +// let mut g1_elems = Vec::new(); + +// // V0 +// g2_elems.push(self.V0); + +// // Vs +// for row in &self.Vs { +// for v in row { +// g2_elems.push(*v); +// } +// } + +// // Cs +// for mat in &self.Cs { +// for row in mat { +// for c in row { +// g1_elems.push(*c); +// } +// } +// } + +// // Rs +// for row in &self.Rs { +// for r in row { +// g1_elems.push(*r); +// } +// } + +// // +// // 2. Batch normalize +// // +// let g2_affine = E::G2::normalize_batch(&g2_elems); +// let g1_affine = E::G1::normalize_batch(&g1_elems); + +// // +// // 3. Reconstruct nested structures in affine form +// // +// let mut g2_iter = g2_affine.into_iter(); +// let mut g1_iter = g1_affine.into_iter(); + +// // V0 +// let V0_affine = g2_iter.next().unwrap(); + +// // Vs_affine +// let Vs_affine: Vec> = self +// .Vs +// .iter() +// .map(|row| row.iter().map(|_| g2_iter.next().unwrap()).collect()) +// .collect(); + +// // Cs_affine +// let Cs_affine: Vec>> = self +// .Cs +// .iter() +// .map(|mat| { +// mat.iter() +// .map(|row| row.iter().map(|_| g1_iter.next().unwrap()).collect()) +// .collect() +// }) +// .collect(); + +// // Rs_affine +// let Rs_affine: Vec> = self +// .Rs +// .iter() +// .map(|row| row.iter().map(|_| g1_iter.next().unwrap()).collect()) +// .collect(); + +// // +// // 4. Serialize in canonical order using nested structure +// // +// V0_affine.serialize_with_mode(&mut writer, compress)?; +// Vs_affine.serialize_with_mode(&mut writer, compress)?; +// Cs_affine.serialize_with_mode(&mut writer, compress)?; +// Rs_affine.serialize_with_mode(&mut writer, compress)?; + +// Ok(()) +// } + +// fn serialized_size(&self, compress: Compress) -> usize { +// // 1. V0 +// let mut size = ::Affine::zero().serialized_size(compress); + +// // 2. Vs (Vec>) +// // Outer length +// size += 4; +// for row in &self.Vs { +// size += 4; // inner row length +// size += row.len() * ::Affine::zero().serialized_size(compress); +// // this is the weight of player i +// } + +// // 3. Cs (Vec>>) +// size += 4; // outer length +// for mat in &self.Cs { +// size += 4; // inner matrix length +// for row in mat { +// size += 4; // row length +// size += row.len() * ::Affine::zero().serialized_size(compress); +// // this can be done simpler +// } +// } + +// // 4. 
Rs (Vec>) +// size += 4; // outer length +// for row in &self.Rs { +// size += 4; // row length +// size += row.len() * ::Affine::zero().serialized_size(compress); +// // same, something like 4 + Rs.len() * (4 + Rs[0].len() * zero().serialized_size(compress)) +// } + +// size +// } +// } + +// `CanonicalDeserialize` needs `Valid` +impl Valid for Subtranscript { + fn check(&self) -> Result<(), SerializationError> { + Ok(()) + } +} + +#[allow(non_snake_case)] +impl CanonicalDeserialize for Subtranscript { + fn deserialize_with_mode( + mut reader: R, + compress: Compress, + validate: Validate, + ) -> Result { + // + // 1. Deserialize V0 (G2Affine -> G2 projective) + // + let V0_affine = + ::Affine::deserialize_with_mode(&mut reader, compress, validate)?; + let V0 = V0_affine.into(); + + // + // 2. Deserialize Vs (Vec>) -> Vec> + // + let Vs_affine: Vec::Affine>> = + CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let Vs: Vec> = Vs_affine + .into_iter() + .map(|row| row.into_iter().map(|p| p.into()).collect()) + .collect(); + + // + // 3. Deserialize Cs (Vec>>) -> Vec>> + // + let Cs_affine: Vec::Affine>>> = + CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let Cs: Vec>> = Cs_affine + .into_iter() + .map(|mat| { + mat.into_iter() + .map(|row| row.into_iter().map(|p| p.into()).collect()) + .collect() + }) + .collect(); + + // + // 4. Deserialize Rs (Vec>) -> Vec> + // + let Rs_affine: Vec::Affine>> = + CanonicalDeserialize::deserialize_with_mode(&mut reader, compress, validate)?; + let Rs: Vec> = Rs_affine + .into_iter() + .map(|row| row.into_iter().map(|p| p.into()).collect()) + .collect(); + + // + // 5. Construct the Subtranscript + // + Ok(Subtranscript { V0, Vs, Cs, Rs }) + } +} + +impl ValidCryptoMaterial for Subtranscript { + const AIP_80_PREFIX: &'static str = ""; + + fn to_bytes(&self) -> Vec { + // TODO: using `Result>` and `.map_err(|_| CryptoMaterialError::DeserializationError)` would be more consistent here? 
+ bcs::to_bytes(&self).expect("Unexpected error during PVSS transcript serialization") + } +} + +impl TryFrom<&[u8]> for Subtranscript { + type Error = CryptoMaterialError; + + fn try_from(bytes: &[u8]) -> Result { + bcs::from_bytes::>(bytes) + .map_err(|_| CryptoMaterialError::DeserializationError) + } +} + +/// This is the secret sharing config that will be used for weighted `chunky` +#[allow(type_alias_bounds)] +type SecretSharingConfig = WeightedConfigArkworks; + +impl, E: Pairing>> + HasAggregatableSubtranscript for Transcript +{ + type Subtranscript = Subtranscript; + + fn get_subtranscript(&self) -> Self::Subtranscript { + self.subtrs.clone() + } + + #[allow(non_snake_case)] + fn verify( + &self, + sc: &Self::SecretSharingConfig, + pp: &Self::PublicParameters, + spks: &[Self::SigningPubKey], + eks: &[Self::EncryptPubKey], + sid: &A, + ) -> anyhow::Result<()> { + if eks.len() != sc.get_total_num_players() { + bail!( + "Expected {} encryption keys, but got {}", + sc.get_total_num_players(), + eks.len() + ); + } + if self.subtrs.Cs.len() != sc.get_total_num_players() { + bail!( + "Expected {} arrays of chunked ciphertexts, but got {}", + sc.get_total_num_players(), + self.subtrs.Cs.len() + ); + } + if self.subtrs.Vs.len() != sc.get_total_num_players() { + bail!( + "Expected {} arrays of commitment elements, but got {}", + sc.get_total_num_players(), + self.subtrs.Vs.len() + ); + } + + // Initialize the **identical** PVSS SoK context + let sok_cntxt = ( + &spks[self.dealer.id], + sid.clone(), + self.dealer.id, + DST.to_vec(), + ); // As above, this is a bit hacky... though we have access to `self` now + + { + // Verify the PoK + let eks_inner: Vec<_> = eks.iter().map(|ek| ek.ek).collect(); + let lagr_g1: &[E::G1Affine] = match &pp.pk_range_proof.ck_S.msm_basis { + SrsBasis::Lagrange { lagr: lagr_g1 } => lagr_g1, + SrsBasis::PowersOfTau { .. } => { + bail!("Expected a Lagrange basis, received powers of tau basis instead") + }, + }; + let hom = hkzg_chunked_elgamal_commit::Homomorphism::::new( + lagr_g1, + pp.pk_range_proof.ck_S.xi_1, + &pp.pp_elgamal, + &eks_inner, + pp.get_commitment_base(), + pp.ell, + ); + if let Err(err) = hom.verify( + &TupleCodomainShape( + TupleCodomainShape( + self.sharing_proof.range_proof_commitment.clone(), + chunked_elgamal::WeightedCodomainShape { + chunks: self.subtrs.Cs.clone(), + randomness: self.subtrs.Rs.clone(), + }, + ), + chunked_scalar_mul::CodomainShape(self.subtrs.Vs.clone()), + ), + &self.sharing_proof.SoK, + &sok_cntxt, + ) { + bail!("PoK verification failed: {:?}", err); + } + + // Verify the range proof + if let Err(err) = self.sharing_proof.range_proof.verify( + &pp.pk_range_proof.vk, + sc.get_total_weight() * num_chunks_per_scalar::(pp.ell) as usize, + pp.ell as usize, + &self.sharing_proof.range_proof_commitment, + ) { + bail!("Range proof batch verification failed: {:?}", err); + } + } + + let mut rng = rand::thread_rng(); // TODO: make `rng` a parameter of fn verify()? 
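// Note on the SCRAPE check below: the low-degree test uses a random codeword of the dual of the
// Reed-Solomon code over the secret-sharing config's evaluation domain and checks that the
// corresponding scalar combination of V_1..V_n together with V0 (the commitment to f(0), hence
// `includes_zero = true`) is the group identity. The check always passes when the committed
// evaluations lie on a polynomial of degree below the threshold weight, and fails with
// overwhelming probability over the sampled codeword otherwise.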
+ + // Do the SCRAPE LDT + let ldt = LowDegreeTest::random( + &mut rng, + sc.get_threshold_weight(), + sc.get_total_weight() + 1, + true, + &sc.get_threshold_config().domain, + ); // includes_zero is true here means it includes a commitment to f(0), which is in V[n] + let mut Vs_flat: Vec<_> = self.subtrs.Vs.iter().flatten().cloned().collect(); + Vs_flat.push(self.subtrs.V0); + // could add an assert_eq here with sc.get_total_weight() + ldt.low_degree_test_group(&Vs_flat)?; + + // let eks_inner: Vec<_> = eks.iter().map(|ek| ek.ek).collect(); + // let hom = hkzg_chunked_elgamal::WeightedHomomorphism::new( + // &pp.pk_range_proof.ck_S.lagr_g1, + // pp.pk_range_proof.ck_S.xi_1, + // &pp.pp_elgamal, + // &eks_inner, + // ); + // let (sigma_bases, sigma_scalars, beta_powers) = hom.verify_msm_terms( + // &TupleCodomainShape( + // self.sharing_proof.range_proof_commitment.clone(), + // chunked_elgamal::WeightedCodomainShape { + // chunks: self.subtrs.Cs.clone(), + // randomness: self.subtrs.Rs.clone(), + // }, + // ), + // &self.sharing_proof.SoK, + // &sok_cntxt, + // ); + // let ldt_msm_terms = ldt.ldt_msm_input(&Vs_flat)?; + // use aptos_crypto::arkworks::msm::verify_msm_terms_with_start; + // verify_msm_terms_with_start(ldt_msm_terms, sigma_bases, sigma_scalars, beta_powers); + + Ok(()) + } +} + +use crate::pvss::chunky::chunked_elgamal::decrypt_chunked_scalars; + +impl, E: Pairing>> traits::Subtranscript + for Subtranscript +{ + type DealtPubKey = keys::DealtPubKey; + type DealtPubKeyShare = Vec>; + type DealtSecretKey = keys::DealtSecretKey; + type DealtSecretKeyShare = Vec>; + type DecryptPrivKey = keys::DecryptPrivKey; + type EncryptPubKey = keys::EncryptPubKey; + type PublicParameters = PublicParameters; + type SecretSharingConfig = SecretSharingConfig; + + #[allow(non_snake_case)] + fn get_public_key_share( + &self, + _sc: &Self::SecretSharingConfig, + player: &Player, + ) -> Self::DealtPubKeyShare { + self.Vs[player.id] + .iter() + .map(|&V_i| keys::DealtPubKeyShare::::new(keys::DealtPubKey::new(V_i.into_affine()))) + .collect() + } + + fn get_dealt_public_key(&self) -> Self::DealtPubKey { + Self::DealtPubKey::new(self.V0.into_affine()) + } + + #[allow(non_snake_case)] + fn decrypt_own_share( + &self, + sc: &Self::SecretSharingConfig, + player: &Player, + dk: &Self::DecryptPrivKey, + pp: &Self::PublicParameters, + ) -> (Self::DealtSecretKeyShare, Self::DealtPubKeyShare) { + let Cs = &self.Cs[player.id]; + debug_assert_eq!(Cs.len(), sc.get_player_weight(player)); + + if !Cs.is_empty() { + if let Some(first_key) = self.Rs.first() { + debug_assert_eq!( + first_key.len(), + Cs[0].len(), + "Number of ephemeral keys does not match the number of ciphertext chunks" + ); + } + } + + let pk_shares = self.get_public_key_share(sc, player); + + let sk_shares: Vec<_> = + decrypt_chunked_scalars(&Cs, &self.Rs, &dk.dk, &pp.pp_elgamal, &pp.table, pp.ell); + + ( + Scalar::vec_from_inner(sk_shares), + pk_shares, // TODO: review this formalism... why do we need this here? 
+ ) + } +} + +impl Aggregatable for Subtranscript { + type SecretSharingConfig = SecretSharingConfig; + + #[allow(non_snake_case)] + fn aggregate_with(&mut self, sc: &SecretSharingConfig, other: &Self) -> anyhow::Result<()> { + debug_assert_eq!(self.Cs.len(), sc.get_total_num_players()); + debug_assert_eq!(self.Vs.len(), sc.get_total_num_players()); + debug_assert_eq!(self.Cs.len(), other.Cs.len()); + debug_assert_eq!(self.Rs.len(), other.Rs.len()); + debug_assert_eq!(self.Vs.len(), other.Vs.len()); + + // Aggregate the V0s + self.V0 += other.V0; + + for i in 0..sc.get_total_num_players() { + for j in 0..self.Vs[i].len() { + // Aggregate the V_{i,j}s + self.Vs[i][j] += other.Vs[i][j]; + for k in 0..self.Cs[i][j].len() { + // Aggregate the C_{i,j,k}s + self.Cs[i][j][k] += other.Cs[i][j][k]; + } + } + } + + for j in 0..self.Rs.len() { + for (R_jk, other_R_jk) in self.Rs[j].iter_mut().zip(&other.Rs[j]) { + // Aggregate the R_{j,k}s + *R_jk += other_R_jk; + } + } + + Ok(()) + } +} + +#[allow(non_snake_case)] +#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug, PartialEq, Eq)] +pub struct SharingProof { + /// SoK: the SK is knowledge of `witnesses` s_{i,j} yielding the commitment and the C and the R, their image is the PK, and the signed message is a certain context `cntxt` + pub SoK: hkzg_chunked_elgamal_commit::Proof<'static, E>, // static because we don't want the lifetime of the Proof to depend on the Homomorphism TODO: try removing it? + /// A batched range proof showing that all committed values s_{i,j} lie in some range + pub range_proof: dekart_univariate_v2::Proof, + /// A KZG-style commitment to the values s_{i,j} going into the range proof + pub range_proof_commitment: + as BatchedRangeProof>::Commitment, +} + +impl ValidCryptoMaterial for Transcript { + const AIP_80_PREFIX: &'static str = ""; + + fn to_bytes(&self) -> Vec { + // TODO: using `Result>` and `.map_err(|_| CryptoMaterialError::DeserializationError)` would be more consistent here? + bcs::to_bytes(&self).expect("Unexpected error during PVSS transcript serialization") + } +} + +impl TryFrom<&[u8]> for Transcript { + type Error = CryptoMaterialError; + + fn try_from(bytes: &[u8]) -> Result { + bcs::from_bytes::>(bytes) + .map_err(|_| CryptoMaterialError::DeserializationError) + } +} + +// Temporary hack, will deal with this at some point... 
a struct would be cleaner +#[allow(type_alias_bounds)] +type SokContext<'a, A: Serialize + Clone> = ( + bls12381::PublicKey, + &'a A, // This is for the session id + usize, // This is for the player id + Vec, // This is for the DST +); + +impl, E: Pairing>> traits::Transcript + for Transcript +{ + type DealtPubKey = keys::DealtPubKey; + type DealtPubKeyShare = Vec>; + type DealtSecretKey = keys::DealtSecretKey; + type DealtSecretKeyShare = Vec>; + type DecryptPrivKey = keys::DecryptPrivKey; + type EncryptPubKey = keys::EncryptPubKey; + type InputSecret = InputSecret; + type PublicParameters = PublicParameters; + type SecretSharingConfig = SecretSharingConfig; + type SigningPubKey = bls12381::PublicKey; + type SigningSecretKey = bls12381::PrivateKey; + + fn scheme_name() -> String { + "chunky_v2".to_string() + } + + /// Fetches the domain-separation tag (DST) + fn dst() -> Vec { + DST.to_vec() + } + + #[allow(non_snake_case)] + fn deal( + sc: &Self::SecretSharingConfig, + pp: &Self::PublicParameters, + _ssk: &Self::SigningSecretKey, + spk: &Self::SigningPubKey, + eks: &[Self::EncryptPubKey], + s: &Self::InputSecret, + session_id: &A, + dealer: &Player, + rng: &mut R, + ) -> Self { + debug_assert_eq!( + eks.len(), + sc.get_total_num_players(), + "Number of encryption keys must equal total weight" + ); + + // Initialize the PVSS SoK context + let sok_cntxt = (spk.clone(), session_id, dealer.id, DST.to_vec()); // This is a bit hacky; also get rid of DST here and use self.dst? Would require making `self` input of `deal()` + + // Generate the Shamir secret sharing polynomial + let mut f = vec![*s.get_secret_a()]; // constant term of polynomial + f.extend(sample_field_elements::( + sc.get_threshold_weight() - 1, + rng, + )); // these are the remaining coefficients; total degree is `t - 1`, so the reconstruction threshold is `t` + + // Generate its `n` evaluations (shares) by doing an FFT over the whole domain, then truncating + let mut f_evals = sc.get_threshold_config().domain.fft(&f); + f_evals.truncate(sc.get_total_weight()); + debug_assert_eq!(f_evals.len(), sc.get_total_weight()); + + // Encrypt the chunked shares and generate the sharing proof + let (Cs, Rs, Vs, sharing_proof) = + Self::encrypt_chunked_shares(&f_evals, eks, pp, sc, sok_cntxt, rng); + + // Add constant term for the `\mathbb{G}_2` commitment (we're doing this **after** the previous step + // because we're now mutating `f_evals` by enlarging it; this is an unimportant technicality however, + // it has no impact on computational complexity whatsoever as we could simply modify the `commit_to_scalars()` + // function to take another input) + // f_evals.push(f[0]); // or *s.get_secret_a() + + // // Commit to polynomial evaluations + constant term + // let G_2 = pp.get_commitment_base(); + // let flattened_Vs = arkworks::commit_to_scalars(&G_2, &f_evals); + // debug_assert_eq!(flattened_Vs.len(), sc.get_total_weight() + 1); + + // let Vs = sc.group_by_player(&flattened_Vs); // This won't use the last item in `flattened_Vs` because of `sc` + // let V0 = *flattened_Vs.last().unwrap(); + + let V0 = pp.get_commitment_base() * f[0]; + + Transcript { + dealer: *dealer, + subtrs: Subtranscript { V0, Vs, Cs, Rs }, + sharing_proof, + } + } + + fn get_dealers(&self) -> Vec { + vec![self.dealer] + } + + #[allow(non_snake_case)] + fn get_public_key_share( + &self, + _sc: &Self::SecretSharingConfig, + player: &Player, + ) -> Self::DealtPubKeyShare { + self.subtrs.Vs[player.id] + .iter() + .map(|V_i| { + let affine = V_i.into_affine(); + + 
keys::DealtPubKeyShare::::new(keys::DealtPubKey::new(affine)) + }) + .collect() + } + + fn get_dealt_public_key(&self) -> Self::DealtPubKey { + Self::DealtPubKey::new(self.subtrs.V0.into_affine()) + } + + #[allow(non_snake_case)] + fn decrypt_own_share( + &self, + sc: &Self::SecretSharingConfig, + player: &Player, + dk: &Self::DecryptPrivKey, + pp: &Self::PublicParameters, + ) -> (Self::DealtSecretKeyShare, Self::DealtPubKeyShare) { + let weight = sc.get_player_weight(player); + + let Cs = &self.subtrs.Cs[player.id]; + + // TODO: put an assert here saying that len(Cs) = weight + + let ephemeral_keys: Vec<_> = self + .subtrs + .Rs + .iter() + .take(weight) + .map(|R_i_vec| R_i_vec.iter().map(|R_i| R_i.mul(dk.dk)).collect::>()) + .collect(); + + if let Some(first_key) = ephemeral_keys.first() { + debug_assert_eq!( + first_key.len(), + Cs[0].len(), + "Number of ephemeral keys does not match the number of ciphertext chunks" + ); + } + + let mut sk_shares: Vec> = Vec::with_capacity(weight); + let pk_shares = self.get_public_key_share(sc, player); + + for i in 0..weight { + // TODO: should really put this in a separate function + let dealt_encrypted_secret_key_share_chunks: Vec<_> = Cs[i] + .iter() + .zip(ephemeral_keys[i].iter()) + .map(|(C_ij, ephemeral_key)| C_ij.sub(ephemeral_key)) + .collect(); + + let dealt_chunked_secret_key_share = bsgs::dlog_vec( + pp.pp_elgamal.G.into_group(), + &dealt_encrypted_secret_key_share_chunks, + &pp.table, + pp.get_dlog_range_bound(), + ) + .expect("BSGS dlog failed"); + + let dealt_chunked_secret_key_share_fr: Vec = + dealt_chunked_secret_key_share + .iter() + .map(|&x| E::ScalarField::from(x)) + .collect(); + + let dealt_secret_key_share = + chunks::le_chunks_to_scalar(pp.ell, &dealt_chunked_secret_key_share_fr); + + sk_shares.push(Scalar(dealt_secret_key_share)); + } + + ( + sk_shares, pk_shares, // TODO: review this formalism... wh ydo we need this here? + ) + } + + #[allow(non_snake_case)] + fn generate(sc: &Self::SecretSharingConfig, pp: &Self::PublicParameters, rng: &mut R) -> Self + where + R: rand_core::RngCore + rand_core::CryptoRng, + { + let num_chunks_per_share = num_chunks_per_scalar::(pp.ell) as usize; + + Transcript { + dealer: sc.get_player(0), + subtrs: Subtranscript { + V0: unsafe_random_point_group::(rng), + Vs: sc.group_by_player(&unsafe_random_points_group::( + sc.get_total_weight(), + rng, + )), + Cs: (0..sc.get_total_num_players()) + .map(|i| { + let w = sc.get_player_weight(&sc.get_player(i)); // TODO: combine these functions... + (0..w) + .map(|_| unsafe_random_points_group(num_chunks_per_share, rng)) + .collect() // todo: use vec![vec![]]... like in the generate functions + }) + .collect(), + Rs: (0..sc.get_max_weight()) + .map(|_| unsafe_random_points_group(num_chunks_per_share, rng)) + .collect(), + }, + sharing_proof: SharingProof { + range_proof_commitment: sigma_protocol::homomorphism::TrivialShape( + unsafe_random_point_group(rng), + ), + SoK: hkzg_chunked_elgamal_commit::Proof::generate(sc, num_chunks_per_share, rng), + range_proof: dekart_univariate_v2::Proof::generate(pp.ell, rng), + }, + } + } +} + +use crate::sigma_protocol::homomorphism::tuple::PairingTupleHomomorphism; + +impl, E: Pairing>> Transcript { + // why are N and P needed? 
TODO: maybe integrate into deal() + #[allow(non_snake_case)] + pub fn encrypt_chunked_shares< + 'a, + A: Serialize + Clone, + R: rand_core::RngCore + rand_core::CryptoRng, + >( + f_evals: &[E::ScalarField], + eks: &[keys::EncryptPubKey], + pp: &PublicParameters, + sc: &::SecretSharingConfig, // only for debugging purposes? + sok_cntxt: SokContext<'a, A>, + rng: &mut R, + ) -> ( + Vec>>, + Vec>, + Vec>, + SharingProof, + ) { + // Generate the required randomness + let hkzg_randomness = univariate_hiding_kzg::CommitmentRandomness::rand(rng); + let elgamal_randomness = Scalar::vecvec_from_inner( + (0..sc.get_max_weight()) + .map(|_| { + chunked_elgamal::correlated_randomness( + rng, + 1 << pp.ell as u64, + num_chunks_per_scalar::(pp.ell), + &E::ScalarField::ZERO, + ) + }) + .collect(), + ); + + // Chunk and flatten the shares + let f_evals_chunked: Vec> = f_evals + .iter() + .map(|f_eval| chunks::scalar_to_le_chunks(pp.ell, f_eval)) + .collect(); + // Flatten it now (for use in the range proof) before `f_evals_chunked` is consumed in the next step + let f_evals_chunked_flat: Vec = + f_evals_chunked.iter().flatten().copied().collect(); + // Separately, gather the chunks by weight + let f_evals_weighted = sc.group_by_player(&f_evals_chunked); + + // Now generate the encrypted shares and range proof commitment, together with its SoK, so: + // (1) Set up the witness + let witness = HkzgWeightedElgamalWitness { + hkzg_randomness, + chunked_plaintexts: Scalar::vecvecvec_from_inner(f_evals_weighted), + elgamal_randomness, + }; + // (2) Compute its image under the corresponding homomorphism, and produce an SoK + // (2a) Set up the tuple homomorphism + let eks_inner: Vec<_> = eks.iter().map(|ek| ek.ek).collect(); // TODO: this is a bit ugly + let lagr_g1: &[E::G1Affine] = match &pp.pk_range_proof.ck_S.msm_basis { + SrsBasis::Lagrange { lagr: lagr_g1 } => lagr_g1, + SrsBasis::PowersOfTau { .. 
} => { + panic!("Expected a Lagrange basis, received powers of tau basis instead") + }, + }; + let hom = hkzg_chunked_elgamal_commit::Homomorphism::::new( + lagr_g1, + pp.pk_range_proof.ck_S.xi_1, + &pp.pp_elgamal, + &eks_inner, + pp.get_commitment_base(), + pp.ell, + ); + // (2b) Compute its image (the public statement), so the range proof commitment and chunked_elgamal encryptions + let statement = hom.apply(&witness); // hmm slightly inefficient that we're unchunking here, so might be better to set up a "small" hom just for this part + // (2c) Produce the SoK + let SoK = PairingTupleHomomorphism::prove(&hom, &witness, &statement, &sok_cntxt, rng) + .change_lifetime(); // Make sure the lifetime of the proof is not coupled to `hom` which has references + + // Destructure the "public statement" of the above sigma protocol + let TupleCodomainShape( + TupleCodomainShape( + range_proof_commitment, + chunked_elgamal::WeightedCodomainShape { + chunks: Cs, + randomness: Rs, + }, + ), + chunked_scalar_mul::CodomainShape(Vs), + ) = statement; + + // debug_assert_eq!( + // Cs.len(), + // sc.get_total_weight(), + // "Number of encrypted chunks must equal number of players" + // ); + + // Generate the batch range proof, given the `range_proof_commitment` produced in the PoK + let range_proof = dekart_univariate_v2::Proof::prove( + &pp.pk_range_proof, + &f_evals_chunked_flat, + pp.ell as usize, + &range_proof_commitment, + &hkzg_randomness, + rng, + ); + + // Assemble the sharing proof + let sharing_proof = SharingProof { + SoK, + range_proof, + range_proof_commitment, + }; + + //let Vs = sc.group_by_player(&Vs_flat.0); + + (Cs, Rs, Vs, sharing_proof) + } +} + +impl, E: Pairing>> MalleableTranscript + for Transcript +{ + fn maul_signature( + &mut self, + _ssk: &Self::SigningSecretKey, + _aux: &A, + _player: &Player, + ) { + // TODO: We're not using this but it probably fails if we don't; but that would probably mean recomputing almost the entire transcript... 
but then that would require eks and pp + panic!("Doesn't work for this PVSS, at least for now"); + // self.dealer = *player; + + // let sgn = ssk + // .sign(&self.utrs) + // .expect("signing of `chunky` PVSS transcript failed"); + // self.sgn = sgn; + } +} diff --git a/crates/aptos-dkg/src/pvss/test_utils.rs b/crates/aptos-dkg/src/pvss/test_utils.rs index b83c34ed378ce..a8acef39ef814 100644 --- a/crates/aptos-dkg/src/pvss/test_utils.rs +++ b/crates/aptos-dkg/src/pvss/test_utils.rs @@ -6,7 +6,7 @@ use crate::pvss::{ transcript::{Transcript, WithMaxNumShares}, Convert, HasEncryptionPublicParams, Subtranscript, }, - Player, ThresholdConfigBlstrs, WeightedConfigBlstrs, + Player, ThresholdConfigBlstrs, }; use aptos_crypto::{ arkworks::shamir::Reconstructable, @@ -52,12 +52,12 @@ pub fn setup_dealing DealingArgs { println!( - "Setting up dealing for {} PVSS, with {}", + "Setting up dealing for {} PVSS, with {} (and some elliptic curve)", T::scheme_name(), sc ); - let pp = T::PublicParameters::with_max_num_shares(sc.get_total_num_players()); + let pp = T::PublicParameters::with_max_num_shares(sc.get_total_num_shares()); let (ssks, spks, dks, eks, iss, s, dsk, dpk) = generate_keys_and_secrets::(sc, &pp, &mut rng); @@ -75,7 +75,7 @@ pub fn setup_dealing>, @@ -218,6 +218,8 @@ pub fn get_weighted_configs_for_testing() -> Vec::new(1, vec![1, 1]).unwrap()); // 2-out-of-2, weights 1 1 wcs.push(WeightedConfig::::new(2, vec![1, 1]).unwrap()); + // 2-out-of-3, weights 2 1 + wcs.push(WeightedConfig::::new(2, vec![2, 1]).unwrap()); // 1-out-of-3, weights 1 1 1 wcs.push(WeightedConfig::::new(1, vec![1, 1, 1]).unwrap()); @@ -248,6 +250,8 @@ pub fn get_weighted_configs_for_testing() -> Vec() -> V .collect() } -pub fn get_weighted_configs_for_benchmarking() -> Vec { +pub fn get_weighted_configs_for_benchmarking() -> Vec> +{ let mut wcs = vec![]; // This one was produced mid-Nov 2025 @@ -276,9 +281,10 @@ pub fn get_weighted_configs_for_benchmarking() -> Vec { 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 7, ]; let threshold = 129; // slow path - wcs.push(WeightedConfigBlstrs::new(threshold, weights.clone()).unwrap()); - let threshold = 166; // fast path - wcs.push(WeightedConfigBlstrs::new(threshold, weights).unwrap()); + wcs.push(WeightedConfig::::new(threshold, weights.clone()).unwrap()); + // let threshold = 166; // fast path; not including this at the moment because + // threshold size barely influences benchmarks + // wcs.push(WeightedConfig::::new(threshold, weights).unwrap()); let weights = vec![ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -289,7 +295,7 @@ pub fn get_weighted_configs_for_benchmarking() -> Vec { ]; let total_weight: usize = weights.iter().sum(); let threshold = total_weight * 2 / 3 + 1; - wcs.push(WeightedConfigBlstrs::new(threshold, weights).unwrap()); + wcs.push(WeightedConfig::::new(threshold, weights).unwrap()); let weights = vec![ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, @@ -300,7 +306,7 @@ pub fn get_weighted_configs_for_benchmarking() -> Vec { ]; let total_weight: usize = weights.iter().sum(); let threshold = total_weight * 2 / 3 + 1; - wcs.push(WeightedConfigBlstrs::new(threshold, weights).unwrap()); + wcs.push(WeightedConfig::::new(threshold, weights).unwrap()); let weights = vec![ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, @@ -311,7 +317,7 @@ pub fn get_weighted_configs_for_benchmarking() -> Vec { ]; let total_weight: usize = 
weights.iter().sum(); let threshold = total_weight * 2 / 3 + 1; - wcs.push(WeightedConfigBlstrs::new(threshold, weights).unwrap()); + wcs.push(WeightedConfig::::new(threshold, weights).unwrap()); wcs } diff --git a/crates/aptos-dkg/src/range_proofs/dekart_univariate.rs b/crates/aptos-dkg/src/range_proofs/dekart_univariate.rs index 8cfcec0f6c26b..d27a46ccce4d3 100644 --- a/crates/aptos-dkg/src/range_proofs/dekart_univariate.rs +++ b/crates/aptos-dkg/src/range_proofs/dekart_univariate.rs @@ -2,7 +2,8 @@ // Licensed pursuant to the Innovation-Enabling Source Code License, available at https://github.com/aptos-labs/aptos-core/blob/main/LICENSE use crate::{ - algebra::polynomials, fiat_shamir, pcs::univariate_kzg, range_proofs::traits, + algebra::polynomials, fiat_shamir, pcs::univariate_kzg, + pvss::chunky::chunked_elgamal::correlated_randomness, range_proofs::traits, sigma_protocol::homomorphism::Trait, utils, Scalar, }; use anyhow::ensure; @@ -11,7 +12,7 @@ use ark_ec::{ pairing::{Pairing, PairingOutput}, CurveGroup, PrimeGroup, VariableBaseMSM, }; -use ark_ff::{AdditiveGroup, Field, PrimeField}; +use ark_ff::{AdditiveGroup, Field}; use ark_poly::{self, EvaluationDomain, Radix2EvaluationDomain}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError}; #[cfg(feature = "range_proof_timing")] @@ -290,7 +291,7 @@ impl traits::BatchedRangeProof for Proof { #[cfg(feature = "range_proof_timing")] let start = Instant::now(); - let r = correlated_randomness(rng, 2, ell, &r.0); + let r = correlated_randomness(rng, 2, ell.try_into().unwrap(), &r.0); #[cfg(feature = "range_proof_timing")] { @@ -671,62 +672,3 @@ fn fiat_shamir_challenges( (alpha_vals, beta_vals) } - -/// Generate correlated random values whose weighted sum equals `target_sum`. -/// -/// Returns `num_chunks` field elements `[r_0, ..., r_{num_chunks-1}]` such that: -/// `r_0 + r_1 * radix + r_2 * radix^2 + ... + r_{num_chunks-1} * radix^{num_chunks-1} = target_sum`. 
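For reference, the helper removed here (now imported from `chunky::chunked_elgamal`) realizes the property stated in the doc comment by sampling every value except the first and letting the first absorb the difference. A minimal standalone sketch of that invariant, using arithmetic modulo a small stand-in prime rather than the scalar field (the constant `P`, the name `correlated_mod_p`, and the fixed sample values are illustrative only):

const P: u64 = 1_000_000_007; // stand-in prime modulus; the real helper works over the curve's scalar field

fn correlated_mod_p(radix: u64, num_chunks: usize, target: u64, tail: &[u64]) -> Vec<u64> {
    assert_eq!(tail.len(), num_chunks - 1);
    let mut r = vec![0u64; num_chunks];
    let mut weighted_tail = 0u64;
    let mut base = radix % P; // radix^1, radix^2, ...
    for i in 1..num_chunks {
        r[i] = tail[i - 1] % P; // in the real helper these come from `sample_field_element`
        weighted_tail = (weighted_tail + r[i] * base % P) % P;
        base = base * (radix % P) % P;
    }
    // r_0 absorbs the rest, so that sum_i r_i * radix^i = target (mod P)
    r[0] = (target % P + P - weighted_tail) % P;
    r
}

fn main() {
    let (radix, target) = (4u64, 42u64);
    let r = correlated_mod_p(radix, 8, target, &[7, 11, 13, 17, 19, 23, 29]);
    let (mut acc, mut base) = (0u64, 1u64);
    for &r_i in &r {
        acc = (acc + r_i * base % P) % P;
        base = base * radix % P;
    }
    assert_eq!(acc, target % P); // the radix-weighted sum hits the target
}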
-pub fn correlated_randomness( - rng: &mut R, - radix: u64, - num_chunks: usize, - target_sum: &F, -) -> Vec -where - F: PrimeField, // PrimeField because of sample_field_element() - R: rand_core::RngCore + rand_core::CryptoRng, -{ - let mut r_vals = vec![F::zero(); num_chunks]; - let mut remaining = *target_sum; - let radix_f = F::from(radix); - let mut cur_base = radix_f; - - for i in 1..num_chunks { - r_vals[i] = sample_field_element(rng); - remaining -= r_vals[i] * cur_base; - cur_base *= radix_f; - } - r_vals[0] = remaining; - - r_vals -} - -#[cfg(test)] -mod tests { - use crate::range_proofs::dekart_univariate::correlated_randomness; - use ark_ff::PrimeField; - use rand::thread_rng; - - #[cfg(test)] - fn test_correlated_randomness_generic() { - let mut rng = thread_rng(); - let target_sum = F::one(); - let radix: u64 = 4; - let num_chunks: usize = 8; - - let coefs = correlated_randomness(&mut rng, radix, num_chunks, &target_sum); - - // Compute actual sum: Σ coef[i] * radix^i - let actual_sum: F = (0..num_chunks) - .map(|i| coefs[i] * F::from(radix.pow(i as u32))) - .sum(); - - assert_eq!(target_sum, actual_sum); - } - - #[test] - fn test_correlated_randomness_bn254() { - use ark_bn254::Fr; - test_correlated_randomness_generic::(); - } -} diff --git a/crates/aptos-dkg/src/range_proofs/dekart_univariate_v2.rs b/crates/aptos-dkg/src/range_proofs/dekart_univariate_v2.rs index 7e65459ce6d5f..48c3b9090c653 100644 --- a/crates/aptos-dkg/src/range_proofs/dekart_univariate_v2.rs +++ b/crates/aptos-dkg/src/range_proofs/dekart_univariate_v2.rs @@ -7,15 +7,21 @@ use crate::{ algebra::polynomials, pcs::univariate_hiding_kzg, range_proofs::traits, - sigma_protocol::{self, homomorphism, homomorphism::Trait as _, Trait as _}, + sigma_protocol::{ + self, + homomorphism::{self, Trait as _}, + Trait as _, + }, utils, Scalar, }; use aptos_crypto::arkworks::{ self, msm::MsmInput, random::{ - sample_field_element, sample_field_elements, unsafe_random_point, unsafe_random_points, + sample_field_element, sample_field_elements, unsafe_random_point, + unsafe_random_points_group, }, + srs::{SrsBasis, SrsType}, GroupGenerators, }; use ark_ec::{pairing::Pairing, CurveGroup, PrimeGroup, VariableBaseMSM}; @@ -46,10 +52,10 @@ impl Proof { /// Useful for testing and benchmarking. 
TODO: might be able to derive this through macros etc pub fn generate(ell: u8, rng: &mut R) -> Self { Self { - hatC: unsafe_random_point(rng), + hatC: unsafe_random_point::(rng).into(), pi_PoK: two_term_msm::Proof::generate(rng), - Cs: unsafe_random_points(ell as usize, rng), - D: unsafe_random_point(rng), + Cs: unsafe_random_points_group(ell as usize, rng), + D: unsafe_random_point::(rng).into(), a: sample_field_element(rng), a_h: sample_field_element(rng), a_js: sample_field_elements(ell as usize, rng), @@ -259,8 +265,12 @@ impl traits::BatchedRangeProof for Proof { let trapdoor = univariate_hiding_kzg::Trapdoor::::rand(rng); let xi_1_proj: E::G1 = group_generators.g1 * trapdoor.xi; - let (vk_hkzg, ck_S) = - univariate_hiding_kzg::setup(max_n + 1, group_generators.clone(), trapdoor, rng); + let (vk_hkzg, ck_S) = univariate_hiding_kzg::setup( + max_n + 1, + SrsType::Lagrange, + group_generators.clone(), + trapdoor, + ); let h_denom_eval = compute_h_denom_eval::(&ck_S.roots_of_unity_in_eval_dom); @@ -276,9 +286,14 @@ impl traits::BatchedRangeProof for Proof { roots_of_unity: ck_S.roots_of_unity_in_eval_dom.clone(), }; + let lagr_0: E::G1Affine = match &ck_S.msm_basis { + SrsBasis::Lagrange { lagr: lagr_g1 } => lagr_g1[0], + SrsBasis::PowersOfTau { .. } => panic!("Wrong basis, this should not happen"), + }; + let vk = VerificationKey { xi_1: xi_1_proj.into_affine(), - lagr_0: ck_S.lagr_g1[0], + lagr_0, vk_hkzg, verifier_precomputed, }; @@ -347,12 +362,19 @@ impl traits::BatchedRangeProof for Proof { let univariate_hiding_kzg::CommitmentKey { xi_1, - lagr_g1, + msm_basis, eval_dom, m_inv: num_omegas_inv, .. } = ck_S; + let lagr_g1: &[E::G1Affine] = match msm_basis { + SrsBasis::Lagrange { lagr: lagr_g1 } => lagr_g1, + SrsBasis::PowersOfTau { .. } => { + panic!("Expected Lagrange basis, somehow got PowersOfTau basis instead") + }, + }; + debug_assert_eq!( *num_omegas_inv, E::ScalarField::from(num_omegas as u64).inverse().unwrap() @@ -420,7 +442,7 @@ impl traits::BatchedRangeProof for Proof { .collect(); let hkzg_commitment_hom = univariate_hiding_kzg::CommitmentHomomorphism:: { - lagr_g1, + msm_basis: lagr_g1, xi_1: *xi_1, }; let Cs: Vec<_> = f_js_evals @@ -730,7 +752,7 @@ impl traits::BatchedRangeProof for Proof { use sigma_protocol::homomorphism::TrivialShape as HkzgCommitment; univariate_hiding_kzg::CommitmentHomomorphism::verify( - vk_hkzg.clone(), + *vk_hkzg, HkzgCommitment(U), // TODO: Ugh univariate_hiding_kzg::Commitment(U) does not work because it's a tuple struct, see https://github.com/rust-lang/rust/issues/17422; So make it a struct with one named field? gamma, a_u, @@ -874,6 +896,8 @@ mod fiat_shamir { /// Conceptually, this behaves similarly to a Pedersen commitment: /// /// `output = base_1 * scalar_1 + base_2 * scalar_2` +/// +/// The resulting sigma protocol is also known as Okamoto's protocol (see 19.5.1 in the book of Boneh-Shoup) pub mod two_term_msm { // TODO: maybe fixed_base_msms should become a folder and put its code inside mod.rs? Then put this mod inside of that folder? use super::*; @@ -892,9 +916,9 @@ pub mod two_term_msm { /// Useful for testing and benchmarking. 
TODO: might be able to derive this through macros etc pub fn generate(rng: &mut R) -> Self { Self { - first_proof_item: FirstProofItem::Commitment(CodomainShape(unsafe_random_point( - rng, - ))), + first_proof_item: FirstProofItem::Commitment(CodomainShape( + unsafe_random_point::(rng).into(), + )), z: Witness { poly_randomness: Scalar::rand(rng), hiding_kzg_randomness: Scalar::rand(rng), diff --git a/crates/aptos-dkg/src/range_proofs/mod.rs b/crates/aptos-dkg/src/range_proofs/mod.rs index f5c25b00ff8cf..f30f7851b2d4a 100644 --- a/crates/aptos-dkg/src/range_proofs/mod.rs +++ b/crates/aptos-dkg/src/range_proofs/mod.rs @@ -3,4 +3,5 @@ pub mod dekart_univariate; pub mod dekart_univariate_v2; +//pub mod dekart_multivariate; pub mod traits; diff --git a/crates/aptos-dkg/src/sigma_protocol/homomorphism/fixed_base_msms.rs b/crates/aptos-dkg/src/sigma_protocol/homomorphism/fixed_base_msms.rs index b137411161ef5..e5504b86b2e7e 100644 --- a/crates/aptos-dkg/src/sigma_protocol/homomorphism/fixed_base_msms.rs +++ b/crates/aptos-dkg/src/sigma_protocol/homomorphism/fixed_base_msms.rs @@ -26,7 +26,7 @@ use std::fmt::Debug; /// - A uniform “shape” abstraction for collecting and flattening MSM outputs /// for batch verification in Σ-protocols. pub trait Trait: homomorphism::Trait> { - // Type representing the scalar used in the `MsmInput`s. Convenient to repeat here + // Type representing the scalar used in the `MsmInput`s. Convenient to repeat here, and currently used in `prove_homomorphism()` where it could be replaced by e.g. `C::ScalarField`... (or maybe by going inside of MsmInput) type Scalar: ark_ff::PrimeField; // Probably need less here but this what it'll be in practice /// Type representing a single MSM input (a set of bases and scalars). Normally, this would default @@ -79,7 +79,7 @@ pub trait Trait: homomorphism::Trait, ) -> Self::CodomainShape where diff --git a/crates/aptos-dkg/src/sigma_protocol/homomorphism/mod.rs b/crates/aptos-dkg/src/sigma_protocol/homomorphism/mod.rs index 6a18b0c2a91dc..eafd133264fda 100644 --- a/crates/aptos-dkg/src/sigma_protocol/homomorphism/mod.rs +++ b/crates/aptos-dkg/src/sigma_protocol/homomorphism/mod.rs @@ -102,7 +102,7 @@ pub trait EntrywiseMap { fn map(self, f: F) -> Self::Output where - F: Fn(T) -> U, + F: FnMut(T) -> U, U: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq; } @@ -121,9 +121,9 @@ impl Entrywis type Output = TrivialShape; - fn map(self, f: F) -> Self::Output + fn map(self, mut f: F) -> Self::Output where - F: Fn(T) -> U, + F: FnMut(T) -> U, U: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq, { TrivialShape(f(self.0)) @@ -164,7 +164,7 @@ impl Entrywis fn map(self, f: F) -> Self::Output where - F: Fn(T) -> U, + F: FnMut(T) -> U, U: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq, { VectorShape(self.0.into_iter().map(f).collect()) diff --git a/crates/aptos-dkg/src/sigma_protocol/homomorphism/tuple.rs b/crates/aptos-dkg/src/sigma_protocol/homomorphism/tuple.rs index 560b94c51ebd3..db56812cc890b 100644 --- a/crates/aptos-dkg/src/sigma_protocol/homomorphism/tuple.rs +++ b/crates/aptos-dkg/src/sigma_protocol/homomorphism/tuple.rs @@ -37,6 +37,7 @@ where pub hom2: H2, } +// We need to add `E: Pairing` because of the sigma protocol implementation below, Rust wouldn't allow that otherwise #[derive(CanonicalSerialize, Debug, Clone, PartialEq, Eq)] pub struct PairingTupleHomomorphism where @@ -162,12 +163,12 @@ where type Output = TupleCodomainShape, B::Output>; - fn map(self, f: F) -> Self::Output + fn 
map(self, mut f: F) -> Self::Output where - F: Fn(T) -> U, + F: FnMut(T) -> U, U: CanonicalSerialize + CanonicalDeserialize + Clone + Debug + Eq, { - TupleCodomainShape(self.0.map(&f), self.1.map(f)) + TupleCodomainShape(self.0.map(&mut f), self.1.map(f)) } } @@ -245,7 +246,9 @@ use aptos_crypto::utils; use ark_ff::{UniformRand, Zero}; use serde::Serialize; -// Slightly hacky implementation of a sigma protocol for `PairingTupleHomomorphism` +/// Slightly hacky implementation of a sigma protocol for `PairingTupleHomomorphism` +/// +/// We need `E: Pairing` here because the sigma_protocol needs to know which curves `H1` and `H2` are working over impl PairingTupleHomomorphism where H1: sigma_protocol::Trait, @@ -294,11 +297,11 @@ where } #[allow(non_snake_case)] - pub fn verify( + pub fn verify( &self, public_statement: &::Codomain, proof: &Proof, // Would like to set &Proof, but that ties the lifetime of H to that of Self, but we'd like it to be eg static - cntxt: &C, + cntxt: &Ct, ) -> anyhow::Result<()> where H: homomorphism::Trait< @@ -346,7 +349,7 @@ where ); let mut rng = ark_std::rand::thread_rng(); // TODO: make this part of the function input? - let beta = H1::Scalar::rand(&mut rng); + let beta = H1::Scalar::rand(&mut rng); // verifier-specific challenge let len1 = public_statement.0.clone().into_iter().count(); // hmm maybe pass the into_iter version in merge_msm_terms? let len2 = public_statement.1.clone().into_iter().count(); let powers_of_beta = utils::powers(beta, len1 + len2); diff --git a/crates/aptos-dkg/src/sigma_protocol/traits.rs b/crates/aptos-dkg/src/sigma_protocol/traits.rs index c9dde08ae32c5..4ad69663bb5be 100644 --- a/crates/aptos-dkg/src/sigma_protocol/traits.rs +++ b/crates/aptos-dkg/src/sigma_protocol/traits.rs @@ -15,14 +15,16 @@ use aptos_crypto::{ utils, }; use ark_ec::CurveGroup; -use ark_ff::{Field, PrimeField}; +use ark_ff::{Field, Fp, FpConfig, PrimeField}; use ark_serialize::{ CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError, Valid, Validate, }; use ark_std::{io::Read, UniformRand}; +use rand_core::{CryptoRng, RngCore}; use serde::Serialize; use std::{fmt::Debug, io::Write}; +// `CurveGroup` is needed here because the code does `into_affine()` pub trait Trait: fixed_base_msms::Trait< Domain: Witness, @@ -36,7 +38,7 @@ pub trait Trait: /// transcript operations within the protocol are uniquely namespaced fn dst(&self) -> Vec; - fn prove( + fn prove( &self, witness: &Self::Domain, statement: &Self::Codomain, @@ -146,6 +148,7 @@ pub trait Trait: let mut final_scalars = Vec::new(); // Collect all projective points to batch normalize + // TODO: remove this stuff... we may assume things are deserialised and hence essentially affine, so into_affine() should do let mut all_points_to_normalize = Vec::new(); for (A, P) in prover_first_message.clone().into_iter() .zip(statement.clone().into_iter()) @@ -189,30 +192,25 @@ pub trait Witness: CanonicalSerialize + CanonicalDeserialize + Clone + /// Samples a random element in the domain. The prover has a witness `w` and calls `w.rand(rng)` to get /// the prover's first nonce (of the same "size" as `w`, hence why this cannot be a static method), /// which it then uses to compute the prover's first message in the sigma protocol. 
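Together, `rand` and `scaled_add` are exactly what the generic prover in `prove_homomorphism` needs: `rand` yields the nonce behind the prover's first message, and `scaled_add` forms the response z = r + c*w. A self-contained toy Schnorr run over the multiplicative group modulo a small prime, showing why that response verifies (the constants `P`, `G` and the fixed witness/nonce/challenge are illustrative only; the crate does the same over curve scalars via `Scalar`/`Fp`):

const P: u128 = 2_147_483_647; // 2^31 - 1, toy modulus
const Q: u128 = P - 1;         // exponents are reduced modulo the group order
const G: u128 = 7;             // toy base

fn pow_mod(mut b: u128, mut e: u128, m: u128) -> u128 {
    let mut acc = 1u128;
    b %= m;
    while e > 0 {
        if e & 1 == 1 {
            acc = acc * b % m;
        }
        b = b * b % m;
        e >>= 1;
    }
    acc
}

fn main() {
    let w = 123_456u128;        // witness
    let x = pow_mod(G, w, P);   // public statement X = G^w
    let r = 987_654u128;        // prover nonce, the role of `witness.rand(rng)`
    let a = pow_mod(G, r, P);   // first message A = G^r
    let c = 31_337u128;         // challenge
    let z = (r + c * w) % Q;    // response, the role of `r.scaled_add(&w, c)`
    // Verifier: G^z == A * X^c, which holds because z = r + c*w in the exponent
    assert_eq!(pow_mod(G, z, P), a * pow_mod(x, c, P) % P);
}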
- fn rand(&self, rng: &mut R) -> Self; + fn rand(&self, rng: &mut R) -> Self; } -// use ark_ff::FpConfig; -// use ark_ff::Fp; - -// impl> Witness for Fp { -// type Scalar = Fp; - -// fn scaled_add(self, other: &Self, c: Fp) -> Self { -// Scalar(self.0 + (c) * other.0) -// } +impl> Witness> for Fp { + fn scaled_add(self, other: &Self, c: Fp) -> Self { + self + c * other + } -// fn rand(&self, rng: &mut R) -> Self { -// Scalar(sample_field_element(rng)) -// } -// } + fn rand(&self, rng: &mut R) -> Self { + sample_field_element(rng) + } +} impl Witness for Scalar { fn scaled_add(self, other: &Self, c: F) -> Self { Scalar(self.0 + (c) * other.0) } - fn rand(&self, rng: &mut R) -> Self { + fn rand(&self, rng: &mut R) -> Self { Scalar(sample_field_element(rng)) } } @@ -225,7 +223,7 @@ impl> Witness for Vec { .collect() } - fn rand(&self, rng: &mut R) -> Self { + fn rand(&self, rng: &mut R) -> Self { self.iter().map(|elem| elem.rand(rng)).collect() } } @@ -463,6 +461,8 @@ where ) } +// We're keeping this separate because it only needs the homomorphism property rather than being a bunch of "fixed-base MSMS", +// and moreover in this way it gets reused in the PairingTupleHomomorphism code which has a custom sigma protocol implementation #[allow(non_snake_case)] pub fn prove_homomorphism( homomorphism: &H, @@ -476,7 +476,7 @@ pub fn prove_homomorphism, H::Codomain: Statement, - R: rand_core::RngCore + rand_core::CryptoRng, + R: RngCore + CryptoRng, { // Step 1: Sample randomness. Here the `witness` is only used to make sure that `r` has the right dimensions let r = witness.rand(rng); diff --git a/crates/aptos-dkg/tests/pvss.rs b/crates/aptos-dkg/tests/pvss.rs index dc728719d9186..61a88d9f858bd 100644 --- a/crates/aptos-dkg/tests/pvss.rs +++ b/crates/aptos-dkg/tests/pvss.rs @@ -29,6 +29,7 @@ use aptos_dkg::pvss::{ GenericWeighting, ThresholdConfigBlstrs, }; use ark_bn254::Bn254; +use ark_ec::pairing::Pairing; use rand::{rngs::StdRng, thread_rng}; use rand_core::SeedableRng; @@ -55,28 +56,6 @@ fn test_pvss_all_unweighted() { // TODO: Remove? pvss_deal_verify_and_reconstruct::(&tc, seed.to_bytes_le()); } - - // Restarting the loop here because now it'll grab **arkworks** `ThresholdConfig`s over BN254 instead - let tcs = test_utils::get_threshold_configs_for_testing_smaller(); - for tc in tcs.iter().take(20) { - // Reduce the number of tcs to make it a bit faster? - println!("\nTesting {tc} PVSS"); - - let seed = random_scalar(&mut rng); - - type ChunkyTranscriptBn254 = chunky::UnsignedUnweightedTranscript; - - // Chunky - nonaggregatable_pvss_deal_verify_and_reconstruct::( - &tc, - seed.to_bytes_le(), - ); - - pvss_deal_verify_and_reconstruct_from_subtranscript::( - &tc, - seed.to_bytes_le(), - ); - } } #[test] @@ -123,23 +102,20 @@ fn test_pvss_all_weighted() { Bn254, chunky::SignedWeightedTranscript, >(&wc, seed.to_bytes_le()); + nonaggregatable_weighted_pvss_deal_verify_and_reconstruct::< + Bn254, + chunky::SignedWeightedTranscriptv2, + >(&wc, seed.to_bytes_le()); // Unsigned weighted Chunky nonaggregatable_weighted_pvss_deal_verify_and_reconstruct::< Bn254, chunky::UnsignedWeightedTranscript, >(&wc, seed.to_bytes_le()); - - //type SignedChunkyTranscriptBn254 = signed::GenericSigning>; TODO!! 
-        // type UnsignedChunkyTranscriptBn254 = chunky::UnsignedWeightedTranscript;
-
-        // OLD: ::SecretSharingConfig
-        // if wc.get_total_num_players() > 8 {
-        //     test_pvss_aggregate_subtranscript_and_decrypt::(
-        //         &wc,
-        //         seed.to_bytes_le(),
-        //     );
-        // }
+        nonaggregatable_weighted_pvss_deal_verify_and_reconstruct::<
+            Bn254,
+            chunky::UnsignedWeightedTranscriptv2,
+        >(&wc, seed.to_bytes_le());
     }
 }
@@ -157,27 +133,27 @@ fn test_pvss_transcript_size() {
     // Restarting the loop here because now it'll grab **arkworks** `ThresholdConfig`s with BN254
     // uses default chunk sizes, so probably want to modify this at some point to allow a wider range
     // Ideally should iterate over a vec of (t, n), not the actual threshold configs... but won't be a bottleneck
-    for sc in get_threshold_configs_for_benchmarking().iter().take(1) {
+    for sc in get_weighted_configs_for_benchmarking().iter().take(1) {
         // Only trying 1 for now to keep tests fast (also the second one has the same n, which means it would yield the same size...)
         println!();
         let actual_size =
-            actual_transcript_size::>(&sc);
-        print_transcript_size::>(
+            actual_transcript_size::>(&sc);
+        print_transcript_size::>(
             "Actual for BN254",
             &sc,
             actual_size,
-        );
+        ); // TODO: also do signed here? or only do signed?
     }

     // Restarting so it grabs BLS12-381 instead of BN254... TODO: could get rid of this with some work
-    for sc in get_threshold_configs_for_benchmarking().iter().take(1) {
+    for sc in get_weighted_configs_for_benchmarking().iter().take(1) {
         // Only trying 1 for now to keep tests fast (also the second one has the same n, which means it would yield the same size...)
         println!();
         let actual_size = actual_transcript_size::<
-            chunky::UnsignedUnweightedTranscript,
+            chunky::UnsignedWeightedTranscript,
         >(&sc);
-        print_transcript_size::>(
+        print_transcript_size::>(
             "Actual for BLS12_381",
             &sc,
             actual_size,
@@ -293,6 +269,7 @@ fn test_pvss_aggregate_subtranscript_and_decrypt(
 }

 #[cfg(test)]
+#[allow(dead_code)] // TODO
 fn nonaggregatable_pvss_deal_verify_and_reconstruct(
     sc: &T::SecretSharingConfig,
     seed_bytes: [u8; 32],
@@ -329,7 +306,6 @@ fn nonaggregatable_pvss_deal_verify_and_reconstruct(
@@ -372,6 +348,7 @@ fn nonaggregatable_weighted_pvss_deal_verify_and_reconstruct(
 }

 #[cfg(test)]
+#[allow(dead_code)] // TODO
 fn pvss_deal_verify_and_reconstruct_from_subtranscript<
     T: Transcript + HasAggregatableSubtranscript,
 >(
diff --git a/crates/aptos-dkg/tests/secret_sharing_config.rs b/crates/aptos-dkg/tests/secret_sharing_config.rs
index 297f49fb7e8b5..68b12cc4bc814 100644
--- a/crates/aptos-dkg/tests/secret_sharing_config.rs
+++ b/crates/aptos-dkg/tests/secret_sharing_config.rs
@@ -4,14 +4,14 @@
 #![allow(clippy::ptr_arg)]
 #![allow(clippy::needless_borrow)]

-use aptos_crypto::traits::SecretSharingConfig as _;
+use aptos_crypto::{arkworks::shamir::ShamirThresholdConfig, traits::SecretSharingConfig as _};
 use aptos_dkg::pvss::test_utils::get_weighted_configs_for_benchmarking;
 use rand::thread_rng;

 #[ignore]
 #[test]
 fn print_best_worst_avg_case_subsets() {
-    let wcs = get_weighted_configs_for_benchmarking();
+    let wcs = get_weighted_configs_for_benchmarking::>();
     let mut rng = thread_rng();
diff --git a/crates/aptos-dkg/tests/sigma_protocol.rs b/crates/aptos-dkg/tests/sigma_protocol.rs
index 981666912ba66..20af79b9c0a3d 100644
--- a/crates/aptos-dkg/tests/sigma_protocol.rs
+++ b/crates/aptos-dkg/tests/sigma_protocol.rs
@@ -3,7 +3,7 @@
 use aptos_crypto::arkworks::{
     msm::{IsMsmInput, MsmInput},
-    random::sample_field_element,
+    random::{sample_field_element, sample_field_elements},
 };
 use aptos_dkg::{
     sigma_protocol::{
@@ -20,10 +20,13 @@
 use ark_bls12_381::Bls12_381;
 use ark_bn254::Bn254;
 use ark_ec::{pairing::Pairing, CurveGroup, PrimeGroup};
+use ark_ff::{Fp, FpConfig};
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
 use rand::thread_rng;
 use std::fmt::Debug;

+const CNTXT: &[u8; 32] = b"SIGMA-PROTOCOL-TESTS-SOK-CONTEXT";
+
 #[cfg(test)]
 pub fn test_sigma_protocol(hom: H, witness: H::Domain)
 where
@@ -33,29 +36,48 @@ where
     let mut rng = thread_rng();

     let statement = hom.apply(&witness);
-    let ctxt = b"SIGMA-PROTOCOL-CONTEXT";
-    let proof = hom.prove(&witness, &statement, ctxt, &mut rng);
+    let proof = hom.prove(&witness, &statement, CNTXT, &mut rng);

-    hom.verify(&statement, &proof, ctxt)
+    hom.verify(&statement, &proof, CNTXT)
         .expect("Sigma protocol proof failed verification");
 }

-fn test_imhomog_chaum_pedersen(
+// TODO: Find a way to make this more modular
+fn test_imhomog_chaum_pedersen<
+    E: Pairing>,
+    const N: usize,
+    P: FpConfig,
+>(
     hom: chaum_pedersen::InhomogChaumPedersen,
-    witness: Scalar,
+    witness: E::ScalarField,
+) {
+    let mut rng = thread_rng();
+
+    let statement = hom.apply(&witness);
+
+    let proof = hom.prove(&witness, &statement, CNTXT, &mut rng);
+
+    hom.verify(&statement, &proof, CNTXT)
+        .expect("Inhomogeneous Chaum Pederson sigma proof failed verification");
+}
+
+use aptos_dkg::pvss::chunky::chunked_scalar_mul::Witness;
+
+fn test_imhomog_scalar_mul(
+    hom: chunked_scalar_mul::InhomogChunkedScalarMul,
+    witness: Witness,
 ) where
     E: Pairing,
 {
     let mut rng = thread_rng();

     let statement = hom.apply(&witness);
-    let ctxt = b"SIGMA-PROTOCOL-CONTEXT";
-    let proof = hom.prove(&witness, &statement, ctxt, &mut rng);
+    let proof = hom.prove(&witness, &statement, CNTXT, &mut rng);

-    hom.verify(&statement, &proof, ctxt)
-        .expect("PairingTupleHomomorphism proof failed verification");
+    hom.verify(&statement, &proof, CNTXT)
+        .expect("Inhomogeneous Chaum Pederson sigma proof failed verification");
 }

 mod schnorr {
@@ -77,16 +99,20 @@ mod schnorr {
         }
     }

-    impl homomorphism::Trait for Schnorr {
+    impl>, const N: usize, P: FpConfig> homomorphism::Trait
+        for Schnorr
+    {
         type Codomain = CodomainShape;
-        type Domain = Scalar;
+        type Domain = Fp;

         fn apply(&self, input: &Self::Domain) -> Self::Codomain {
             self.apply_msm(self.msm_terms(input))
         }
     }

-    impl fixed_base_msms::Trait for Schnorr {
+    impl>, const N: usize, P: FpConfig>
+        fixed_base_msms::Trait for Schnorr
+    {
         type CodomainShape = CodomainShape
         where
@@ -98,16 +124,20 @@
         fn msm_terms(&self, input: &Self::Domain) -> Self::CodomainShape {
             CodomainShape(MsmInput {
                 bases: vec![self.G],
-                scalars: vec![input.0],
+                scalars: vec![*input],
             })
         }

         fn msm_eval(input: Self::MsmInput) -> Self::MsmOutput {
+            // for the homomorphism we only need `input.bases()[0] * input.scalars()[0]`
+            // but the verification needs a 3-term MSM... so we should really do a custom MSM which dispatches based on length TODO
             C::msm(input.bases(), input.scalars()).expect("MSM failed in Schnorr")
         }
     }

-    impl sigma_protocol::Trait for Schnorr {
+    impl>, const N: usize, P: FpConfig>
+        sigma_protocol::Trait for Schnorr
+    {
         fn dst(&self) -> Vec {
             b"SCHNORR_SIGMA_PROTOCOL_DST".to_vec()
         }
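The `msm_eval` TODO above notes that while the Schnorr homomorphism itself is a single fixed-base multiplication, verification needs a 3-term MSM. A minimal standalone sketch of that check, assuming arkworks BLS12-381, with a random scalar standing in for the Fiat-Shamir challenge (illustrative only, not code from this change):

// Hypothetical illustration; shows Schnorr verification folded into one MSM:
// z*G + (-c)*P + (-1)*A should be the identity.
use ark_bls12_381::{Fr, G1Projective};
use ark_ec::{CurveGroup, PrimeGroup, VariableBaseMSM};
use ark_ff::{UniformRand, Zero};
use ark_std::rand::thread_rng;

fn main() {
    let mut rng = thread_rng();
    let g = G1Projective::generator();

    // Transcript of an honest Schnorr run: witness w, statement P = w*G, nonce r,
    // first message A = r*G, challenge c, response z = r + c*w.
    let w = Fr::rand(&mut rng);
    let p = g * w;
    let r = Fr::rand(&mut rng);
    let a = g * r;
    let c = Fr::rand(&mut rng); // stands in for the Fiat-Shamir challenge
    let z = r + c * w;

    // Verification as a single 3-term MSM over the bases [G, P, A].
    let bases = [g.into_affine(), p.into_affine(), a.into_affine()];
    let scalars = [z, -c, -Fr::from(1u64)];
    let acc = G1Projective::msm(&bases, &scalars).expect("msm should not fail");
    assert!(acc.is_zero());
}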
@@ -121,7 +151,11 @@ mod chaum_pedersen {
     // Implementing e.g. `Default` here would require a wrapper, but then `sigma_protocol::Trait` would have to get re-implemented...
     #[allow(non_snake_case)]
-    pub fn make_chaum_pedersen_instance() -> ChaumPedersen {
+    pub fn make_chaum_pedersen_instance<
+        C: CurveGroup>,
+        const N: usize,
+        P: FpConfig,
+    >() -> ChaumPedersen {
         let G_1 = C::generator().into_affine();
         let G_2 = (G_1 * C::ScalarField::from(123456789u64)).into_affine();
@@ -138,7 +172,11 @@
         PairingTupleHomomorphism::G1>, Schnorr<::G2>>;

     #[allow(non_snake_case)]
-    pub fn make_inhomogeneous_chaum_pedersen_instance() -> InhomogChaumPedersen {
+    pub fn make_inhomogeneous_chaum_pedersen_instance<
+        E: Pairing>,
+        const N: usize,
+        P: FpConfig,
+    >() -> InhomogChaumPedersen {
         let G_1 = E::G1::generator().into_affine();
         let G_2 = E::G2::generator().into_affine();
@@ -153,6 +191,32 @@
     }
 }

+mod chunked_scalar_mul {
+    use super::*;
+    use aptos_dkg::pvss::chunky::chunked_scalar_mul;
+
+    pub type InhomogChunkedScalarMul = PairingTupleHomomorphism<
+        E,
+        chunked_scalar_mul::Homomorphism<::G1>,
+        chunked_scalar_mul::Homomorphism<::G2>,
+    >;
+
+    #[allow(non_snake_case)]
+    pub fn make_inhomogeneous_scalar_mul() -> InhomogChunkedScalarMul {
+        let G_1 = E::G1::generator().into_affine();
+        let G_2 = E::G2::generator().into_affine();
+
+        let hom1 = chunked_scalar_mul::Homomorphism { base: G_1, ell: 16 };
+        let hom2 = chunked_scalar_mul::Homomorphism { base: G_2, ell: 16 };
+
+        PairingTupleHomomorphism {
+            hom1,
+            hom2,
+            _pairing: std::marker::PhantomData,
+        }
+    }
+}
+
 #[test]
 fn test_schnorr() {
     use schnorr::*;
@@ -160,11 +224,11 @@ fn test_schnorr() {
     let mut rng = thread_rng();

     // ---- Bn254 ----
-    let witness_bn = Scalar(sample_field_element(&mut rng));
+    let witness_bn = sample_field_element(&mut rng);
     test_sigma_protocol::<::G1, _>(Schnorr::default(), witness_bn);

     // ---- Bls12_381 ----
-    let witness_bls = Scalar(sample_field_element(&mut rng));
+    let witness_bls = sample_field_element(&mut rng);
     test_sigma_protocol::<::G1, _>(Schnorr::default(), witness_bls);
 }
@@ -175,18 +239,46 @@ fn test_chaum_pedersen() {
     let mut rng = thread_rng();

     // ---- Bn254 ----
-    let witness_bn = Scalar(sample_field_element(&mut rng));
+    let witness_bn = sample_field_element(&mut rng);
     test_sigma_protocol::<::G1, _>(make_chaum_pedersen_instance(), witness_bn);
-    test_imhomog_chaum_pedersen::(make_inhomogeneous_chaum_pedersen_instance(), witness_bn);
+    test_imhomog_chaum_pedersen::(
+        make_inhomogeneous_chaum_pedersen_instance(),
+        witness_bn,
+    );

     // ---- Bls12_381 ----
-    let witness_bls = Scalar(sample_field_element(&mut rng));
+    let witness_bls = sample_field_element(&mut rng);
     test_sigma_protocol::<::G1, _>(
         make_chaum_pedersen_instance(),
         witness_bls,
     );
-    test_imhomog_chaum_pedersen::(
+    test_imhomog_chaum_pedersen::(
         make_inhomogeneous_chaum_pedersen_instance(),
         witness_bls,
     );
+
+    use crate::chunked_scalar_mul::make_inhomogeneous_scalar_mul;
+    use aptos_dkg::pvss::chunky::{chunked_scalar_mul::Witness, chunks};
+
+    let ell = 16u8;
+
+    let scalars = sample_field_elements(1, &mut rng);
+
+    use ark_bn254::Fr;
+
+    let chunked_values: Vec>>> = scalars
+        .iter()
+        .map(|s| {
+            vec![chunks::scalar_to_le_chunks(ell, s)
+                .into_iter()
+                .map(Scalar)
+                .collect::>()]
+        })
+        .collect();
+
+    let witness = Witness {
+        chunked_values: chunked_values.clone(),
+    };
+
+    test_imhomog_scalar_mul::(make_inhomogeneous_scalar_mul(), witness);
 }
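The chunked witness built in `test_chaum_pedersen` relies on `chunks::scalar_to_le_chunks(ell, s)`. A minimal standalone sketch of the assumed encoding, little-endian radix-2^ell chunks that recombine to the original scalar, over BN254's `Fr`; the `to_le_chunks` helper here is a hypothetical stand-in, not the crate's implementation:

// Hypothetical helper mirroring what `scalar_to_le_chunks` is assumed to do; illustrative only.
use ark_bn254::Fr;
use ark_ff::{BigInteger, PrimeField, UniformRand};
use ark_std::rand::thread_rng;

/// Split a scalar into little-endian radix-2^ell chunks (ell must be byte-aligned in this sketch).
fn to_le_chunks(ell: u8, s: &Fr) -> Vec<Fr> {
    assert_eq!(ell % 8, 0, "sketch only handles byte-aligned chunk sizes");
    let bytes = s.into_bigint().to_bytes_le();
    bytes
        .chunks(ell as usize / 8)
        .map(Fr::from_le_bytes_mod_order)
        .collect()
}

fn main() {
    let mut rng = thread_rng();
    let ell = 16u8;
    let s = Fr::rand(&mut rng);

    let chunks = to_le_chunks(ell, &s);

    // Recombine: s == sum_i chunk_i * 2^(ell * i), evaluated here via Horner on the
    // reversed (most-significant-first) chunk list. This is the relation the chunked
    // scalar-multiplication homomorphism works over.
    let radix = Fr::from(1u64 << ell);
    let recombined = chunks
        .iter()
        .rev()
        .fold(Fr::from(0u64), |acc, chunk| acc * radix + chunk);
    assert_eq!(recombined, s);
}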