diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..5a161074 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,21 @@ +language: rust +cache: cargo + +rust: + - nightly + +env: + - TEST_COMMAND=test EXTRA_FLAGS='' FEATURES='' + - TEST_COMMAND=test EXTRA_FLAGS='' FEATURES='yolocrypto' + # run cargo bench with a filter that matches no benchmarks. + # this ensures the benchmarks build but doesn't run them on the CI server. + - TEST_COMMAND=bench EXTRA_FLAGS='"DONTRUNBENCHMARKS"' FEATURES='yolocrypto' + +script: + - cargo $TEST_COMMAND --features="$FEATURES" $EXTRA_FLAGS + +# enable this integration if we upstream the repo +#notifications: +# slack: +# rooms: +# - dalek-cryptography:Xxv9WotKYWdSoKlgKNqXiHoD#dalek-bots diff --git a/Cargo.toml b/Cargo.toml index 8a65a178..104e38d3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,10 +1,35 @@ [package] -name = "ristretto-bp" +name = "ristretto-bulletproofs" version = "0.1.0" -authors = ["Cathie "] +authors = ["Cathie Yun ", + "Henry de Valence ", + "Oleg Andreev "] +readme = "README.md" +license = "MIT" +repository = "https://github.com/chain/ristretto-bulletproofs" +categories = ["cryptography"] +keywords = ["cryptography", "ristretto", "zero-knowledge", "bulletproofs"] +description = "A pure-Rust implementation of Bulletproofs using Ristretto" [dependencies] -#curve25519-dalek = "^0.14" -curve25519-dalek = { git = "https://github.com/dalek-cryptography/curve25519-dalek", branch = "develop", features = ["nightly"]} +curve25519-dalek = { version = "^0.16", features = ["serde", "nightly"] } +subtle = "0.6" sha2 = "^0.7" -rand = "^0.4" \ No newline at end of file +rand = "^0.4" +byteorder = "1.2.1" +serde = "1" +serde_derive = "1" +tiny-keccak = "1.4.1" + +[dev-dependencies] +hex = "^0.3" +criterion = "0.2" +bincode = "1" + +[features] +yolocrypto = ["curve25519-dalek/yolocrypto"] + +[[bench]] +name = "bulletproofs" +harness = false + diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 
00000000..4969a6e0 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Chain, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..095ca5a5 --- /dev/null +++ b/Makefile @@ -0,0 +1,8 @@ +FEATURES := + +doc: + cargo rustdoc --features "$(FEATURES)" -- --html-in-header docs/assets/rustdoc-include-katex-header.html + +doc-internal: + cargo rustdoc --features "$(FEATURES)" -- --html-in-header docs/assets/rustdoc-include-katex-header.html --document-private-items + diff --git a/README.md b/README.md index 3aba0662..63339971 100644 --- a/README.md +++ b/README.md @@ -1 +1,73 @@ -# ristretto-bulletproofs +# Ristretto Bulletproofs + +A pure-Rust implementation of [Bulletproofs][bp_website] using [Ristretto][ristretto]. + +This crate contains both an implementation and a set of notes on how and why +Bulletproofs work. 
The [external documentation][doc_external] describes how to use this +crate’s API, while the [internal documentation][doc_internal] contains the notes. + +## WARNING + +This code is still research-quality. It is not (yet) suitable for deployment. + +## Documentation + +* [Public API documentation][doc_external] +* [Internal documentation][doc_internal] + * [Notes on how Bulletproofs work][bp_notes] (located in the internal `notes` module) + * [Range proof protocol description][rp_notes] + * [Inner product protocol description][ipp_notes] + + +Unfortunately, `cargo doc` does not yet have support for custom HTML injection +and for documenting private members, so the documentation is built using: + +```text +make doc # Builds external documentation +make doc-internal # Builds internal documentation +``` + +Note: `cargo doc --open` rebuilds the docs without the custom +invocation, so it may be necessary to rerun `make`. + +## Tests + +Run tests with `cargo test`. + +## Benchmarks + +This crate uses [criterion.rs][criterion] for benchmarks. Run benchmarks with +`cargo bench`. + +## Features + +The `yolocrypto` feature enables the `yolocrypto` feature in +`curve25519-dalek`, which enables the experimental AVX2 backend. To use it for +Bulletproofs, the `target_cpu` must support AVX2: + +```text +RUSTFLAGS="-C target_cpu=skylake" cargo bench --features "yolocrypto" +``` + +Skylake-X CPUs have double the AVX2 registers. To use them, try + +```text +RUSTFLAGS="-C target_cpu=skylake-avx512" cargo bench --features "yolocrypto" +``` + +This prevents spills in the AVX2 parallel field multiplication code, but causes +worse code generation elsewhere ¯\\\_(ツ)\_/¯ + +## About + +This is a research project being built for Chain, Inc, by Henry de Valence, +Cathie Yun, and Oleg Andreev. 
+ +[bp_website]: https://crypto.stanford.edu/bulletproofs/ +[ristretto]: https://doc.dalek.rs/curve25519_dalek/ristretto/index.html +[doc_external]: https://doc.dalek.rs/ristretto_bulletproofs/index.html +[doc_internal]: https://doc-internal.dalek.rs/ristretto_bulletproofs/index.html +[bp_notes]: https://doc-internal.dalek.rs/ristretto_bulletproofs/notes/index.html +[rp_notes]: https://doc-internal.dalek.rs/ristretto_bulletproofs/range_proof/index.html +[ipp_notes]: https://doc-internal.dalek.rs/ristretto_bulletproofs/inner_product_proof/index.html +[criterion]: https://github.com/japaric/criterion.rs diff --git a/Testfile b/Testfile new file mode 100644 index 00000000..efcfd7b8 --- /dev/null +++ b/Testfile @@ -0,0 +1,2 @@ +rustfmt: rustfmt --version && cargo fmt -- --write-mode=diff +cargotest: cargo test diff --git a/benches/bulletproofs.rs b/benches/bulletproofs.rs new file mode 100644 index 00000000..b7a06e47 --- /dev/null +++ b/benches/bulletproofs.rs @@ -0,0 +1,150 @@ +#![allow(non_snake_case)] +#[macro_use] +extern crate criterion; +use criterion::Criterion; + +extern crate rand; +use rand::{OsRng, Rng}; + +extern crate curve25519_dalek; +use curve25519_dalek::scalar::Scalar; + +extern crate ristretto_bulletproofs; +use ristretto_bulletproofs::ProofTranscript; +use ristretto_bulletproofs::RangeProof; +use ristretto_bulletproofs::{Generators, PedersenGenerators}; + +static AGGREGATION_SIZES: [usize; 6] = [1, 2, 4, 8, 16, 32]; + +fn create_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { + let label = format!("Aggregated {}-bit rangeproof creation", n); + + c.bench_function_over_inputs( + &label, + move |b, &&m| { + let generators = Generators::new(PedersenGenerators::default(), n, m); + let mut rng = OsRng::new().unwrap(); + + let (min, max) = (0u64, ((1u128 << n) - 1) as u64); + let values: Vec<u64> = (0..m).map(|_| rng.gen_range(min, max)).collect(); + let blindings: Vec<Scalar> = (0..m).map(|_| Scalar::random(&mut rng)).collect(); + + b.iter(|| { + // Each
proof creation requires a clean transcript. + let mut transcript = ProofTranscript::new(b"AggregateRangeProofBenchmark"); + + RangeProof::prove_multiple( + &generators, + &mut transcript, + &mut rng, + &values, + &blindings, + n, + ) + }) + }, + &AGGREGATION_SIZES, + ); +} + +fn create_aggregated_rangeproof_n_8(c: &mut Criterion) { + create_aggregated_rangeproof_helper(8, c); +} + +fn create_aggregated_rangeproof_n_16(c: &mut Criterion) { + create_aggregated_rangeproof_helper(16, c); +} + +fn create_aggregated_rangeproof_n_32(c: &mut Criterion) { + create_aggregated_rangeproof_helper(32, c); +} + +fn create_aggregated_rangeproof_n_64(c: &mut Criterion) { + create_aggregated_rangeproof_helper(64, c); +} + +fn verify_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { + let label = format!("Aggregated {}-bit rangeproof verification", n); + + c.bench_function_over_inputs( + &label, + move |b, &&m| { + let generators = Generators::new(PedersenGenerators::default(), n, m); + let mut rng = OsRng::new().unwrap(); + + let (min, max) = (0u64, ((1u128 << n) - 1) as u64); + let values: Vec<u64> = (0..m).map(|_| rng.gen_range(min, max)).collect(); + let blindings: Vec<Scalar> = (0..m).map(|_| Scalar::random(&mut rng)).collect(); + + let mut transcript = ProofTranscript::new(b"AggregateRangeProofBenchmark"); + let proof = RangeProof::prove_multiple( + &generators, + &mut transcript, + &mut rng, + &values, + &blindings, + n, + ).unwrap(); + + // XXX would be nice to have some convenience API for this + let pg = &generators.all().pedersen_generators; + let value_commitments: Vec<_> = values + .iter() + .zip(blindings.iter()) + .map(|(&v, &v_blinding)| pg.commit(Scalar::from_u64(v), v_blinding)) + .collect(); + + b.iter(|| { + // Each proof creation requires a clean transcript. 
+ let mut transcript = ProofTranscript::new(b"AggregateRangeProofBenchmark"); + + proof.verify( + &value_commitments, + generators.all(), + &mut transcript, + &mut rng, + n, + ) + }); + }, + &AGGREGATION_SIZES, + ); +} + +fn verify_aggregated_rangeproof_n_8(c: &mut Criterion) { + verify_aggregated_rangeproof_helper(8, c); +} + +fn verify_aggregated_rangeproof_n_16(c: &mut Criterion) { + verify_aggregated_rangeproof_helper(16, c); +} + +fn verify_aggregated_rangeproof_n_32(c: &mut Criterion) { + verify_aggregated_rangeproof_helper(32, c); +} + +fn verify_aggregated_rangeproof_n_64(c: &mut Criterion) { + verify_aggregated_rangeproof_helper(64, c); +} + +criterion_group!{ + name = create_rp; + config = Criterion::default().sample_size(10); + targets = + create_aggregated_rangeproof_n_8, + create_aggregated_rangeproof_n_16, + create_aggregated_rangeproof_n_32, + create_aggregated_rangeproof_n_64, +} + +criterion_group!{ + name = verify_rp; + config = Criterion::default(); + targets = + verify_aggregated_rangeproof_n_8, + verify_aggregated_rangeproof_n_16, + verify_aggregated_rangeproof_n_32, + verify_aggregated_rangeproof_n_64, +} + +criterion_main!(create_rp, verify_rp); diff --git a/docs/assets/rustdoc-include-katex-header.html b/docs/assets/rustdoc-include-katex-header.html new file mode 100644 index 00000000..51232f44 --- /dev/null +++ b/docs/assets/rustdoc-include-katex-header.html @@ -0,0 +1,17 @@ + + + + + diff --git a/docs/inner-product-protocol.md b/docs/inner-product-protocol.md new file mode 100644 index 00000000..ed9fc9df --- /dev/null +++ b/docs/inner-product-protocol.md @@ -0,0 +1,141 @@ +The `inner_product_proof` module contains API for producing a compact proof of an inner product of two vectors. + +Inner product argument protocol +=============================== + +These notes explain how the protocol is implemented in the [`InnerProductProof`](struct.InnerProductProof.html) type. 
+ +We want to prove the relation +\\[ +\operatorname{PK}\left\\{ + ({\mathbf{G}}, {\mathbf{H}} \in {\mathbb G}^n, P', Q \in {\mathbb G}; {\mathbf{a}}, {\mathbf{b}} \in {\mathbb Z\_p}^n) + : P' = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} Q +\right\\} +\\] where \\(n = 2^{k}\\) is a power of \\(2\\). + +Prover’s algorithm +------------------ + +To start, we sketch the +interactive version of this protocol, and then describe the +optimizations discussed in the Bulletproofs paper for the +non-interactive version. + +The protocol consists of \\(k = \lg n\\) rounds, indexed by +\\(j = k,\ldots,1\\). In the \\(j\\)-th round, the prover computes +\\[ +\begin{aligned} + L\_{j} &\gets {\langle {\mathbf{a}}\_{\operatorname{lo}}, {\mathbf{G}}\_{\operatorname{hi}} \rangle} + {\langle {\mathbf{b}}\_{\operatorname{hi}}, {\mathbf{H}}\_{\operatorname{lo}} \rangle} + {\langle {\mathbf{a}}\_{\operatorname{lo}}, {\mathbf{b}}\_{\operatorname{hi}} \rangle} Q, \\\\ + R\_{j} &\gets {\langle {\mathbf{a}}\_{\operatorname{hi}}, {\mathbf{G}}\_{\operatorname{lo}} \rangle} + {\langle {\mathbf{b}}\_{\operatorname{lo}}, {\mathbf{H}}\_{\operatorname{hi}} \rangle} + {\langle {\mathbf{a}}\_{\operatorname{hi}}, {\mathbf{b}}\_{\operatorname{lo}} \rangle} Q, +\end{aligned} +\\] +and sends \\(L\_{j}, R\_{j}\\) to the verifier. The verifier responds with a +challenge value \\(u\_{j} {\xleftarrow{\\$}}{\mathbb{Z}\_p}\\). 
The prover uses +\\(u\_{j}\\) to compute +\\[ +\begin{aligned} + {\mathbf{a}} &\gets {\mathbf{a}}\_{\operatorname{lo}} \cdot u\_{j} + u\_{j}^{-1} \cdot {\mathbf{a}}\_{\operatorname{hi}}, \\\\ + {\mathbf{b}} &\gets {\mathbf{b}}\_{\operatorname{lo}} \cdot u\_{j}^{-1} + u\_{j} \cdot {\mathbf{b}}\_{\operatorname{hi}}, +\end{aligned} +\\] +the prover and verifier both compute +\\[ +\begin{aligned} + {\mathbf{G}} &\gets {\mathbf{G}}\_{\operatorname{lo}} \cdot u\_{j}^{-1} + u\_{j} \cdot {\mathbf{G}}\_{\operatorname{hi}}, \\\\ + {\mathbf{H}} &\gets {\mathbf{H}}\_{\operatorname{lo}} \cdot u\_{j} + u\_{j}^{-1} \cdot {\mathbf{H}}\_{\operatorname{hi}}, +\end{aligned} +\\] +and use these vectors (all of length \\(2^{j-1}\\)) for the next round. +After the last (\\(j = 1\\)) round, the prover sends +\\(a, b = {\mathbf{a}}\_{0}, {\mathbf{b}}\_{0}\\) to the verifier, who accepts +if and only if +\\[ +\begin{aligned} +L\_{1} u\_{1}^{2} + \cdots + L\_{k} u\_{k}^{2} + P' + R\_{k} u\_{k}^{-2} + \cdots + R\_{1} u\_{1}^{-2}&\overset ? = aG + bH + abQ, +\end{aligned} +\\] +where \\(G, H = {\mathbf{G}}\_{0}, {\mathbf{H}}\_{0}\\). + +To make the protocol noninteractive, we replace the transmission of the +\\(L\_{j}\\) and \\(R\_{j}\\) and the response \\(u\_{j}\\) with a Fiat-Shamir +challenge, so that each \\(u\_{j}\\) is generated as a hash of the transcript +\\(L\_{k},R\_{k},\ldots,L\_{j},R\_{j}\\). At the end of the prover’s +computation, they send \\(a,b,L\_{k},R\_{k},\ldots,L\_{1},R\_{1}\\) to the +verifier. + +Verifier’s algorithm +-------------------- + +Since the final \\(G\\) and \\(H\\) values are functions of the challenges +\\(u\_{k},\ldots,u\_{1}\\), the verifier has to compute them as part of the +verification process. 
However, while the prover needs to compute the +intermediate vectors \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) in order to compute +the \\(L\_{j}\\) and \\(R\_{j}\\), the verifier doesn’t, and can compute the final +\\(G\\), \\(H\\) directly from the vectors \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) and +the challenges \\(u\_{k}, \ldots, u\_{1}\\). + +Let \\({\mathbf{G}}^{(j)}\\) be the value of \\({\mathbf{G}}\\) in the \\(j\\)-th +round, and let \\(G\_{i}\\) be the \\(i\\)-th entry of the initial vector +\\({\mathbf{G}}^{(k)} = +(G\_{0}, \ldots, G\_{n-1})\\). We have \\[ +\begin{aligned} + {\mathbf{G}}^{(j-1)} = ({\mathbf{G}}^{(j)})\_{\operatorname{lo}} u\_{j}^{-1} + ({\mathbf{G}}^{(j)})\_{\operatorname{hi}} u\_{j},\end{aligned} +\\] +so the coefficient of \\(G\_{i}\\) in the final \\(G\\) value is +\\[ +\begin{aligned} + s\_{i} &= u\_{k}^{b(i,k)} \cdots u\_1^{b(i,1)},\end{aligned} +\\] where +\\(b(i,j)\\) is either \\(-1\\) or \\(+1\\), according to whether \\(G\_{i}\\) appears in +the left or right half of \\({\mathbf{G}}^{(j)}\\). Since \\(G\_{i}\\) appears in +the \\((i \mod 2^{j})\\)-th entry of \\({\mathbf{G}}^{j}\\), this is +\\[ + b(i,j) = + \begin{cases} + -1 & \text{if $(i \mod 2^{j}) < 2^{j-1}$ }\\\\ + +1 & \text{if $(i \mod 2^{j}) \ge 2^{j-1}$ }\\\\ + \end{cases}. +\\] +But this is exactly +\\[ + b(i,j) = + \begin{cases} + -1 & \text{if bit $j-1$ of $i$ is 0} \\\\ + +1 & \text{if bit $j-1$ of $i$ is 1} \\\\ + \end{cases}. +\\] +This shows that +\\(G = {\langle {\mathbf{s}}, {\mathbf{G}} \rangle}\\). This formula differs +slightly from the one in the paper, because we index vectors and bits +from \\(0\\). + +Since \\(H\\) is computed similarly, but with the roles of +\\({\mathbf{H}}\_{\operatorname{lo}}\\) and +\\({\mathbf{H}}\_{\operatorname{hi}}\\) reversed, a similar argument shows +that \\(H = {\langle 1/{\mathbf{s}}, {\mathbf{H}} \rangle}\\). 
+ +Notice that if \\(i'\\) is the bitwise \\(\texttt{NOT}\\) of \\(i\\), then \\(s\_{i'} = 1/s\_{i}\\), +and as \\(i\\) runs from \\(0\\) to \\((2^k - 1)\\), \\(i'\\) runs from \\((2^k - 1)\\) to \\(0\\), +so the vector of inverses \\(1/{\mathbf{s}}\\) is a reversed +vector \\({\mathbf{s}}\\) and no additional computation is required to +obtain the \\(1/s\_{i}\\). + +### Verification equation + +The verifier’s computation then becomes +\\[ +\begin{aligned} +P' \overset ? =& aG +bH +abQ - \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right) \\\\ +=& {\langle a \cdot {\mathbf{s}}, {\mathbf{G}} \rangle} + {\langle b /{\mathbf{s}}, {\mathbf{H}} \rangle} + abQ - \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right), +\end{aligned} +\\] +a single multiscalar multiplication with +\\(n + n + 1 + k + k = 2(n+k) + 1\\) points. + +In order to combine the computation above with other checks in a parent protocol, we can provide these scalars: + +\\[ + \\{u\_{1}^{2}, \dots, u\_{k}^{2}, u\_{1}^{-2}, \dots, u\_{k}^{-2}, s_0, \dots, s_{n-1}\\}. +\\] + +Use the [`InnerProductProof::verification_scalars`](struct.InnerProductProof.html#method.verification_scalars) method to produce these scalars for a given inner product proof. diff --git a/docs/notes.md b/docs/notes.md new file mode 100644 index 00000000..d2439bf5 --- /dev/null +++ b/docs/notes.md @@ -0,0 +1,882 @@ +This module contains notes on how and why Bulletproofs work. + +The documentation is laid out roughly as follows. General notes on +the range proof and inner-product proofs are here. The description of +each protocol is contained in the respective `range_proof` and +`inner_product_proof` modules. Finally, structs from those modules +are publicly re-exported from the crate root, so that the external +documentation describes how to use the API, while the internal +documentation describes how it works. 
+ +Notation +======== + +We change notation from the original [Bulletproofs paper][bulletproofs_paper]. +The primary motivation is that our implementation uses additive notation, and +we would like our description of the protocol to use the same notation as the +implementation. + +In general, we use lower-case letters +\\(a, b, c\\) +for scalars in +\\({\mathbb Z\_p}\\) +and upper-case letters +\\(G,H,P,Q\\) +for group elements in +\\({\mathbb G}\\). +Vectors are denoted as \\({\mathbf{a}}\\) and \\({\mathbf{G}}\\), +and the inner product of two vectors is denoted by +\\({\langle -, - \rangle}\\). Notice that +\\({\langle {\mathbf{a}}, {\mathbf{b}} \rangle} \in {\mathbb Z\_p}\\) +produces a scalar, while +\\({\langle {\mathbf{a}}, {\mathbf{G}} \rangle} \in {\mathbb G}\\) +is a multiscalar multiplication. The vectors of all \\(0\\) and all \\(1\\) are +denoted by \\({\mathbf{0}}\\), \\({\mathbf{1}}\\) respectively. + +Vectors are indexed starting from \\(0\\), unlike the paper, which indexes +from \\(1\\). For a scalar \\(y\\), we write +\\[ +\begin{aligned} + {\mathbf{y}}^{n} &= (1,y,y^{2},\ldots,y^{n-1}) +\end{aligned} +\\] +for the vector whose \\(i\\)-th entry is \\(y^{i}\\). For vectors +\\({\mathbf{v}}\\) of even +length \\(2k\\), we define \\({\mathbf{v}}\_{\operatorname{lo}}\\) and +\\({\mathbf{v}}\_{\operatorname{hi}}\\) to be the low and high halves of +\\({\mathbf{v}}\\): +\\[ +\begin{aligned} + {\mathbf{v}}\_{\operatorname{lo}} &= (v\_0, \ldots, v\_{k-1})\\\\ + {\mathbf{v}}\_{\operatorname{hi}} &= (v\_{k}, \ldots, v\_{2k-1}) +\end{aligned} +\\] +Pedersen commitments are written as +\\[ +\begin{aligned} + \operatorname{Com}(v) &= \operatorname{Com}(v, {\widetilde{v}}) = v \cdot B + {\widetilde{v}} \cdot {\widetilde{B}}, +\end{aligned} +\\] +where \\(B\\) and \\({\widetilde{B}}\\) are the generators used for the values +and blinding factors, respectively. 
We denote the blinding factor for +the value \\(v\\) by \\({\widetilde{v}}\\), so that it is clear which blinding +factor corresponds to which value, and write \\(\operatorname{Com}(v)\\) +instead of \\(\operatorname{Com}(v, {\widetilde{v}})\\) for brevity. + +We also make use of *vector Pedersen commitments*, which we define for +pairs of vectors as \\[ +\begin{aligned} + \operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R}) + &= \operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R}, {\widetilde{a}}) + = {\langle {\mathbf{a}}\_{L}, {\mathbf{G}} \rangle} + {\langle {\mathbf{a}}\_{R}, {\mathbf{H}} \rangle} + {\widetilde{a}} {\widetilde{B}},\end{aligned} +\\] +where \\({\mathbf{G}}\\) and \\({\mathbf{H}}\\) are vectors of generators. +Notice that this is exactly the same as taking a commitment to the +vector of values \\({\mathbf{a}}\_{L} \Vert {\mathbf{a}}\_{R}\\) with the +vector of bases \\({\mathbf{G}} \Vert {\mathbf{H}}\\), but defining the +commitment on pairs of vectors is a more convenient notation. + +The variable renaming is as follows: +\\[ +\begin{aligned} + g &\xrightarrow{} B & \gamma &\xrightarrow{} \tilde{v} \\\\ + h &\xrightarrow{} \tilde{B} & \alpha &\xrightarrow{} \tilde{a} \\\\ + {\mathbf{g}} &\xrightarrow{} {\mathbf{G}} & \rho &\xrightarrow{} \tilde{s} \\\\ + {\mathbf{h}} &\xrightarrow{} {\mathbf{H}} & \tau\_i &\xrightarrow{} \tilde{t}\_i \\\\ + & & \mu &\xrightarrow{} \tilde{e} \\\\ +\end{aligned} +\\] + +Range Proofs from inner products +================================ + +The goal of a *range proof* is for a prover to convince a verifier +that a particular value \\(v\\) lies within a valid range, without revealing +any additional information about the value \\(v\\). + +The prover begins with a secret value \\(v\\) and commitment +\\(V = \operatorname{Com}(v)\\), which it sends to the verifier. The prover +wishes to convince the verifier that +\\[ +\begin{aligned} + v &\in [0, 2^{n}) +\end{aligned} +\\] +without revealing \\(v\\). 
+ +Since the prover will eventually use an inner product proof to do this, +we want to work towards expressing this condition +in terms of a single inner product. In this section, we construct +successive statements which imply \\(v \in [0,2^{n})\\) +until we arrive at the ones the prover will use to convince +the verifier. + +Proving range statements with bit vectors +----------------------------------------- + +Let \\({\mathbf{a}}\\) be the vector of bits of \\(v\\). +Then \\(v\\) can be represented as an inner product of bits \\({\mathbf{a}}\\) +and powers of two \\({\mathbf{2}}^{n} = (1,2,4,\ldots,2^{n-1})\\): +\\[ +\begin{aligned} + v &= {\langle {\mathbf{a}}, {\mathbf{2}}^{n} \rangle} \\\\ + &= a_{0}\cdot 2^0 + \dots + a_{n-1}\cdot 2^{n-1}. +\end{aligned} +\\] +We need \\({\mathbf{a}}\\) to be a vector of integers \\(\\{0,1\\}\\). +This can be expressed with an additional condition +\\[ +{\mathbf{a}} \circ ({\mathbf{a}} - {\mathbf{1}}) = {\mathbf{0}}, +\\] +where \\({\mathbf{x}} \circ {\mathbf{y}}\\) denotes the entry-wise multiplication of two vectors. +The result of multiplication can be all-zero if and only if every bit is actually \\(0\\) or[^1] \\(1\\). + +As a result of representing value in binary, the range condition \\(v \in [0, 2^{n})\\) +is equivalent to the pair of conditions +\\[ +\begin{aligned} + {\langle {\mathbf{a}}, {\mathbf{2}}^{n} \rangle} &= v, \\\\ + {\mathbf{a}} \circ ({\mathbf{a}} - {\mathbf{1}}) &= {\mathbf{0}}. +\end{aligned} +\\] +We will +eventually need to make separate commitments to the vectors +\\({\mathbf{a}}\\) and \\({\mathbf{a}} - {\mathbf{1}}\\), so we set +\\({\mathbf{a}}\_{L} = {\mathbf{a}}\\), +\\({\mathbf{a}}\_{R} = {\mathbf{a}} - {\mathbf{1}}\\) to obtain +\\[ +\begin{aligned} + {\langle {\mathbf{a}}\_{L}, {\mathbf{2}}^{n} \rangle} &= v, \\\\ + {\mathbf{a}}\_{L} \circ {\mathbf{a}}\_{R} &= {\mathbf{0}}, \\\\ + ({\mathbf{a}}\_{L} - {\mathbf{1}}) - {\mathbf{a}}\_{R} &= {\mathbf{0}}. 
+\end{aligned} +\\] + +[^1]: Generally, condition \\(x=0 \vee y=0\\) can be expressed as \\(x \cdot y = 0\\), +as the product can be zero if and only if at least one of the terms is zero. +This trick allows implementing logical `OR` with any number of terms. + + +Proving vectors of statements with a single statement +----------------------------------------------------- + +The statements above are statements about vectors, or equivalently, a +vector of statements about each entry. We want to combine all of these +into a single statement. + +First, we will combine each of the two vector-statements into a single statement. +Since \\({\mathbf{b}} = {\mathbf{0}}\\) if and only +if[^2] \\({\langle {\mathbf{b}}, {\mathbf{y}}^{n} \rangle} = 0\\) for every \\(y\\), +the statements above are implied by +\\[ +\begin{aligned} + {\langle {\mathbf{a}}\_{L}, {\mathbf{2}}^{n} \rangle} &= v, \\\\ + {\langle {\mathbf{a}}\_{L} - {\mathbf{1}} - {\mathbf{a}}\_{R}, {\mathbf{y}}^{n} \rangle} &= 0, \\\\ + {\langle {\mathbf{a}}\_{L}, {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} &= 0 +\end{aligned} +\\] +for the verifier’s choice of a challenge value \\(y\\). + +The three resulting statements can then be combined in the same way, +using the verifier’s choice of \\(z\\): +\\[ +\begin{aligned} +z^{2} v +&= + z^{2} {\langle {\mathbf{a}}\_{L}, {\mathbf{2}}^{n} \rangle} + + z {\langle {\mathbf{a}}\_{L} - {\mathbf{1}} - {\mathbf{a}}\_{R}, {\mathbf{y}}^{n} \rangle} + + {\langle {\mathbf{a}}\_{L}, {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} +\end{aligned} +\\] + +[^2]: This is because the polynomial in terms of \\(y\\) is zero at every point +if and only if every term of it is zero. The verifier is going to sample +a random \\(y\\) after the prover commits to all the values forming the terms of +that polynomial, making the probability that the prover cheated negligible. +This trick allows implementing logical `AND` with any number of terms. 
+ + +Combining inner products +------------------------ + +Finally, we want to combine these terms into a single inner product. Our +goal is to rearrange the inner product above so that terms +involving \\({\mathbf{a}}\_{L}\\) appear only on the left-hand side, terms +involving \\({\mathbf{a}}\_{R}\\) appear only on the right-hand side, and +non-secret terms (which the verifier can compute on its own) are +factored out into a new term \\(\delta\\). + +First, break the statement into simpler terms, then rearrange: +\\[ +\begin{aligned} +z^2 v +&= +z^2 {\langle {\mathbf{a}}\_{L}, {\mathbf{2}}^n \rangle} +\+ z {\langle {\mathbf{a}}\_{L}, {\mathbf{y}}^n \rangle} +\- z {\langle {\mathbf{a}}\_{R}, {\mathbf{y}}^n \rangle} +\- z {\langle {\mathbf{1}}, {\mathbf{y}}^n \rangle} +\+ {\langle {\mathbf{a}}\_{L}, {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\\\\ +z^{2} v +\+ z {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \rangle} +&= +z^2 {\langle {\mathbf{a}}\_{L}, {\mathbf{2}}^n \rangle} +\+ z {\langle {\mathbf{a}}\_{L}, {\mathbf{y}}^n \rangle} +\- z {\langle {\mathbf{1}} , {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\+ {\langle {\mathbf{a}}\_{L}, {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\\\\ +z^{2} v +\+ z {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \rangle} +&= +{\langle {\mathbf{a}}\_{L}, z^{2} {\mathbf{2}}^n \rangle} +\+ {\langle {\mathbf{a}}\_{L}, z {\mathbf{y}}^n \rangle} +\+ {\langle -z {\mathbf{1}} , {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\+ {\langle {\mathbf{a}}\_{L}, {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\\\\ +z^{2} v +\+ z {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \rangle} +&= +{\langle {\mathbf{a}}\_{L}, z^{2} {\mathbf{2}}^n + z {\mathbf{y}}^{n} + {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} +\+ {\langle -z {\mathbf{1}} , {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\end{aligned} +\\] +To combine the terms on the right-hand side, add +\\({\langle -z {\mathbf{1}}, z^2 {\mathbf{2}}^n + z {\mathbf{y}}^n \rangle}\\) +to each side, 
then simplify: +\\[ +\begin{aligned} +z^{2} v +\+ z {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \rangle} +\- {\langle z {\mathbf{1}}, z^2 {\mathbf{2}}^n + z {\mathbf{y}}^n \rangle} +&= +{\langle {\mathbf{a}}\_{L}, z^{2} {\mathbf{2}}^n + z {\mathbf{y}}^{n} + {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} +\\\\ +&+ {\langle -z {\mathbf{1}} , z^2 {\mathbf{2}}^n + z {\mathbf{y}}^n + {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\\\\ +z^2 v +\+ (z - z^2) {\langle {\mathbf{1}}, {\mathbf{y}}^n \rangle} +\- z^3 {\langle {\mathbf{1}}, {\mathbf{2}}^n \rangle} +&= +{\langle {\mathbf{a}}\_{L} - z{\mathbf{1}}, z^{2} {\mathbf{2}}^n + z {\mathbf{y}}^{n} + {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} +\end{aligned} +\\] +Combining all non-secret terms outside the inner product +\\[ + \delta(y,z) = (z - z^{2}) {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \rangle} - z^{3} {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}, +\\] +we finally obtain +\\[ + z^{2}v + \delta(y,z) = {\langle {\mathbf{a}}\_{L} - z {\mathbf{1}}, {\mathbf{y}}^{n} \circ ({\mathbf{a}}\_{R} + z {\mathbf{1}}) + z^{2} {\mathbf{2}}^{n} \rangle}. +\\] +This is equivalent to the original inner-product equation, but has a single +inner product with \\({\mathbf{a}}\_{L}\\) on the left, \\({\mathbf{a}}\_{R}\\) on +the right, and non-secret terms factored out. + +Blinding the inner product +-------------------------- + +The prover cannot send the left and right vectors in +the single inner-product equation to the verifier without revealing information +about the value \\(v\\), and since the inner-product argument is not +zero-knowledge, they cannot be used there either. 
+ +Instead, the prover chooses vectors of blinding factors +\\[ +{\mathbf{s}}\_{L}, {\mathbf{s}}\_{R} \\;{\xleftarrow{\\$}}\\; {\mathbb Z\_p}^{n}, +\\] +and uses them to construct vector polynomials +\\[ +\begin{aligned} + {\mathbf{l}}(x) &= {\mathbf{l}}\_{0} + {\mathbf{l}}\_{1} x = ({\mathbf{a}}\_{L} + {\mathbf{s}}\_{L} x) - z {\mathbf{1}} & \in {\mathbb Z\_p}[x]^{n} \\\\ + {\mathbf{r}}(x) &= {\mathbf{r}}\_{0} + {\mathbf{r}}\_{1} x = {\mathbf{y}}^{n} \circ \left( ({\mathbf{a}}\_{R} + {\mathbf{s}}\_{R} x\right) + z {\mathbf{1}}) + z^{2} {\mathbf{2}}^{n} &\in {\mathbb Z\_p}[x]^{n} +\end{aligned} +\\] +These are the left and right sides of the combined inner product with \\({\mathbf{a}}\_{L}\\), \\({\mathbf{a}}\_{R}\\) +replaced by blinded terms \\({\mathbf{a}}\_{L} + {\mathbf{s}}\_{L} x\\), +\\({\mathbf{a}}\_{R} + {\mathbf{s}}\_{R} x\\). Notice that since only the +blinding factors \\({\mathbf{s}}\_{L}\\), \\({\mathbf{s}}\_{R}\\) are multiplied +by \\(x\\), the vectors \\({\mathbf{l}}\_{0}\\) and \\({\mathbf{r}}\_{0}\\) are +exactly the left and right sides of the unblinded single inner-product: +\\[ + {\langle {\mathbf{l}}\_{0}, {\mathbf{r}}\_{0} \rangle} = z^{2}v + \delta(y,z) +\\] + +Setting +\\[ + t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle} = t\_{0} + t\_{1} x + t\_{2} x^{2}, +\\] +we can express the coefficients of \\(t(x)\\) using Karatsuba’s method: +\\[ +\begin{aligned} + t\_{0} &= {\langle {\mathbf{l}}\_{0}, {\mathbf{r}}\_{0} \rangle}, \\\\ + t\_{2} &= {\langle {\mathbf{l}}\_{1}, {\mathbf{r}}\_{1} \rangle}, \\\\ + t\_{1} &= {\langle {\mathbf{l}}\_{0} + {\mathbf{l}}\_{1}, {\mathbf{r}}\_{0} + {\mathbf{r}}\_{1} \rangle} - t\_{0} - t\_{2} +\end{aligned} +\\] +Since \\[ +\begin{aligned} + t\_{0} &= {\langle {\mathbf{a}}\_{L} - z {\mathbf{1}}, {\mathbf{y}}^{n} \circ ({\mathbf{a}}\_{R} + z {\mathbf{1}}) + z^{2} 2^{n} \rangle},\end{aligned} +\\] +for the prover to convince the verifier that the unblinded single inner-product equation +holds, it’s 
enough to prove that the constant term \\(t\_{0}\\) of \\(t(x)\\) is +\\(z^{2} v + \delta(y,z)\\), and that +this \\(t(x)\\) is the correct polynomial. +Proving that \\(t(x)\\) is correct means proving that +\\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correctly formed, and that +\\(t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle}\\). + +Proving that \\(t\_{0}\\) is correct +------------------------------------ + +In order to prove that the constant term of \\(t(x)\\) is +\\(z^{2} v + \delta(y,z)\\), the prover first forms a commitment to the +coefficients of \\(t(x)\\), then convinces the verifier that these commit to +the correct \\(t(x)\\) by evaluating the polynomial at a challenge point +\\(x\\). + +The prover has already used \\(V = \operatorname{Com}(v)\\) to commit to \\(v\\) +(and hence to \\(t\_{0}\\)), so the prover forms commitments +\\(T\_{1} = \operatorname{Com}(t\_{1})\\) and +\\(T\_{2} = \operatorname{Com}(t\_{2})\\), then sends these to the verifier. +The commitments \\(V\\), \\(T\_{1}\\), \\(T\_{2}\\) are related to each other and to +\\(t(x)\\) by the following diagram: +\\[ +\begin{aligned} + t(x) B &\quad &= \quad & z^{2}vB & \quad &+ \quad & \delta(y,z) B & \quad &+ \quad& x t\_{1} B &\quad &+\quad & x^2 t\_{2} B \\\\ + + &\quad & \quad & + & \quad & \quad & + & \quad & \quad& + &\quad & \quad & + \\\\ + {\tilde{t}}(x) {\widetilde{B}} &\quad &= \quad & z^2 {\widetilde{v}} {\widetilde{B}} & \quad &+ \quad & 0 {\widetilde{B}} & \quad &+ \quad& x {\tilde{t}}\_{1} {\widetilde{B}} &\quad &+\quad & x^{2} {\tilde{t}}\_{2} {\widetilde{B}} \\\\ + \shortparallel &\quad & \quad & \shortparallel & \quad & \quad & \shortparallel & \quad & \quad& \shortparallel &\quad & \quad & \shortparallel \\\\ + &\quad &= \quad & z^2 V & \quad &+ \quad & \delta(y,z) B & \quad &+ \quad& x T\_{1} &\quad &+\quad & x^{2} T\_{2} +\end{aligned} +\\] +Notice that the sum of each column is a commitment to the variable in the top +row using the blinding factor in 
the second row. +The sum of all of the columns is +\\(t(x) B + {\tilde{t}}(x) {\widetilde{B}}\\), a commitment to the value +of \\(t\\) at the point \\(x\\), using the synthetic blinding factor[^3] +\\[ + {\tilde{t}}(x) = z^{2} {\tilde{v}} + x {\tilde{t}}\_{1} + x^{2} {\tilde{t}}\_{2}. +\\] +To convince the verifier that +\\(t(x) = z^2v + \delta(y,z) + t\_{1} x + t\_{2} x^{2}\\), the prover sends +the opening \\(t(x), {\tilde{t}}(x)\\) to the verifier, who uses the +bottom row of the diagram to check consistency: +\\[ + t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z^2 V + \delta(y,z) B + x T\_{1} + x^{2} T\_{2}. +\\] + +[^3]: The blinding factor is synthetic in the sense that it is + synthesized from the blinding factors of the other commitments. + +Proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct +--------------------------------------------------------------------- + +We want to relate \\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\) to commitments +to \\({\mathbf{a}}\_{L}\\), \\({\mathbf{a}}\_{R}\\), \\({\mathbf{s}}\_{L}\\), and +\\({\mathbf{s}}\_{R}\\). However, since \\[ +\begin{aligned} + {\mathbf{r}}(x) &= {\mathbf{y}}^{n} \circ \left( ({\mathbf{a}}\_{R} + {\mathbf{s}}\_{R} x) + z {\mathbf{1}} \right) + z^{2} {\mathbf{2}}^{n},\end{aligned} +\\] +we need commitments to \\({\mathbf{y}}^{n} \circ {\mathbf{a}}\_{R}\\) and +\\({\mathbf{y}}^{n} \circ {\mathbf{s}}\_{R}\\). However, since the prover +must form commitments before receiving the verifier’s challenge \\(y\\), the +prover can only commit to \\(a\_{R}\\) and \\(s\_{R}\\). Since the prover’s +commitments are to \\(a\_{R}\\) and \\(s\_{R}\\), the verifier needs to transmute +the prover’s commitment +\\( +\operatorname{Com}({\mathbf{a}}\_{L},{\mathbf{a}}\_{R}, {\widetilde{a}}) +\\) +into a commitment +\\( +\operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{y}}^{n} \circ {\mathbf{a}}\_{R}, {\widetilde{a}}) +\\) +(and similarly for \\({\mathbf{s}}\_{R}\\)). 
+To do this, notice that +\\[ +\begin{aligned} + \operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R}, {\widetilde{a}}) + &= + {\langle {\mathbf{a}}\_{L}, {\mathbf{G}} \rangle} + {\langle {\mathbf{a}}\_{R}, {\mathbf{H}} \rangle} + {\widetilde{a}} {\widetilde{B}} \\\\ + &= + {\langle {\mathbf{a}}\_{L}, {\mathbf{G}} \rangle} + {\langle {\mathbf{y}}^{n} \circ {\mathbf{a}}\_{R}, {\mathbf{y}}^{-n} \circ {\mathbf{H}} \rangle} + {\widetilde{a}} {\widetilde{B}}, +\end{aligned} +\\] +so that by changing generators to +\\({\mathbf{H}}' = {\mathbf{y}}^{-n} \circ {\mathbf{H}}\\), the point which +is a commitment to +\\(({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R}, {\widetilde{a}})\\) with respect to +\\(({\mathbf{G}}, {\mathbf{H}}, {\widetilde{B}})\\) is transmuted into a +commitment to +\\(({\mathbf{a}}\_{L}, {\mathbf{y}}^{n} \circ {\mathbf{a}}\_{R}, {\widetilde{a}})\\) +with respect to \\(({\mathbf{G}}, {\mathbf{H}}', {\widetilde{B}})\\). + +To relate the prover’s commitments +\\(A = \operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R})\\) and +\\(S = \operatorname{Com}({\mathbf{s}}\_{L}, {\mathbf{s}}\_{R})\\) to +\\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\), we use the following diagram: +\\[ +\begin{aligned} + {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} &\quad &= \quad & {\langle {\mathbf{a}}\_L, {\mathbf{G}} \rangle} & \quad &+ \quad & x {\langle {\mathbf{s}}\_L, {\mathbf{G}} \rangle} &\quad &+\quad & {\langle -z{\mathbf{1}}, {\mathbf{G}} \rangle} \\\\ + + &\quad & \quad & + & \quad & \quad & + &\quad & \quad & + \\\\ + {\langle {\mathbf{r}}(x), {\mathbf{H}}' \rangle} &\quad &= \quad & {\langle {\mathbf{a}}\_R, {\mathbf{H}} \rangle} & \quad &+ \quad & x {\langle {\mathbf{s}}\_R, {\mathbf{H}} \rangle} &\quad &+\quad & {\langle z {\mathbf{y}}^n + z^2 {\mathbf{2}}^n, {\mathbf{H}}' \rangle} \\\\ + + &\quad & \quad & + & \quad & \quad & + &\quad & \quad & \\\\ + {\widetilde{e}} {\widetilde{B}} &\quad &= \quad & {\widetilde{a}} {\widetilde{B}} & \quad &+ \quad & x 
{\widetilde{s}} {\widetilde{B}} &\quad & \quad & \\\\ + \shortparallel &\quad & \quad & \shortparallel & \quad & \quad & \shortparallel &\quad & \quad & \shortparallel \\\\ + &\quad &= \quad & A & \quad &+ \quad & x S &\quad &+\quad & {\langle z {\mathbf{y}}^n + z^2 {\mathbf{2}}^n, {\mathbf{H}}' \rangle} - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} +\end{aligned} +\\] +We can interpret the rows and columns similarly to the previous diagram: +the sum of each column is a vector Pedersen commitment with left and right halves from the first and second rows respectively +and blinding factor from the third row. +The sum of all of the columns is a vector +Pedersen commitment to \\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\) with +synthetic blinding factor \\({\widetilde{e}}\\). + +To convince the verifier that +\\(t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle}\\), the prover +sends \\({\widetilde{e}}\\) to the verifier, who uses the bottom row +to compute +\\[ +\begin{aligned} + P &= -{\widetilde{e}} {\widetilde{B}} + A + x S + {\langle z {\mathbf{y}}^n + z^2 {\mathbf{2}}^n, {\mathbf{H}}' \rangle} - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle}\\\\ + &= -{\widetilde{e}} {\widetilde{B}} + A + x S + {\langle z {\mathbf{1}} + z^2 {\mathbf{y}^{-n}} \circ {\mathbf{2}}^n, {\mathbf{H}} \rangle} - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle}; +\end{aligned} +\\] +if the prover is honest, this is +\\(P = {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x), {\mathbf{H}}' \rangle}\\), +so the verifier uses \\(P\\), \\(t(x)\\) as inputs to the inner-product protocol +to prove that +\\(t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle}\\). + +Inner-product proof +=================== + +First, let’s observe that the prover can simply send vectors +\\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\) and the verifier can check +directly that the inner product \\(t(x)\\) and commitment \\(P\\) provided in +the protocols 1 and 2 are correct. 
This will not leak information (the +secret bits in these vectors are blinded), but will require us to +transfer \\(2n\\) scalars between a prover and a verifier. + +To minimize the bandwidth cost we will use the inner-product argument +protocol which enables us to prove *indirectly* and with \\(O(log(n))\\) +communication cost, that a given inner product \\(t(x)\\) and a commitment +\\(P\\) are related as: +\\[ +\begin{aligned} +t(x) &= {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle} \\\\ +P &= {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x), {\mathbf{H}}' \rangle} +\end{aligned} +\\] +To make the presentation +cleaner, we will change the notation to one used specifically in the +inner product argument which is not to be confused with the notation in +the rangeproof protocol: +\\[ +\begin{aligned} +{\mathbf{a}}, {\mathbf{b}} &\in {\mathbb Z\_{p}^{n}}\\\\ +{\mathbf{G}}, {\mathbf{H}} &\in {\mathbb G^{n}}\\\\ +c &= {\langle {\mathbf{a}}, {\mathbf{b}} \rangle}\\\\ +P &= {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} +\end{aligned} +\\] +Within the above definitions we need a proof of knowledge +for the following relation: +\\[ +\begin{aligned} + P &{}={}&& {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} \hspace{0.2cm} \wedge\\\\ + c &{}={}&& {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} +\end{aligned} +\\] +Let’s combine these two statements into one equation using an +indeterminate variable \\(w \in {\mathbb Z\_{p}^{\times}}\\) and multiplying the +second equation by an orthogonal generator +\\(B \in {\mathbb G}\\): +\\[ +\begin{aligned} + P &{}={}&& {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle}\\\\ + &{}+{}&&\\\\ + c w B &{}={}&& {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} w B +\end{aligned} +\\] +Let’s simplify the resulting equation using the following definitions: +\\[ +\begin{aligned} + k &= \lg n 
\\\\ + P' &= P + cwB \\\\ + Q &= wB +\end{aligned} +\\] +The equation becomes: +\\[ + P' = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} Q +\\] +The combined equation is useful because it will allow us +to compress each vector in half and arrive to the same form. By doing +such compression \\(\lg n\\) times we will end up with an equation where +both vectors are one-element long and we can simply transmit them to +check the final equality directly. + +If the prover can demonstrate that the above \\(P'\\) has such structure +over generators \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) and \\(Q\\) for all +\\(w \in {\mathbb Z\_{p}^{*}}\\), then the original \\(P\\) and \\(c\\) must satisfy +the original relation +\\((P = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} +\wedge c = {\langle {\mathbf{a}}, {\mathbf{b}} \rangle})\\). + +Let’s introduce an indeterminate variable \\(u\_k \in {\mathbb Z\_{p}^{\times}}\\) +and compress the vectors by adding the left and the right halves +separated by the variable \\(u\_k\\): +\\[ +\begin{aligned} + {\mathbf{a}}^{(k-1)} &= {\mathbf{a}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{a}}\_R \\\\ + {\mathbf{b}}^{(k-1)} &= {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R \\\\ + {\mathbf{G}}^{(k-1)} &= {\mathbf{G}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{G}}\_R \\\\ + {\mathbf{H}}^{(k-1)} &= {\mathbf{H}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{H}}\_R +\end{aligned} +\\] +The powers of \\(u\_k\\) are chosen so they cancel out in the +inner products of interest as will be shown below. 
+ +Let \\(P\_k = P'\\) and define \\(P\_{k-1}\\) using the same equation as for \\(P\_k\\), but using the compressed vectors: +\\[ + P\_{k-1} = {\langle {\mathbf{a}}^{(k-1)}, {\mathbf{G}}^{(k-1)} \rangle} + {\langle {\mathbf{b}}^{(k-1)}, {\mathbf{H}}^{(k-1)} \rangle} + {\langle {\mathbf{a}}^{(k-1)}, {\mathbf{b}}^{(k-1)} \rangle} \cdot Q +\\] +Expanding it in terms of the original \\({\mathbf{a}}\\), \\({\mathbf{b}}\\), +\\({\mathbf{G}}\\) and \\({\mathbf{H}}\\) gives: +\\[ +\begin{aligned} + P\_{k-1} &{}={}& &{\langle {\mathbf{a}}\_L \cdot u\_k + u\_k^{-1} \cdot {\mathbf{a}}\_R, {\mathbf{G}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{G}}\_R \rangle} + \\\\ + && &{\langle {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R, {\mathbf{H}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{H}}\_R \rangle} + \\\\ + && &{\langle {\mathbf{a}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{a}}\_R, {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R \rangle} \cdot Q +\end{aligned} +\\] +Breaking down in simpler products: +\\[ +\begin{aligned} + P\_{k-1} &{}={}& &{\langle {\mathbf{a}}\_L, {\mathbf{G}}\_L \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{G}}\_R \rangle} &{}+{}& u\_k^2 {\langle {\mathbf{a}}\_L, {\mathbf{G}}\_R \rangle} + u^{-2}\_k {\langle {\mathbf{a}}\_R, {\mathbf{G}}\_L \rangle} + \\\\ + && &{\langle {\mathbf{b}}\_L, {\mathbf{H}}\_L \rangle} + {\langle {\mathbf{b}}\_R, {\mathbf{H}}\_R \rangle} &{}+{}& u^2\_k {\langle {\mathbf{b}}\_R, {\mathbf{H}}\_L \rangle} + u^{-2}\_k {\langle {\mathbf{b}}\_L, {\mathbf{H}}\_R \rangle} + \\\\ + && &({\langle {\mathbf{a}}\_L, {\mathbf{b}}\_L \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_R \rangle})\cdot Q &{}+{}& (u^2\_k {\langle {\mathbf{a}}\_L, {\mathbf{b}}\_R \rangle} + u^{-2}\_k {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_L \rangle}) \cdot Q +\end{aligned} +\\] +We now see that the left two columns in the above equation is the +definition of \\(P\_k\\), while various cross terms on the right are +separated from \\(P\_k\\) 
by an indeterminate variable \\(u\_k\\). Let’s group all +terms with \\(u^2\_k\\) as \\(L\_k\\) and all terms with \\(u^{-2}\_k\\) as \\(R\_k\\): +\\[ +\begin{aligned} + P\_{k-1} &= P\_k + u^2\_k \cdot L\_k + u^{-2}\_k \cdot R\_k\\\\ + L\_k &= {\langle {\mathbf{a}}\_L, {\mathbf{G}}\_R \rangle} + {\langle {\mathbf{b}}\_R, {\mathbf{H}}\_L \rangle} + {\langle {\mathbf{a}}\_L, {\mathbf{b}}\_R \rangle} \cdot Q\\\\ + R\_k &= {\langle {\mathbf{a}}\_R, {\mathbf{G}}\_L \rangle} + {\langle {\mathbf{b}}\_L, {\mathbf{H}}\_R \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_L \rangle} \cdot Q +\end{aligned} +\\] +If the prover commits to \\(L\_k\\) and \\(R\_k\\) before \\(u\_k\\) is randomly +sampled, then if the statement about compressed vectors is proven to be +true, it will follow that the original statement about uncompressed vectors +is also true with an overwhelming probability. + +We can compress the resulting statement about \\(P\_{k-1}\\) using one more indeterminate +variable \\(u\_{k-1}\\) in the same way as we used \\(u\_k\\) and arrive +to even shorter vectors. 
We will continue doing so until we end up with +vectors +\\({\mathbf{a}}^{(0)}, {\mathbf{b}}^{(0)}, {\mathbf{G}}^{(0)}, {\mathbf{H}}^{(0)}\\), +each containing one item, and \\(P\_0\\) containing all accumulated cross-terms at each step: +\\[ +\begin{aligned} + P\_0 &= a^{(0)}\_0 G^{(0)}\_0 + b^{(0)}\_0 H^{(0)}\_0 + a^{(0)}\_0 b^{(0)}\_0 Q\\\\ + P\_0 &= P\_k + \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right) +\end{aligned} +\\] + +Rewriting the above with the definitions \\(P\_k = P' = P + cwB\\) and \\(Q = wB\\) gives the +final statement: +\\[ + P + c w B = a^{(0)}\_0 G^{(0)}\_0 + b^{(0)}\_0 H^{(0)}\_0 + a^{(0)}\_0 b^{(0)}\_0 wB - \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right) +\\] + +At this point the prover can transmit two scalars \\(a^{(0)}\_0\\) and +\\(b^{(0)}\_0\\) to the verifier, so they check the final statement directly +by computing both sides of the equation. + +The resulting protocol has \\(\lg n\\) steps of compression where the prover +sends a pair \\((L\_j,R\_j)\\) of points at each step \\(j = k\dots1\\). An +additional and final step involves sending a pair of scalars +\\((a^{(0)}\_0,b^{(0)}\_0)\\) and checking the final relation directly. + +Aggregated Range Proof +====================== + +We want to take advantage of the logarithmic size of the inner-product protocol, by creating an aggregated range proof for \\(m\\) values that is smaller than \\(m\\) individual range proofs. + +The aggregation protocol is a multi-party computation protocol, involving \\(m\\) parties (one party per value) and one dealer, where the parties don't reveal their secrets to each other. The parties share their commitments with the dealer, and the dealer generates and returns challenge variables. The parties then share their proof shares with the dealer, and the dealer combines their shares to create an aggregated proof. + +The Bulletproofs paper outlines two versions of multi-party computation aggregation. 
In the first approach, the inner-product proof is performed by the dealer, which requires sending the vectors used for the inner-product to the dealer. In the second approach, the inner-product proof is performed using multi-party computation, which sends less data but requires one round for each iteration of the inner-product protocol. We chose to implement the first approach because it requires fewer round trips between parties, which outweighed the slight message size savings of the second approach. + +For more information on how the aggregation protocol works and is implemented, see the [protocol notes](../aggregated_range_proof/index.html). + +The aggregated range proof has the same form as the individual range proof, in that the provers (the parties) still perform the same calculations to prove that \\(t(x) = \langle \mathbf{l}(x), \mathbf{r}(x) \rangle \\) and that \\(t_0, \mathbf{l}(x), \mathbf{r}(x)\\) are correct. The difference is that the challenge values are obtained from the dealer, which generates them by combining commitments from all the parties, and that the calculations of different parties are separated by different powers of the challenge scalars \\(y\\) and \\(z\\). + +We will explain how one piece of the aggregated proof is generated for party \\(j\\), and then will show how all of the pieces for all of the \\(m\\) parties can be combined into one aggregated proof. + +New notation for aggregated proofs +---------------------------------- + +The subscript \\({(j)}\\) denotes the \\(j\\)th party's share. For instance, \\(v_{(j)}\\) is the \\(v\\) value of the \\(j\\)th party; \\( \mathbf{a}\_{L, (j)}\\) is the \\( \mathbf{a}\_L \\) vector of the \\(j\\)th party; \\(\mathbf{l}\_{(0)}(x)\\) is the \\(\mathbf{l}(x)\\) polynomial of party \\(0\\). + +We use pythonic notation to denote slices of vectors, such that \\(\mathbf{G}\_{[a:b]} = [\mathbf{G}\_{a}, \mathbf{G}\_{a+1}, \dots, \mathbf{G}\_{b-1} ]\\). 
+ +\\({\mathbf{G}\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{G}}\\), or \\({\mathbf{G}\_{[j\cdot n : (j+1)n]}}\\), and \\({\mathbf{H}'\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{H}'}\\), or \\({\mathbf{H}'\_{[j\cdot n : (j+1)n]}}\\). + +\\(z_{(j)}\\) is a scalar offset that is unique to each party \\(j\\), and is defined by \\(z_{(j)} = z^j\\). \\(\mathbf{y}^n\_{(j)}\\) is a length \\(n\\) vector offset that is unique to each party \\(j\\). It is a slice into vector \\(\mathbf{y}^{n \cdot m}\\), and is defined by \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n]} \\) + + + +Proving range statements with bit vectors +----------------------------------------- + +Party \\(j\\) begins with a secret value \\(v_{(j)}\\), and wishes to convince the verifier that \\(v_{(j)} \in [0, 2^n)\\) without revealing \\(v_{(j)}\\). + +We want to make statements about \\(v_{(j)}\\) using its bit vector representation, where the statements will be true if and only if \\(v_{(j)}\\) is actually in the expected range. We will not reproduce the steps or explanation here since it is the same as in the [proving range statements with bit vectors](index.html#proving-range-statements-with-bit-vectors) step of the single-value range proof. Here are the final statements for party \\(j\\): + +\\[ +\begin{aligned} + {\langle {\mathbf{a}}\_{L, (j)}, {\mathbf{2}}^{n} \rangle} &= v_{(j)} \\\\ + {\mathbf{a}}\_{L, (j)} \circ {\mathbf{a}}\_{R, (j)} &= {\mathbf{0}} \\\\ + ({\mathbf{a}}\_{L, (j)} - {\mathbf{1}}) - {\mathbf{a}}\_{R, (j)} &= {\mathbf{0}} +\end{aligned} +\\] + +Proving vectors of statements with a single statement +----------------------------------------------------- + +We want to combine the above three statements into a single statement for party \\(j\\), as in the [proving vectors of statements](index.html#proving-vectors-of-statements-with-a-single-statement) step of the single-value range proof. 
We will additionally use offsets \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to each party \\(j\\). Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all \\(m\\) parties. + +First, we will combine each of the two vector-statements into a single statement using the verifier's choice of challenge value \\(y\\) that is shared across all parties, and offset by vector \\(\mathbf{y}^n\_{(j)}\\): + +\\[ +\begin{aligned} + {\langle {\mathbf{a}}\_{L, (j)}, {\mathbf{2}}^{n} \rangle} &= v_{(j)} \\\\ + {\langle {\mathbf{a}}\_{L, (j)} - {\mathbf{1}} - {\mathbf{a}}\_{R, (j)}, {\mathbf{y}}^{n}\_{(j)} \rangle} &= 0 \\\\ + {\langle {\mathbf{a}}\_{L, (j)}, {\mathbf{a}}\_{R, (j)} \circ {\mathbf{y}}^{n}\_{(j)} \rangle} &= 0 +\end{aligned} +\\] + +The three resulting statements can then be combined in the same way, +using the verifier’s choice of challenge value \\(z\\) that is shared across all parties, and offset by scalar \\(z\_{(j)} \\) : +\\[ +\begin{aligned} +z^{2} z\_{(j)} \cdot v_{(j)} +&= + z^{2} z\_{(j)} \cdot {\langle {\mathbf{a}}\_{L, (j)}, {\mathbf{2}}^{n} \rangle} \\\\ + &+ z \cdot {\langle {\mathbf{a}}\_{L, (j)} - {\mathbf{1}} - {\mathbf{a}}\_{R, (j)}, {\mathbf{y}}^{n}\_{(j)} \rangle} \\\\ + &+ {\langle {\mathbf{a}}\_{L, (j)}, {\mathbf{a}}\_{R, (j)} \circ {\mathbf{y}}^{n}\_{(j)} \rangle} +\end{aligned} +\\] + +Combining inner products +------------------------ + +We combine the terms in the preceding statement into a single inner product, using the same technique as in the single-value range proof. We will not reproduce the math here since it is the same as in the [combining inner products](index.html#combining-inner-products) step of the single-value proof. 
Here is the end result: + +\\[ +\begin{aligned} + \delta_{(j)}(y,z) &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} z_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}\\\\ + z^{2}z_{(j)} \cdot v_{(j)} + \delta_{(j)}(y,z) &= {\langle {\mathbf{a}}\_{L, (j)} - z {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \circ ({\mathbf{a}}\_{R, (j)} + z {\mathbf{1}}) + z^{2} z_{(j)} \cdot {\mathbf{2}}^{n} \rangle} +\end{aligned} +\\] + +Blinding the inner product +-------------------------- + +The prover chooses vectors of blinding factors \\( \mathbf{s}\_{L, (j)}, {\mathbf{s}}\_{R, (j)} \\), and uses them to construct the blinded vector polynomials \\(\mathbf{l}\_{(j)}(x), \mathbf{r}\_{(j)}(x)\\). We will not reproduce the steps or the explanation here since it is the same as in the [blinding the inner product](index.html#blinding-the-inner-product) step of the single-value proof. Here are the final equations for the vector polynomials: + +\\[ +\begin{aligned} + {\mathbf{l}}\_{(j)}(x) &= ({\mathbf{a}}\_{L, (j)} + {\mathbf{s}}\_{L, (j)} x) - z {\mathbf{1}} & \in {\mathbb Z\_p}[x]^{n} \\\\ + {\mathbf{r}}\_{(j)}(x) &= {\mathbf{y}}^{n}\_{(j)} \circ \left( ({\mathbf{a}}\_{R, (j)} + {\mathbf{s}}\_{R, (j)} x) + z {\mathbf{1}} \right) + z^{2} z_{(j)} {\mathbf{2}}^{n} &\in {\mathbb Z\_p}[x]^{n} +\end{aligned} +\\] + +Proving that \\(t(x)\\) is correct +---------------------------------- + +Proving that \\(t\_{(j)}(x)\\) is correct means proving that +\\({\mathbf{l}}\_{(j)}(x)\\), \\({\mathbf{r}}\_{(j)}(x)\\) are correctly formed, and that +\\(t\_{(j)}(x) = {\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{r}}\_{(j)}(x) \rangle}\\). 
+ +We can combine the statements about \\(t\_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x)\\), and \\({\mathbf{r}}\_{(j)}(x)\\) from all \\(m\\) parties in the following manner: + +\\[ +\begin{aligned} + t(x) &= \sum_{j=0}^{m-1} t\_{(j)}(x)\\\\ + {\mathbf{l}}(x) &= {\mathbf{l}}\_{(0)}(x) || {\mathbf{l}}\_{(1)}(x) || \dots || {\mathbf{l}}\_{(m-1)}(x) \\\\ + {\mathbf{r}}(x) &= {\mathbf{r}}\_{(0)}(x) || {\mathbf{r}}\_{(1)}(x) || \dots || {\mathbf{r}}\_{(m-1)}(x) \\\\ +\end{aligned} +\\] + +We can add the \\(t_{(j)}(x)\\) values together to create \\(t(x)\\) instead of taking a random linear combination of \\(t_{(j)}(x)\\) values, because each \\(t_{(j)}(x)\\) is calculated with the \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) challenge variables that are unique to that party \\(j\\), so all of the \\(t_{(j)}(x)\\) values will be offset from one another. + +Now instead of having to do \\(m\\) individual checks to prove that \\(t_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x)\\), and \\({\mathbf{r}}\_{(j)}(x)\\) for all parties \\(j\\) are correct, we can do the verification with one check: + +\\[ +\begin{aligned} + t(x) \stackrel{?}{=} {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle} +\end{aligned} +\\] + +We can do this check using the [inner product proof](index.html#inner-product-proof), in the same way the single-value range proof uses the inner product proof. + +Proving that \\(t_0\\) is correct +--------------------------------- + +Proving that \\(t\_{0, (j)}\\) is correct requires first creating commitments to the variables, and then proving a relation over the commitments. For an explanation of how the commitments are created and how the relation is derived, see the [proving that \\(t_0\\) is correct](index.html#proving-that-t_0-is-correct) step of the single-value range proof. 
The statement each party wants to prove is: + +\\[ +\begin{aligned} + t\_{(j)}(x) B + {\tilde{t}}\_{(j)}(x) {\widetilde{B}} \stackrel{?}{=} z^2 z\_{(j)} V_{(j)} + \delta\_{(j)}(y,z) B + x T\_{1, (j)} + x^{2} T\_{2, (j)}\\\\ + \delta\_{(j)}(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle} +\end{aligned} +\\] + +If we combine all of the statements about \\(t\_{0, (j)}\\) from all of the \\(j\\) parties by adding them together, then we get: + +\\[ +\begin{aligned} + \sum_{j=0}^{m-1}t_{(j)}(x) B + \sum_{j=0}^{m-1}{\tilde{t}}\_{(j)}(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z_{(j)} V_{(j)} + \sum_{j=0}^{m-1} \delta_{(j)}(y,z) B + x \sum_{j=0}^{m-1} T\_{1, (j)} + x^{2} \sum_{j=0}^{m-1} T\_{2, (j)} +\end{aligned} +\\] + +We can combine the values and commitments by summing them directly. We can do this instead of having to take a random linear combination, because each party's values and commitments are already offset by the values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to that party. 
+ +\\[ +\begin{aligned} + t(x) &= \sum_{j=0}^{m-1} t\_{(j)}(x)\\\\ + {\tilde{t}}(x) &= \sum_{j=0}^{m-1}{\tilde{t}}\_{(j)}(x)\\\\ + T_1 &= \sum_{j=0}^{m-1} T_{1, (j)}\\\\ + T_2 &= \sum_{j=0}^{m-1} T_{2, (j)}\\\\ + \delta(y,z) &= \sum_{j=0}^{m-1} \delta\_{(j)}(y,z)\\\\ +\end{aligned} +\\] + +We can plug the equation for \\(\delta_{(j)}(y,z)\\) into the calculation for \\(\delta(y,z)\\): + +\\[ +\begin{aligned} + \delta(y, z) &= (z - z^{2}) \cdot \sum_{j=0}^{m-1} {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}\\\\ +\end{aligned} +\\] + +Since we know that \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n]} \\), we can simplify \\(\delta(y, z)\\): + +\\[ +\begin{aligned} + \delta(y, z) &= (z - z^{2}) \cdot ( + {\langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[0 : n]} \rangle + + \langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[n : 2 \cdot n]} \rangle + + \dots + + \langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[(m-1) \cdot n : m \cdot n]} \rangle}) - + z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle} \\\\ + &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, \mathbf{y}^{n \cdot m} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle} \\\\ +\end{aligned} +\\] + + +Now instead of having to do \\(m\\) individual checks to prove that \\(t\_{0, (j)}\\) for all parties \\(j\\) are correct, we can do the verification with one check using the combined values: + +\\[ +\begin{aligned} + t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z\_{(j)} V_{(j)} + \delta(y,z) B + x T\_{1} + x^{2} T\_{2},\\\\ + \delta(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n \cdot m} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}\\\\ +\end{aligned} +\\] + +Since we know that 
\\(z\_{(j)} = z^j\\), we can rewrite the equation as follows: + +\\[ +\begin{aligned} + t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} \sum_{j=0}^{m-1} z^{j+2} V_{(j)} + \delta(y,z) B + x T\_{1} + x^{2} T\_{2},\\\\ + \delta(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n \cdot m} \rangle} - \sum_{j=0}^{m-1} z^{j+3} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}\\\\ +\end{aligned} +\\] + +Proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct +--------------------------------------------------------------------- + +Proving that \\({\mathbf{l}}\_{(j)}(x)\\), \\({\mathbf{r}}\_{(j)}(x)\\) are correct requires first creating commitments to the variables, and then proving a relation over the commitments. For an explanation of how the commitments are created and how the relation is derived, see the [proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct](index.html#proving-that-mathbflx-mathbfrx-are-correct) step of the single-value range proof. 
The statement that each party wants to prove is: + +\\[ +\begin{aligned} + {\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} + {\langle {\mathbf{r}}\_{(j)}(x), {\mathbf{H}'}\_{(j)} \rangle} \stackrel{?}{=} -{\widetilde{e}\_{(j)}} {\widetilde{B}} + A_{(j)} + x S_{(j)} - z{\langle {\mathbf{1}}, {\mathbf{G}\_{(j)}} \rangle} + {\langle z \mathbf{y}^{n}\_{(j)} + z^2 z_{(j)} {\mathbf{2}}^n, {\mathbf{H}'}\_{(j)} \rangle} +\end{aligned} +\\] + +If we combine all of the statements about \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) from all the \\(j\\) parties by adding them together, then we get: + +\\[ +\begin{aligned} + \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} + + \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_{(j)}(x), {\mathbf{H}'}\_{(j)} \rangle} \stackrel{?}{=} + -\sum_{j=0}^{m-1}{\widetilde{e}\_{(j)}} {\widetilde{B}} + + \sum_{j=0}^{m-1}A_{(j)} + x \sum_{j=0}^{m-1}S_{(j)} - + z \sum_{j=0}^{m-1}{\langle {\mathbf{1}}, {\mathbf{G}\_{(j)}} \rangle} + + \sum_{j=0}^{m-1}{\langle z {\mathbf{y}^n_{(j)}} + z^2 z_{(j)} {\mathbf{2}}^n, {\mathbf{H}'\_{(j)}} \rangle} +\end{aligned} +\\] + +We can simplify this expression by making a few observations. 
We know that: + +\\[ +\begin{aligned} + &{\mathbf{l}}(x) &{}&=&{}& {\mathbf{l}}\_{(0)}(x) & {} &||& {} & {\mathbf{l}}\_{(1)}(x) & {} &||& {} & \dots & {} &||& {} & {\mathbf{l}}\_{(m-1)}(x) \\\\ + &{\mathbf{r}}(x) &{}&=&{}& {\mathbf{r}}\_{(0)}(x) & {} &||& {} & {\mathbf{r}}\_{(1)}(x) & {} &||& {} & \dots & {} &||& {} & {\mathbf{r}}\_{(m-1)}(x) \\\\ + &{\mathbf{G}} &{}&=&{}& {\mathbf{G}}\_{(0)} & {} &||& {} & {\mathbf{G}}\_{(1)} & {} &||& {} & \dots & {} &||& {} & {\mathbf{G}}\_{(m-1)} \\\\ + &{\mathbf{H}'} &{}&=&{}& {\mathbf{H}'}\_{(0)} & {} &||& {} & {\mathbf{H}'}\_{(1)} & {} &||& {} & \dots & {} &||& {} & {\mathbf{H}'}\_{(m-1)} +\end{aligned} +\\] +\\[ +\begin{aligned} + \mathbf{y}^n\_{(j)} &= \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n]} \\\\ + z_{(j)} &= z^j +\end{aligned} +\\] + +Therefore, we can simplify the following statements: + +\\[ +\begin{aligned} + \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} &= {\langle {\mathbf{l}}\_{(0)}(x), {\mathbf{G}}\_{(0)} \rangle} + + {\langle {\mathbf{l}}\_{(1)}(x), {\mathbf{G}}\_{(1)} \rangle} + + \dots + + {\langle {\mathbf{l}}\_{(m-1)}(x), {\mathbf{G}}\_{(m-1)} \rangle}\\\\ + &= {\langle {\mathbf{l}}\_{(0)}(x) || {\mathbf{l}}\_{(1)}(x) || \dots || {\mathbf{l}}\_{(m-1)}(x), {\mathbf{G}}\_{(0)} || {\mathbf{G}}\_{(1)} || \dots || {\mathbf{G}}\_{(m-1)} \rangle} \\\\ + &= {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} \\\\ + \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_{(j)}(x), {\mathbf{H}'}\_{(j)} \rangle} + &= {\langle {\mathbf{r}}\_{(0)}(x), {\mathbf{H}'}\_{(0)} \rangle} + + {\langle {\mathbf{r}}\_{(1)}(x), {\mathbf{H}'}\_{(1)} \rangle} + + \dots + + {\langle {\mathbf{r}}\_{(m-1)}(x), {\mathbf{H}'}\_{(m-1)} \rangle} \\\\ + &= {\langle {\mathbf{r}}\_{(0)}(x) || {\mathbf{r}}\_{(1)}(x) || \dots || {\mathbf{r}}\_{(m-1)}(x), {\mathbf{H}'}\_{(0)} || {\mathbf{H}'}\_{(1)} || \dots || {\mathbf{H}'}\_{(m-1)} \rangle}\\\\ + &= {\langle {\mathbf{r}}(x), {\mathbf{H}'} \rangle} +\end{aligned} +\\] + 
+We can combine the values and commitments from all the \\(m\\) parties by summing them directly: + +\\[ +\begin{aligned} + {\widetilde{e}} &= \sum_{j=0}^{m-1} {\widetilde{e}\_{(j)}} \\\\ + A &= \sum_{j=0}^{m-1} A_{(j)} \\\\ + S &= \sum_{j=0}^{m-1} S_{(j)} \\\\ +\end{aligned} +\\] + +With these observations, we can simplify the combined \\(m\\)-party statement about \\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\) into: + +\\[ +\begin{aligned} + {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x), {\mathbf{H}'} \rangle} \stackrel{?}{=} -{\widetilde{e}} {\widetilde{B}} + A + x S - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} + z{\langle {\mathbf{y}^{n \cdot m}}, {\mathbf{H}'} \rangle} + \sum_{j=0}^{m-1} {\langle z^{j+2} \cdot {\mathbf{2}}^n, {\mathbf{H}'}\_{[j \cdot n : (j+1) \cdot n]} \rangle} +\end{aligned} +\\] + + +[bulletproofs_paper]: https://eprint.iacr.org/2017/1066.pdf \ No newline at end of file diff --git a/docs/range-proof-protocol.md b/docs/range-proof-protocol.md new file mode 100644 index 00000000..0c0d082d --- /dev/null +++ b/docs/range-proof-protocol.md @@ -0,0 +1,190 @@ +The `range_proof` module contains API for producing a range proof for a given integer value. + +Range proof protocol +==================== + +This is a documentation for the internal implementation of a range proof. +You may find the introduction to all the pieces of the protocol in the [notes](../notes/index.html) module. + +The range proof is a zero-knowledge proof of the following relation +\\[ +\operatorname{ZK-PK}\left\\{ + v \in {\mathbb Z\_p} + : v \in [0, 2^n) +\right\\} +\\] where \\(n = 2^{k}\\) is a power of \\(2\\). + +Prover’s algorithm +------------------ + +The protocol begins by computing three commitments: to the value \\(v\\), +to the bits of that value \\(\mathbf{a}\_{L}, \mathbf{a}\_{R}\\), +and to the per-bit blinding factors \\(\mathbf{s}\_{L}, \mathbf{s}\_{R}\\). 
+ +Each bit \\(a_i\\) is committed twice: as \\(a\_{L,i} \gets a\_i\\) and as \\(a\_{R,i} \gets a_i - 1\\). +Similarly for the blinding factors \\(\mathbf{s}\_{L}, \mathbf{s}\_{R}\\). + +\\[ +\begin{aligned} +V &\gets \operatorname{Com}(v, {\widetilde{v}}) && = v \cdot B + {\widetilde{v}} \cdot {\widetilde{B}} \\\\ +A &\gets \operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R}) && = {\langle {\mathbf{a}}\_L, {\mathbf{G}} \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{H}} \rangle} + {\widetilde{a}} {\widetilde{B}} \\\\ +S &\gets \operatorname{Com}({\mathbf{s}}\_{L}, {\mathbf{s}}\_{R}) && = {\langle {\mathbf{s}}\_L, {\mathbf{G}} \rangle} + {\langle {\mathbf{s}}\_R, {\mathbf{H}} \rangle} + {\widetilde{s}} {\widetilde{B}} \\\\ +\end{aligned} +\\] where \\(\widetilde{v}, \widetilde{a}, \widetilde{s}\\) are sampled randomly +from \\({\mathbb Z\_p}\\) and \\(\mathbf{s}\_{L}, \mathbf{s}\_{R}\\) are sampled randomly from \\({\mathbb Z\_p}^{n}\\). + +The prover adds \\(V, A, S\\) to the protocol transcript +and obtains challenge scalars \\(y,z \in {\mathbb Z\_p}\\). 
+ +Using the challenges and the secret vectors, the prover constructs vector polynomials: +\\[ +\begin{aligned} + {\mathbf{l}}(x) &= {\mathbf{l}}\_{0} + {\mathbf{l}}\_{1} x \\\\ + {\mathbf{r}}(x) &= {\mathbf{r}}\_{0} + {\mathbf{r}}\_{1} x \\\\ + {\mathbf{l}}\_{0} &\gets {\mathbf{a}}\_{L} - z {\mathbf{1}} \\\\ + {\mathbf{l}}\_{1} &\gets {\mathbf{s}}\_{L} \\\\ + {\mathbf{r}}\_{0} &\gets {\mathbf{y}}^{n} \circ ({\mathbf{a}}\_{R} + z {\mathbf{1}}) + z^{2} {\mathbf{2}}^{n} \\\\ + {\mathbf{r}}\_{1} &\gets {\mathbf{y}}^{n} \circ {\mathbf{s}}\_{R} +\end{aligned} +\\] + +The inner product of the above vector polynomials is: +\\[ + t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle} = t\_{0} + t\_{1} x + t\_{2} x^{2}, +\\] + +The prover uses Karatsuba’s method to compute the coefficients of that polynomial as follows: +\\[ +\begin{aligned} + t\_{0} &\gets {\langle {\mathbf{l}}\_{0}, {\mathbf{r}}\_{0} \rangle}, \\\\ + t\_{2} &\gets {\langle {\mathbf{l}}\_{1}, {\mathbf{r}}\_{1} \rangle}, \\\\ + t\_{1} &\gets {\langle {\mathbf{l}}\_{0} + {\mathbf{l}}\_{1}, {\mathbf{r}}\_{0} + {\mathbf{r}}\_{1} \rangle} - t\_{0} - t\_{2} +\end{aligned} +\\] + +The prover commits to the terms \\(t_1, t_2\\): +\\[ +\begin{aligned} +T\_1 &\gets \operatorname{Com}(t\_1, {\tilde{t}\_1}) && = t\_1 \cdot B + {\tilde{t}\_1} \cdot {\widetilde{B}} \\\\ +T\_2 &\gets \operatorname{Com}(t\_2, {\tilde{t}\_2}) && = t\_2 \cdot B + {\tilde{t}\_2} \cdot {\widetilde{B}} +\end{aligned} +\\] where \\(\tilde{t}\_1, \tilde{t}\_2\\) are sampled randomly from \\({\mathbb Z\_p}\\). 
+ +The prover adds \\(T_1, T_2\\) to the protocol transcript, +obtains a challenge scalar \\(x \in {\mathbb Z\_p}\\) +and uses it to evaluate the polynomials \\(\mathbf{l}(x), \mathbf{r}(x), t(x)\\): +\\[ +\begin{aligned} + \mathbf{l} &\gets {\mathbf{l}}\_{0} + {\mathbf{l}}\_{1} x\\\\ + \mathbf{r} &\gets {\mathbf{r}}\_{0} + {\mathbf{r}}\_{1} x\\\\ + t(x) &\gets t\_{0} + t\_{1} x + t\_{2} x^{2} +\end{aligned} +\\] + +Next, the prover computes the synthetic blinding factors: +\\[ +\begin{aligned} + {\tilde{t}}(x) &\gets z^{2} {\tilde{v}} + x {\tilde{t}}\_{1} + x^{2} {\tilde{t}}\_{2} \\\\ + \tilde{e} &\gets {\widetilde{a}} + x {\widetilde{s}} +\end{aligned} +\\] + +The prover adds \\(t(x), {\tilde{t}}(x), \tilde{e}\\) to the protocol transcript, +obtains a challenge scalar \\(w \in {\mathbb Z\_p}\\), +and uses it to create a point \\(Q\\): +\\[ + Q \gets w \cdot B +\\] + +The the prover then performs the [inner product argument](../inner_product_proof/index.html) to prove the relation: +\\[ +\operatorname{PK}\left\\{ + ({\mathbf{G}}, {\mathbf{H}}' \in {\mathbb G}^n, P', Q \in {\mathbb G}; {\mathbf{l}}, {\mathbf{r}} \in {\mathbb Z\_p}^n) + : P' = {\langle {\mathbf{l}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}, {\mathbf{H}}' \rangle} + {\langle {\mathbf{l}}, {\mathbf{r}} \rangle} Q +\right\\} +\\] where \\({\mathbf{H}}' = {\mathbf{y}}^{-n} \circ {\mathbf{H}}\\). + +The result of the inner product proof is a list of \\(2k\\) points and \\(2\\) scalars: \\(\\{L\_k, R\_k, \\dots, L\_1, R\_1, a, b\\}\\). 
+ +The complete range proof consists of \\(9+2k\\) 32-byte elements: +\\[ + \\{A, S, T_1, T_2, t(x), {\tilde{t}}(x), \tilde{e}, L\_k, R\_k, \\dots, L\_1, R\_1, a, b\\} +\\] + + +Verifier’s algorithm +-------------------- + +Verifier’s input is the range size \\(n = 2^k\\) (in bits), value commitment \\(V\\), and \\(32 \cdot (9 + 2 k)\\) bytes of the proof data: +\\[ + \\{A, S, T_1, T_2, t(x), {\tilde{t}}(x), \tilde{e}, L\_{k}, R\_{k}, \\dots, L\_1, R\_1, a, b\\} +\\] + +Verifier uses Fiat-Shamir transform to obtain challenges by adding the appropriate data sequentially to the protocol transcript: + +1. \\(V, A, S\\) are added to obtain challenge scalars \\(y,z \in {\mathbb Z\_p}\\), +2. \\(T_1, T_2\\) are added to obtain a challenge \\(x \in {\mathbb Z\_p}\\), +3. \\(t(x), {\tilde{t}}(x), \tilde{e}\\) are added to obtain a challenge \\(w \in {\mathbb Z\_p}\\). + +Verifier computes the following scalars for the [inner product argument](../inner_product_proof/index.html): + +\\[ + \\{u\_{1}^{2}, \dots, u\_{k}^{2}, u\_{1}^{-2}, \dots, u\_{k}^{-2}, s_0, \dots, s_{n-1}\\} +\\] + +The goal of the verifier is to check two equations: + +1. First, verify the constant term of the polynomial \\(t(x)\\) (see [notes](../notes/index.html#proving-that-t_0-is-correct)): + + \\[ + t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z^2 V + \delta(y,z) B + x T\_{1} + x^{2} T\_{2}. + \\] + where \\(\delta(y,z) = (z - z^{2}) \langle 1, {\mathbf{y}}^{n} \rangle + z^{3} \langle \mathbf{1}, {\mathbf{2}}^{n} \rangle\\). + + Rewriting as a comparison with the identity point: + \\[ + 0 \stackrel{?}{=} z^2 V + \delta(y,z) B + x T\_{1} + x^{2} T\_{2} - t(x) B - {\tilde{t}}(x) {\widetilde{B}}. + \\] + +2. Second, verify the inner product argument for the vectors \\(\mathbf{l}(x), \mathbf{r}(x)\\) that form the \\(t(x)\\) (see [inner-product protocol](../inner_product_proof/index.html#verification-equation)) + + \\[ + P' \overset ? 
= {\langle a \cdot {\mathbf{s}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{y}^{-n}} \circ (b /{\mathbf{s}}), {\mathbf{H}} \rangle} + abQ - \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right). + \\] + + Rewriting as a comparison with the identity point and expanding \\(Q = wB\\) and \\(P' = P + t(x) wB\\) as [needed for transition to the inner-product protocol](../notes/index.html#inner-product-proof): + + \\[ + 0 \overset ? = P + t(x) wB - {\langle a \cdot {\mathbf{s}}, {\mathbf{G}} \rangle} - {\langle {\mathbf{y}^{-n}} \circ (b /{\mathbf{s}}), {\mathbf{H}} \rangle} - abwB + \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right), + \\] + where the [definition](../notes/index.html#proving-that-mathbflx-mathbfrx-are-correct) of \\(P\\) is: + \\[ + P = -{\widetilde{e}} {\widetilde{B}} + A + x S + {\langle z {\mathbf{1}} + z^2 {\mathbf{y}^{-n}} \circ {\mathbf{2}}^n, {\mathbf{H}} \rangle} - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle}. +\\] + +Verifier combines two equations in one by sampling a random factor \\(c \\; {\xleftarrow{\\$}} \\; {\mathbb Z\_p}\\), +multiplying the first equation by \\(c\\), and adding it with the second equation. 
+ +Finally, verifier groups all scalars per each point and performs a single multiscalar multiplication over \\((7 + 2n + 2k)\\) points: + +\\[ +\begin{aligned} +0 \quad \stackrel{?}{=} & \quad 1 \cdot A \\\\ + + & \quad x \cdot S \\\\ + + & \quad cz^2 \cdot V \\\\ + + & \quad cx \cdot T_1 \\\\ + + & \quad cx^2 \cdot T_2 \\\\ + + & \quad \Big(w \big(t(x) - ab\big) + c \big(\delta(y,z) - t(x)\big) \Big) \cdot B\\\\ + + & \quad (-{\widetilde{e}} - c{\tilde{t}}(x)) \cdot \widetilde{B} \\\\ + + & \quad {\langle {-z\mathbf{1} - a\mathbf{s}}, {\mathbf{G}} \rangle}\\\\ + + & \quad {\langle {z\mathbf{1} + {\mathbf{y}}^{-n} \circ (z^2\mathbf{2}^n - b/{\mathbf{s}})}, {\mathbf{H}} \rangle}\\\\ + + & \quad {\langle [u_{1}^2, \dots, u_{k}^2 ], [L_1, \dots, L_{k}] \rangle}\\\\ + + & \quad {\langle [u_{1}^{-2}, \dots, u_{k}^{-2} ], [R_1, \dots, R_{k}] \rangle} +\end{aligned} +\\] where \\(1/{\mathbf{s}}\\) are inverses of \\(\mathbf{s}\\), computed as a reversed list of \\(\mathbf{s}\\). + + + + + + diff --git a/rust-toolchain b/rust-toolchain new file mode 100644 index 00000000..9ee2e268 --- /dev/null +++ b/rust-toolchain @@ -0,0 +1 @@ +nightly-2018-04-16 diff --git a/src/generators.rs b/src/generators.rs new file mode 100644 index 00000000..129c2064 --- /dev/null +++ b/src/generators.rs @@ -0,0 +1,198 @@ +//! The `generators` module contains API for producing a +//! set of generators for a rangeproof. +//! +//! +//! # Example +//! +//! ``` +//! # extern crate ristretto_bulletproofs; +//! # use ristretto_bulletproofs::{PedersenGenerators,Generators}; +//! # fn main() { +//! let generators = Generators::new(PedersenGenerators::default(), 64,1); +//! let view = generators.all(); +//! let G0 = view.G[0]; +//! let H0 = view.H[0]; +//! +//! # } +//! 
``` + +#![allow(non_snake_case)] +#![deny(missing_docs)] + +// XXX we should use Sha3 everywhere + +use curve25519_dalek::ristretto; +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::scalar::Scalar; +use sha2::{Digest, Sha512}; + +/// The `GeneratorsChain` creates an arbitrary-long sequence of orthogonal generators. +/// The sequence can be deterministically produced starting with an arbitrary point. +struct GeneratorsChain { + next_point: RistrettoPoint, +} + +impl GeneratorsChain { + /// Creates a chain of generators, determined by the hash of `label`. + fn new(label: &[u8]) -> Self { + let mut hash = Sha512::default(); + hash.input(b"GeneratorsChainInit"); + hash.input(label); + let next_point = RistrettoPoint::from_hash(hash); + GeneratorsChain { next_point } + } +} + +impl Default for GeneratorsChain { + fn default() -> Self { + Self::new(&[]) + } +} + +impl Iterator for GeneratorsChain { + type Item = RistrettoPoint; + fn next(&mut self) -> Option { + let current_point = self.next_point; + let mut hash = Sha512::default(); + hash.input(b"GeneratorsChainNext"); + hash.input(current_point.compress().as_bytes()); + self.next_point = RistrettoPoint::from_hash(hash); + Some(current_point) + } +} + +/// `Generators` contains all the generators needed for aggregating `m` range proofs of `n` bits each. +#[derive(Clone)] +pub struct Generators { + /// Number of bits in a rangeproof + pub n: usize, + /// Number of values or parties + pub m: usize, + /// Bases for Pedersen commitments + pedersen_generators: PedersenGenerators, + /// Per-bit generators for the bit values + G: Vec, + /// Per-bit generators for the bit blinding factors + H: Vec, +} + +/// Represents a view into `Generators` relevant to a specific range proof. 
+#[derive(Copy, Clone)] +pub struct GeneratorsView<'a> { + /// Bases for Pedersen commitments + pub pedersen_generators: &'a PedersenGenerators, + /// Per-bit generators for the bit values + pub G: &'a [RistrettoPoint], + /// Per-bit generators for the bit blinding factors + pub H: &'a [RistrettoPoint], +} + +/// Represents a pair of base points for Pedersen commitments. +#[derive(Copy, Clone)] +pub struct PedersenGenerators { + /// Base for the committed value + pub B: RistrettoPoint, + + /// Base for the blinding factor + pub B_blinding: RistrettoPoint, +} + +impl PedersenGenerators { + /// Constructs a pair of Pedersen generators + /// from a pair of generators provided by the user. + pub fn new(B: RistrettoPoint, B_blinding: RistrettoPoint) -> Self { + PedersenGenerators { B, B_blinding } + } + + /// Creates a Pedersen commitment using the value scalar and a blinding factor. + pub fn commit(&self, value: Scalar, blinding: Scalar) -> RistrettoPoint { + ristretto::multiscalar_mul(&[value, blinding], &[self.B, self.B_blinding]) + } +} + +impl Default for PedersenGenerators { + fn default() -> Self { + PedersenGenerators { + B: GeneratorsChain::new(b"Bulletproofs.Generators.B") + .next() + .unwrap(), + B_blinding: GeneratorsChain::new(b"Bulletproofs.Generators.B_blinding") + .next() + .unwrap(), + } + } +} + +impl Generators { + /// Creates generators for `m` range proofs of `n` bits each. + pub fn new(pedersen_generators: PedersenGenerators, n: usize, m: usize) -> Self { + let G = GeneratorsChain::new(pedersen_generators.B.compress().as_bytes()) + .take(n * m) + .collect(); + let H = GeneratorsChain::new(pedersen_generators.B_blinding.compress().as_bytes()) + .take(n * m) + .collect(); + + Generators { + n, + m, + pedersen_generators: pedersen_generators, + G, + H, + } + } + + /// Returns a view into the entirety of the generators. 
+ pub fn all(&self) -> GeneratorsView { + GeneratorsView { + pedersen_generators: &self.pedersen_generators, + G: &self.G[..], + H: &self.H[..], + } + } + + /// Returns j-th share of generators, with an appropriate + /// slice of vectors G and H for the j-th range proof. + pub fn share(&self, j: usize) -> GeneratorsView { + let lower = self.n * j; + let upper = self.n * (j + 1); + GeneratorsView { + pedersen_generators: &self.pedersen_generators, + G: &self.G[lower..upper], + H: &self.H[lower..upper], + } + } +} + +#[cfg(test)] +mod tests { + extern crate hex; + use super::*; + + #[test] + fn rangeproof_generators() { + let n = 2; + let m = 3; + let gens = Generators::new(PedersenGenerators::default(), n, m); + + // The concatenation of shares must be the full generator set + assert_eq!( + [gens.all().G[..n].to_vec(), gens.all().H[..n].to_vec()], + [gens.share(0).G[..].to_vec(), gens.share(0).H[..].to_vec()] + ); + assert_eq!( + [ + gens.all().G[n..][..n].to_vec(), + gens.all().H[n..][..n].to_vec(), + ], + [gens.share(1).G[..].to_vec(), gens.share(1).H[..].to_vec()] + ); + assert_eq!( + [ + gens.all().G[2 * n..][..n].to_vec(), + gens.all().H[2 * n..][..n].to_vec(), + ], + [gens.share(2).G[..].to_vec(), gens.share(2).H[..].to_vec()] + ); + } +} diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs new file mode 100644 index 00000000..d7f1a5fd --- /dev/null +++ b/src/inner_product_proof.rs @@ -0,0 +1,345 @@ +#![allow(non_snake_case)] + +#![doc(include = "../docs/inner-product-protocol.md")] + +use std::borrow::Borrow; +use std::iter; + +use curve25519_dalek::ristretto; +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::scalar::Scalar; + +use proof_transcript::ProofTranscript; + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct InnerProductProof { + pub(crate) L_vec: Vec, + pub(crate) R_vec: Vec, + pub(crate) a: Scalar, + pub(crate) b: Scalar, +} + +impl InnerProductProof { + /// Create an inner-product proof. 
+ /// + /// The proof is created with respect to the bases \\(G\\), \\(H'\\), + /// where \\(H'\_i = H\_i \cdot \texttt{Hprime\\_factors}\_i\\). + /// + /// The `verifier` is passed in as a parameter so that the + /// challenges depend on the *entire* transcript (including parent + /// protocols). + pub fn create( + verifier: &mut ProofTranscript, + Q: &RistrettoPoint, + Hprime_factors: I, + mut G_vec: Vec, + mut H_vec: Vec, + mut a_vec: Vec, + mut b_vec: Vec, + ) -> InnerProductProof + where + I: IntoIterator, + I::Item: Borrow, + { + // Create slices G, H, a, b backed by their respective + // vectors. This lets us reslice as we compress the lengths + // of the vectors in the main loop below. + let mut G = &mut G_vec[..]; + let mut H = &mut H_vec[..]; + let mut a = &mut a_vec[..]; + let mut b = &mut b_vec[..]; + + let mut n = G.len(); + + // All of the input vectors must have the same length. + assert_eq!(G.len(), n); + assert_eq!(H.len(), n); + assert_eq!(a.len(), n); + assert_eq!(b.len(), n); + + // XXX save these scalar mults by unrolling them into the + // first iteration of the loop below + for (H_i, h_i) in H.iter_mut().zip(Hprime_factors.into_iter()) { + *H_i = (&*H_i) * h_i.borrow(); + } + + let lg_n = n.next_power_of_two().trailing_zeros() as usize; + let mut L_vec = Vec::with_capacity(lg_n); + let mut R_vec = Vec::with_capacity(lg_n); + + while n != 1 { + n = n / 2; + let (a_L, a_R) = a.split_at_mut(n); + let (b_L, b_R) = b.split_at_mut(n); + let (G_L, G_R) = G.split_at_mut(n); + let (H_L, H_R) = H.split_at_mut(n); + + let c_L = inner_product(&a_L, &b_R); + let c_R = inner_product(&a_R, &b_L); + + let L = ristretto::vartime::multiscalar_mul( + a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), + G_R.iter().chain(H_L.iter()).chain(iter::once(Q)), + ); + + let R = ristretto::vartime::multiscalar_mul( + a_R.iter().chain(b_L.iter()).chain(iter::once(&c_R)), + G_L.iter().chain(H_R.iter()).chain(iter::once(Q)), + ); + + L_vec.push(L); + R_vec.push(R); + + 
verifier.commit(L.compress().as_bytes()); + verifier.commit(R.compress().as_bytes()); + + let u = verifier.challenge_scalar(); + let u_inv = u.invert(); + + for i in 0..n { + a_L[i] = a_L[i] * u + u_inv * a_R[i]; + b_L[i] = b_L[i] * u_inv + u * b_R[i]; + G_L[i] = ristretto::vartime::multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]); + H_L[i] = ristretto::vartime::multiscalar_mul(&[u, u_inv], &[H_L[i], H_R[i]]); + } + + a = a_L; + b = b_L; + G = G_L; + H = H_L; + } + + return InnerProductProof { + L_vec: L_vec, + R_vec: R_vec, + a: a[0], + b: b[0], + }; + } + + /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication + /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details. + pub(crate) fn verification_scalars( + &self, + transcript: &mut ProofTranscript, + ) -> (Vec, Vec, Vec) { + let lg_n = self.L_vec.len(); + // XXX FIXME: we need to pass in `n` explicitly and assert that lg_n == lg(n) + // This is necessary for avoiding DoS when a short log(n) vector + // causes the O(n) memory usage in the last loop. + let n = 1 << lg_n; + + // 1. Recompute x_k,...,x_1 based on the proof transcript + + let mut challenges = Vec::with_capacity(lg_n); + for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) { + // XXX maybe avoid this compression when proof ser/de is sorted out + transcript.commit(L.compress().as_bytes()); + transcript.commit(R.compress().as_bytes()); + + challenges.push(transcript.challenge_scalar()); + } + + // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1 + + let mut challenges_inv = challenges.clone(); + let allinv = Scalar::batch_invert(&mut challenges_inv); + + // 3. 
Compute u_i^2 and (1/u_i)^2 + + for i in 0..lg_n { + // XXX missing square fn upstream + challenges[i] = challenges[i] * challenges[i]; + challenges_inv[i] = challenges_inv[i] * challenges_inv[i]; + } + let challenges_sq = challenges; + let challenges_inv_sq = challenges_inv; + + // 4. Compute s values inductively. + + let mut s = Vec::with_capacity(n); + s.push(allinv); + for i in 1..n { + let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize; + let k = 1 << lg_i; + // The challenges are stored in "creation order" as [u_k,...,u_1], + // so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i + let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i]; + s.push(s[i - k] * u_lg_i_sq); + } + + (challenges_sq, challenges_inv_sq, s) + } + + /// This method is for testing that proof generation work, + /// but for efficiency the actual protocols would use `verification_scalars` + /// method to combine inner product verification with other checks + /// in a single multiscalar multiplication. + #[allow(dead_code)] + pub fn verify( + &self, + transcript: &mut ProofTranscript, + Hprime_factors: I, + P: &RistrettoPoint, + Q: &RistrettoPoint, + G: &[RistrettoPoint], + H: &[RistrettoPoint], + ) -> Result<(), ()> + where + I: IntoIterator, + I::Item: Borrow, + { + let (u_sq, u_inv_sq, s) = self.verification_scalars(transcript); + + let a_times_s = s.iter().map(|s_i| self.a * s_i); + + // 1/s[i] is s[!i], and !i runs from n-1 to 0 as i runs from 0 to n-1 + let inv_s = s.iter().rev(); + + let h_times_b_div_s = Hprime_factors + .into_iter() + .zip(inv_s) + .map(|(h_i, s_i_inv)| (self.b * s_i_inv) * h_i.borrow()); + + let neg_u_sq = u_sq.iter().map(|ui| -ui); + let neg_u_inv_sq = u_inv_sq.iter().map(|ui| -ui); + + let expect_P = ristretto::vartime::multiscalar_mul( + iter::once(self.a * self.b) + .chain(a_times_s) + .chain(h_times_b_div_s) + .chain(neg_u_sq) + .chain(neg_u_inv_sq), + iter::once(Q) + .chain(G.iter()) + .chain(H.iter()) + .chain(self.L_vec.iter()) + .chain(self.R_vec.iter()), + ); 
+ + if expect_P == *P { + Ok(()) + } else { + Err(()) + } + } +} + +/// Computes an inner product of two vectors +/// \\[ +/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i. +/// \\] +/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. +pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { + let mut out = Scalar::zero(); + if a.len() != b.len() { + panic!("inner_product(a,b): lengths of vectors do not match"); + } + for i in 0..a.len() { + out += a[i] * b[i]; + } + out +} + +#[cfg(test)] +mod tests { + use super::*; + + use rand::OsRng; + use sha2::Sha512; + use util; + + fn test_helper_create(n: usize) { + let mut rng = OsRng::new().unwrap(); + + use generators::{Generators, PedersenGenerators}; + let gens = Generators::new(PedersenGenerators::default(), n, 1); + let G = gens.share(0).G.to_vec(); + let H = gens.share(0).H.to_vec(); + + // Q would be determined upstream in the protocol, so we pick a random one. + let Q = RistrettoPoint::hash_from_bytes::(b"test point"); + + // a and b are the vectors for which we want to prove c = + let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); + let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); + let c = inner_product(&a, &b); + + // y_inv is (the inverse of) a random challenge + let y_inv = Scalar::random(&mut rng); + + // P would be determined upstream, but we need a correct P to check the proof. 
+ // + // To generate P = + + Q, compute + // P = + + Q, + // where b' = b \circ y^(-n) + let b_prime = b.iter().zip(util::exp_iter(y_inv)).map(|(bi, yi)| bi * yi); + // a.iter() has Item=&Scalar, need Item=Scalar to chain with b_prime + let a_prime = a.iter().cloned(); + + let P = ristretto::vartime::multiscalar_mul( + a_prime.chain(b_prime).chain(iter::once(c)), + G.iter().chain(H.iter()).chain(iter::once(&Q)), + ); + + let mut verifier = ProofTranscript::new(b"innerproducttest"); + let proof = InnerProductProof::create( + &mut verifier, + &Q, + util::exp_iter(y_inv), + G.clone(), + H.clone(), + a.clone(), + b.clone(), + ); + + let mut verifier = ProofTranscript::new(b"innerproducttest"); + assert!( + proof + .verify(&mut verifier, util::exp_iter(y_inv), &P, &Q, &G, &H) + .is_ok() + ); + } + + #[test] + fn make_ipp_1() { + test_helper_create(1); + } + + #[test] + fn make_ipp_2() { + test_helper_create(2); + } + + #[test] + fn make_ipp_4() { + test_helper_create(4); + } + + #[test] + fn make_ipp_32() { + test_helper_create(32); + } + + #[test] + fn make_ipp_64() { + test_helper_create(64); + } + + #[test] + fn test_inner_product() { + let a = vec![ + Scalar::from_u64(1), + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + ]; + let b = vec![ + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + Scalar::from_u64(5), + ]; + assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); + } +} diff --git a/src/lib.rs b/src/lib.rs index e21b08f4..6941eebf 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,103 +1,45 @@ +#![cfg_attr(feature = "bench", feature(test))] +#![feature(nll)] #![feature(test)] +#![feature(external_doc)] +#![doc(include = "../README.md")] +#![doc(html_logo_url = "https://doc.dalek.rs/assets/dalek-logo-clear.png")] +//! Note that docs will only build on nightly Rust until +//! [RFC 1990 stabilizes](https://github.com/rust-lang/rust/issues/44732). 
+ +extern crate byteorder; extern crate curve25519_dalek; -extern crate sha2; -extern crate test; extern crate rand; -use std::iter; -use curve25519_dalek::ristretto::{RistrettoPoint}; -use curve25519_dalek::ristretto; -use curve25519_dalek::traits::Identity; -use sha2::{Digest, Sha256}; -use curve25519_dalek::scalar::Scalar; -use rand::OsRng; - - -struct RangeProof { - -} - -impl RangeProof { - pub fn generate_proof(v: u64, len: usize, a: &RistrettoPoint, b: &RistrettoPoint) -> RangeProof { - let mut rng: OsRng = OsRng::new().unwrap(); - - let b_vec = make_generators(b, len); - let a_vec = make_generators(a, len); - - let alpha = RistrettoPoint::random(&mut rng); - let mut big_a = alpha.clone(); - for i in 0..len { - let v_i = (v >> i) & 1; - if v_i == 0 { - big_a -= a_vec[i]; - } else { - big_a += b_vec[i]; - } - } - - let points_iter = iter::once(a).chain(b_vec.iter()).chain(a_vec.iter()); - let randomness: Vec<_> = (0..2*len+1).map(|_| Scalar::random(&mut rng)).collect(); - let big_s = ristretto::multiscalar_mult(&randomness, points_iter); - - let _rho = &randomness[0]; - let _s_l = &randomness[1..len+1]; - let _s_r = &randomness[len+1..2*len+1]; - - let (_y, _z) = commit(&big_a, &big_s); - - unimplemented!() - } - - pub fn verify_proof() -> Result<(), ()> { - unimplemented!() - } -} - -pub fn make_generators(point: &RistrettoPoint, len: usize) - -> Vec -{ - let mut generators = vec![RistrettoPoint::identity(); len]; - - generators[0] = RistrettoPoint::hash_from_bytes::(point.compress().as_bytes()); - for i in 1..len { - let prev = generators[i-1].compress(); - generators[i] = RistrettoPoint::hash_from_bytes::(prev.as_bytes()); - } - generators -} - -pub fn commit(a: &RistrettoPoint, s: &RistrettoPoint) -> (RistrettoPoint, RistrettoPoint) { - let mut y_digest = Sha256::new(); - y_digest.input(a.compress().as_bytes()); - y_digest.input(s.compress().as_bytes()); - let y = RistrettoPoint::hash_from_bytes::(&y_digest.result()); - - let mut z_digest = Sha256::new(); 
- z_digest.input(a.compress().as_bytes()); - z_digest.input(s.compress().as_bytes()); - z_digest.input(y.compress().as_bytes()); - let z = RistrettoPoint::hash_from_bytes::(&z_digest.result()); +extern crate sha2; +extern crate subtle; +extern crate tiny_keccak; - (y, z) -} +#[macro_use] +extern crate serde_derive; #[cfg(test)] -mod tests { - use super::*; - #[test] - fn test_make_generators() { - use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; - println!("{:?}", make_generators(RISTRETTO_BASEPOINT_POINT, 20)); - } -} - -mod bench { - use super::*; - use test::Bencher; +extern crate test; - #[bench] - fn benchmark_make_generators(b: &mut Bencher) { - use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; - b.iter(|| make_generators(RISTRETTO_BASEPOINT_POINT, 100)); - } +#[cfg(test)] +extern crate bincode; + +mod util; + +#[doc(include = "../docs/notes.md")] +mod notes {} +mod generators; +mod inner_product_proof; +mod proof_transcript; +mod range_proof; + +pub use generators::{Generators, GeneratorsView, PedersenGenerators}; +pub use proof_transcript::ProofTranscript; +pub use range_proof::RangeProof; + +/// API for performing the aggregate-proving multiparty computation protocol. +pub mod aggregation { + pub use range_proof::dealer; + pub use range_proof::messages; + pub use range_proof::party; } diff --git a/src/proof_transcript.rs b/src/proof_transcript.rs new file mode 100644 index 00000000..d7b48aa0 --- /dev/null +++ b/src/proof_transcript.rs @@ -0,0 +1,316 @@ +#![deny(missing_docs)] + +//! The `proof_transcript` module contains API designed to allow +//! implementation of non-interactive proofs as if they were +//! interactive, using the Fiat-Shamir transform. + +use curve25519_dalek::scalar::Scalar; + +use tiny_keccak::Keccak; + +use byteorder::{ByteOrder, LittleEndian}; + +/// The `ProofTranscript` struct represents a transcript of messages +/// between a prover and verifier engaged in a public-coin argument. 
+/// +/// The prover can send messages to the `ProofTranscript` object, +/// which absorbs them into a sponge, and can then request challenges, +/// which are derived from all previous messages. +/// +/// The verifier can then construct its own `ProofTranscript` +/// object, send it (what should be) the same messages, and request +/// (what should be) the same challenge values. +/// +/// To create a `ProofTranscript` object, use `ProofTranscript::new()` +/// at the outermost protocol layer. A `&mut` reference to this +/// object can then be passed to any sub-protocols, making it easy to +/// ensure that their challenge values are bound to the *entire* proof +/// transcript, not just the sub-protocol. +/// +/// ## Warning +/// +/// Internally, the `ProofTranscript` uses ad-hoc duplex construction +/// using Keccak that absorbs incoming messages and squeezes challenges. +/// There is no security analysis yet, so it is **only suitable for testing**. +/// +/// ## Example +/// +/// ``` +/// # extern crate curve25519_dalek; +/// # extern crate ristretto_bulletproofs; +/// # use ristretto_bulletproofs::ProofTranscript; +/// # fn main() { +/// +/// use curve25519_dalek::constants; +/// let B = &constants::RISTRETTO_BASEPOINT_TABLE; +/// +/// let mut transcript = ProofTranscript::new(b"MyProofName: Don't copypaste this"); +/// +/// // Send "some message" to the verifier +/// transcript.commit(b"some message"); +/// +/// // Extract a challenge scalar +/// let x = transcript.challenge_scalar(); +/// +/// // Send x * B to the verifier +/// let P = B * &x; +/// transcript.commit(P.compress().as_bytes()); +/// # } +/// ``` +#[derive(Clone)] +pub struct ProofTranscript { + hash: Keccak, + rate: usize, + write_offset: usize, // index within a block where the message must be absorbed +} + +impl ProofTranscript { + // Implementation notes + // + // The implementation has 3 layers: + // 1. 
commit/challenge - take input/output buffers <64K, responsible for disambiguation (length prefixing) + // 2. write/read - take arbitrary buffers, responsible for splitting data over Keccak-f invocations and padding + // 3. absorb/squeeze - actual sponge operations, outer layers ensure that absorb/squeeze do not perform unnecessary permutation + // + + /// Begin a new, empty proof transcript, using the given `label` + /// for domain separation. + pub fn new(label: &[u8]) -> Self { + let mut ro = ProofTranscript { + // NOTE: if you change the security parameter, also change the rate below + hash: Keccak::new_shake128(), + rate: 1600 / 8 - (2 * 128 / 8), // 168 bytes + write_offset: 0, + }; + // We will bump the version prefix each time we + // make a breaking change in order to disambiguate + // from the previous versions of this implementation. + ro.commit(b"ProofTranscript v2"); + ro.commit(label); + ro + } + + /// Commit a `input` to the proof transcript. + /// + /// # Note + /// + /// Each input must be ≤ than the number of bytes + /// returned by `max_commit_size()`. + pub fn commit(&mut self, input: &[u8]) { + let len = input.len(); + if len > (u16::max_value() as usize) { + panic!("Committed input must be less than 64Kb!"); + } + self.write_u16(len as u16); + self.write(input); + } + + /// Commit a `u64` to the proof transcript. + /// + /// This is a convenience method that commits the little-endian + /// bytes of `value`. + pub fn commit_u64(&mut self, value: u64) { + let mut value_bytes = [0u8; 8]; + LittleEndian::write_u64(&mut value_bytes, value); + + self.commit(&value_bytes); + } + + /// Extracts an arbitrary-sized challenge byte slice. + /// + /// Note: each call performs at least one Keccak permutation, + /// so if you need to read multiple logical challenges at once, + /// you should read a bigger slice in one call for minimal overhead. 
+ pub fn challenge_bytes(&mut self, output: &mut [u8]) { + let len = output.len(); + if output.len() > (u16::max_value() as usize) { + panic!("Challenge output must be less than 64Kb!"); + } + // Note: when reading, length prefix N is followed by keccak padding 10*1 + // as if empty message was written; when writing, length prefix N is followed + // by N bytes followed by keccak padding 10*1. + // This creates ambiguity only for case N=0 (empty write or empty read), + // which is safe as no information is actually transmitted in either direction. + self.write_u16(len as u16); + self.read(output); + } + + /// Extracts a challenge scalar. + /// + /// This is a convenience method that extracts 64 bytes and + /// reduces modulo the group order. + /// + /// Note: each call performs at least one Keccak permutation, + /// so if you need to read multiple challenge scalars, + /// for the minimal overhead you should read `n*64` bytes + /// using the `challenge_bytes` method and reduce each + /// 64-byte window into a scalar yourself. + pub fn challenge_scalar(&mut self) -> Scalar { + let mut buf = [0u8; 64]; + self.challenge_bytes(&mut buf); + Scalar::from_bytes_mod_order_wide(&buf) + } + + /// Internal API: writes 2-byte length prefix. + fn write_u16(&mut self, integer: u16) { + let mut intbuf = [0u8; 2]; + LittleEndian::write_u16(&mut intbuf, integer); + self.write(&intbuf); + } + + /// Internal API: writes arbitrary byte slice + /// splitting it over multiple duplex calls if needed. + fn write(&mut self, mut input: &[u8]) { + // `write` can be called multiple times. + // If we overflow the available room (`rate-1` at most) + // we absorb what we can, add Keccak padding, permute and continue. + let mut room = self.rate - 1 - self.write_offset; // 1 byte is reserved for keccak padding 10*1. 
+ while input.len() > room { + self.hash.absorb(&input[..room]); + self.hash.pad(); + self.hash.fill_block(); + self.write_offset = 0; + input = &input[room..]; + room = self.rate - 1; + } + self.hash.absorb(input); + self.write_offset += input.len(); // could end up == (rate-1) + } + + /// Internal API: reads arbitrary byte slice + /// splitting it over multiple duplex calls if needed. + fn read(&mut self, output: &mut [u8]) { + // Note 1: `read` is called only once after `write`, so we do + // not need to support multiple reads from some offset. + // We only need to complete the pending duplex call by padding and permuting. + // Note 2: Since we hash in the total output buffer length, + // we can use default squeeze behaviour w/o simulating blank inputs: + // the resulting byte-stream will be disambiguated by that length prefix and keccak padding. + self.hash.pad(); + self.hash.fill_block(); + self.write_offset = 0; + self.hash.squeeze(output); + } +} + +#[cfg(test)] +mod tests { + extern crate hex; + use super::*; + + #[test] + fn challenges_must_be_random() { + { + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"test"); + let mut ch = [0u8; 32]; + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "dec44a90f423c15874f7c0afaf62cc6cc0987bf428202cb3508fc7d7c9b5b30a" + ); + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "f83256ef4964d71ec6f2dd2f79db70820c781bd8c3d1fceec7cbfa4965d4e530" + ); + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "962f9ef161604c5dcbe3387773b293a0e27a6e6ee14ec5d9f6c78a45c36fc0e1" + ); + } + + { + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"test"); + let mut ch = [0u8; 32]; + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "dec44a90f423c15874f7c0afaf62cc6cc0987bf428202cb3508fc7d7c9b5b30a" + ); + ro.commit(b"extra commitment"); + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + 
"edf99afca6c21e4240f33826d60cb1b7c5d59d3dd363d2928bab7b8f94d24eaa" + ); + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "a42eabb9d1c9c73dc8c33c0933cee8d5fabd48fcab686d9fcb8f1680841e4369" + ); + } + } + + #[test] + fn inputs_are_disambiguated_by_length_prefix() { + { + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"msg1msg2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "3a941266af4275d5"); + } + } + { + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"msg1"); + ro.commit(b"msg2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "644d94299bcc5590"); + } + } + { + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"msg"); + ro.commit(b"1msg2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "14f18d260e679f9a"); + } + } + { + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"ms"); + ro.commit(b"g1ms"); + ro.commit(b"g2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "09dccc9d7dfa6f37"); + } + } + } + + #[test] + fn outputs_are_disambiguated_by_length_prefix() { + let mut ro = ProofTranscript::new(b"TestProtocol"); + { + let mut ch = [0u8; 16]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "60890c8d774932db1aba587941cbffca"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "bb9308c7d34769ae2a3c040394efb2ab"); + } + + let mut ro = ProofTranscript::new(b"TestProtocol"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "cc76fac64922bc58"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "d259804aae5c3246"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "6d3a732156286895"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "2165dcd38764b5ae"); + } + } + +} diff --git a/src/range_proof/dealer.rs 
b/src/range_proof/dealer.rs new file mode 100644 index 00000000..c4ec6c51 --- /dev/null +++ b/src/range_proof/dealer.rs @@ -0,0 +1,321 @@ +use rand::Rng; + +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::scalar::Scalar; +use curve25519_dalek::traits::Identity; + +use generators::GeneratorsView; +use inner_product_proof; +use proof_transcript::ProofTranscript; +use range_proof::RangeProof; + +use util; + +use super::messages::*; + +/// Dealer is an entry-point API for setting up a dealer +pub struct Dealer {} + +impl Dealer { + /// Creates a new dealer coordinating `m` parties proving `n`-bit ranges. + pub fn new<'a, 'b>( + gens: GeneratorsView<'b>, + n: usize, + m: usize, + transcript: &'a mut ProofTranscript, + ) -> Result, &'static str> { + if !n.is_power_of_two() || n > 64 { + return Err("n is not valid: must be a power of 2, and less than or equal to 64"); + } + if !m.is_power_of_two() { + return Err("m is not valid: must be a power of 2"); + } + + // At the end of the protocol, the dealer will attempt to + // verify the proof, and if it fails, determine which party's + // shares were invalid. + // + // However, verifying the proof requires either knowledge of + // all of the challenges, or a copy of the initial transcript + // state. + // + // The dealer has all of the challenges, but using them for + // verification would require duplicating the verification + // logic. Instead, we keep a copy of the initial transcript + // state. + let initial_transcript = transcript.clone(); + + // Commit to aggregation parameters + transcript.commit_u64(n as u64); + transcript.commit_u64(m as u64); + + Ok(DealerAwaitingValueCommitments { + n, + m, + transcript, + initial_transcript, + gens, + }) + } +} + +/// The initial dealer state, waiting for the parties to send value +/// commitments. 
+pub struct DealerAwaitingValueCommitments<'a, 'b> { + n: usize, + m: usize, + transcript: &'a mut ProofTranscript, + /// The dealer keeps a copy of the initial transcript state, so + /// that it can attempt to verify the aggregated proof at the end. + initial_transcript: ProofTranscript, + gens: GeneratorsView<'b>, +} + +impl<'a, 'b> DealerAwaitingValueCommitments<'a, 'b> { + /// Combines commitments and computes challenge variables. + pub fn receive_value_commitments( + self, + value_commitments: &[ValueCommitment], + ) -> Result<(DealerAwaitingPolyCommitments<'a, 'b>, ValueChallenge), &'static str> { + if self.m != value_commitments.len() { + return Err("Length of value commitments doesn't match expected length m"); + } + + let mut A = RistrettoPoint::identity(); + let mut S = RistrettoPoint::identity(); + + for commitment in value_commitments.iter() { + // Commit each V individually + self.transcript.commit(commitment.V.compress().as_bytes()); + + // Commit sums of As and Ss. + A += commitment.A; + S += commitment.S; + } + + self.transcript.commit(A.compress().as_bytes()); + self.transcript.commit(S.compress().as_bytes()); + + let y = self.transcript.challenge_scalar(); + let z = self.transcript.challenge_scalar(); + let value_challenge = ValueChallenge { y, z }; + + Ok(( + DealerAwaitingPolyCommitments { + n: self.n, + m: self.m, + transcript: self.transcript, + initial_transcript: self.initial_transcript, + gens: self.gens, + value_challenge: value_challenge.clone(), + }, + value_challenge, + )) + } +} + +pub struct DealerAwaitingPolyCommitments<'a, 'b> { + n: usize, + m: usize, + transcript: &'a mut ProofTranscript, + initial_transcript: ProofTranscript, + gens: GeneratorsView<'b>, + value_challenge: ValueChallenge, +} + +impl<'a, 'b> DealerAwaitingPolyCommitments<'a, 'b> { + pub fn receive_poly_commitments( + self, + poly_commitments: &[PolyCommitment], + ) -> Result<(DealerAwaitingProofShares<'a, 'b>, PolyChallenge), &'static str> { + if self.m != 
poly_commitments.len() { + return Err("Length of poly commitments doesn't match expected length m"); + } + + // Commit sums of T1s and T2s. + let mut T1 = RistrettoPoint::identity(); + let mut T2 = RistrettoPoint::identity(); + for commitment in poly_commitments.iter() { + T1 += commitment.T_1; + T2 += commitment.T_2; + } + self.transcript.commit(T1.compress().as_bytes()); + self.transcript.commit(T2.compress().as_bytes()); + + let x = self.transcript.challenge_scalar(); + let poly_challenge = PolyChallenge { x }; + + Ok(( + DealerAwaitingProofShares { + n: self.n, + m: self.m, + transcript: self.transcript, + initial_transcript: self.initial_transcript, + gens: self.gens, + value_challenge: self.value_challenge, + poly_challenge: poly_challenge.clone(), + }, + poly_challenge, + )) + } +} + +pub struct DealerAwaitingProofShares<'a, 'b> { + n: usize, + m: usize, + transcript: &'a mut ProofTranscript, + initial_transcript: ProofTranscript, + gens: GeneratorsView<'b>, + value_challenge: ValueChallenge, + poly_challenge: PolyChallenge, +} + +impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { + /// Assembles proof shares into a `RangeProof`. + /// + /// Used as a helper function by `receive_trusted_shares` (which + /// just hands back the result) and `receive_shares` (which + /// validates the proof shares). 
+ fn assemble_shares(&mut self, proof_shares: &[ProofShare]) -> Result { + if self.m != proof_shares.len() { + return Err("Length of proof shares doesn't match expected length m"); + } + + let A = proof_shares + .iter() + .fold(RistrettoPoint::identity(), |A, ps| { + A + ps.value_commitment.A + }); + let S = proof_shares + .iter() + .fold(RistrettoPoint::identity(), |S, ps| { + S + ps.value_commitment.S + }); + let T_1 = proof_shares + .iter() + .fold(RistrettoPoint::identity(), |T_1, ps| { + T_1 + ps.poly_commitment.T_1 + }); + let T_2 = proof_shares + .iter() + .fold(RistrettoPoint::identity(), |T_2, ps| { + T_2 + ps.poly_commitment.T_2 + }); + let t = proof_shares + .iter() + .fold(Scalar::zero(), |acc, ps| acc + ps.t_x); + let t_x_blinding = proof_shares + .iter() + .fold(Scalar::zero(), |acc, ps| acc + ps.t_x_blinding); + let e_blinding = proof_shares + .iter() + .fold(Scalar::zero(), |acc, ps| acc + ps.e_blinding); + + self.transcript.commit(t.as_bytes()); + self.transcript.commit(t_x_blinding.as_bytes()); + self.transcript.commit(e_blinding.as_bytes()); + + // Get a challenge value to combine statements for the IPP + let w = self.transcript.challenge_scalar(); + let Q = w * self.gens.pedersen_generators.B; + + let l_vec: Vec = proof_shares + .iter() + .flat_map(|ps| ps.l_vec.clone().into_iter()) + .collect(); + let r_vec: Vec = proof_shares + .iter() + .flat_map(|ps| ps.r_vec.clone().into_iter()) + .collect(); + + let ipp_proof = inner_product_proof::InnerProductProof::create( + self.transcript, + &Q, + util::exp_iter(self.value_challenge.y.invert()), + self.gens.G.to_vec(), + self.gens.H.to_vec(), + l_vec.clone(), + r_vec.clone(), + ); + + Ok(RangeProof { + A, + S, + T_1, + T_2, + t_x: t, + t_x_blinding, + e_blinding, + ipp_proof, + }) + } + + /// Assemble the final aggregated proof from the given + /// `proof_shares`, and validate that all input shares and the + /// aggregated proof are well-formed. 
If the aggregated proof is + /// not well-formed, this function detects which party submitted a + /// malformed share and returns that information as part of the + /// error. + /// + /// XXX define error types so we can surface the blame info + pub fn receive_shares( + mut self, + rng: &mut R, + proof_shares: &[ProofShare], + ) -> Result { + let proof = self.assemble_shares(proof_shares)?; + + // XXX if we change the proof verification API to use + // iterators we can do it with ZeRo-CoSt-AbStRaCtIonS + let value_commitments: Vec<_> = proof_shares + .iter() + .map(|ps| ps.value_commitment.V) + .collect(); + + // See comment in `Dealer::new` for why we use `initial_transcript` + if proof + .verify( + &value_commitments, + self.gens, + &mut self.initial_transcript, + rng, + self.n, + ) + .is_ok() + { + Ok(proof) + } else { + // Create a list of bad shares + let mut bad_shares = Vec::new(); + for (j, share) in proof_shares.iter().enumerate() { + match share.verify_share(self.n, j, &self.value_challenge, &self.poly_challenge) { + Ok(_) => {} + Err(_) => bad_shares.push(j), + } + } + // XXX pass this upwards + println!("bad shares: {:?}", bad_shares); + Err("proof failed to verify") + } + } + + /// Assemble the final aggregated proof from the given + /// `proof_shares`, but does not validate that they are well-formed. + /// + /// ## WARNING + /// + /// This function does **NOT** validate the proof shares. It is + /// suitable for creating aggregated proofs when all parties are + /// known by the dealer to be honest (for instance, when there's + /// only one party playing all roles). + /// + /// Otherwise, use `receive_shares`, which validates that all + /// shares are well-formed, or else detects which party(ies) + /// submitted malformed shares. 
+ pub fn receive_trusted_shares( + mut self, + proof_shares: &[ProofShare], + ) -> Result { + self.assemble_shares(proof_shares) + } +} diff --git a/src/range_proof/messages.rs b/src/range_proof/messages.rs new file mode 100644 index 00000000..a716cadc --- /dev/null +++ b/src/range_proof/messages.rs @@ -0,0 +1,119 @@ +use std::iter; + +use curve25519_dalek::ristretto::{self, RistrettoPoint}; +use curve25519_dalek::scalar::Scalar; +use curve25519_dalek::traits::IsIdentity; + +use inner_product_proof; +use util; + +#[derive(Serialize, Deserialize, Copy, Clone, Debug)] +pub struct ValueCommitment { + pub V: RistrettoPoint, + pub A: RistrettoPoint, + pub S: RistrettoPoint, +} + +#[derive(Serialize, Deserialize, Copy, Clone, Debug)] +pub struct ValueChallenge { + pub y: Scalar, + pub z: Scalar, +} + +#[derive(Serialize, Deserialize, Copy, Clone, Debug)] +pub struct PolyCommitment { + pub T_1: RistrettoPoint, + pub T_2: RistrettoPoint, +} + +#[derive(Serialize, Deserialize, Copy, Clone, Debug)] +pub struct PolyChallenge { + pub x: Scalar, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct ProofShare { + pub value_commitment: ValueCommitment, + pub poly_commitment: PolyCommitment, + + pub t_x: Scalar, + pub t_x_blinding: Scalar, + pub e_blinding: Scalar, + + pub l_vec: Vec, + pub r_vec: Vec, +} + +impl ProofShare { + pub fn verify_share( + &self, + n: usize, + j: usize, + value_challenge: &ValueChallenge, + poly_challenge: &PolyChallenge, + ) -> Result<(), &'static str> { + use generators::{Generators, PedersenGenerators}; + let generators = Generators::new(PedersenGenerators::default(), n, j + 1); + let gen = generators.share(j); + + // renaming and precomputation + let x = poly_challenge.x; + let y = value_challenge.y; + let z = value_challenge.z; + let zz = z * z; + let minus_z = -z; + let z_j = util::exp_iter(z).take(j + 1).last().unwrap(); // z^j + let y_jn = util::exp_iter(y).take(j * n + 1).last().unwrap(); // y^(j*n) + let y_jn_inv = y_jn.invert(); 
// y^(-j*n) + let y_inv = y.invert(); // y^(-1) + + if self.t_x != inner_product_proof::inner_product(&self.l_vec, &self.r_vec) { + return Err("Inner product of l_vec and r_vec is not equal to t_x"); + } + + let g = self.l_vec.iter().map(|l_i| minus_z - l_i); + let h = self.r_vec + .iter() + .zip(util::exp_iter(Scalar::from_u64(2))) + .zip(util::exp_iter(y_inv)) + .map(|((r_i, exp_2), exp_y_inv)| { + z + exp_y_inv * y_jn_inv * (-r_i) + exp_y_inv * y_jn_inv * (zz * z_j * exp_2) + }); + let P_check = ristretto::vartime::multiscalar_mul( + iter::once(Scalar::one()) + .chain(iter::once(x)) + .chain(iter::once(-self.e_blinding)) + .chain(g) + .chain(h), + iter::once(&self.value_commitment.A) + .chain(iter::once(&self.value_commitment.S)) + .chain(iter::once(&gen.pedersen_generators.B_blinding)) + .chain(gen.G.iter()) + .chain(gen.H.iter()), + ); + if !P_check.is_identity() { + return Err("P check is not equal to zero"); + } + + let sum_of_powers_y = util::sum_of_powers(&y, n); + let sum_of_powers_2 = util::sum_of_powers(&Scalar::from_u64(2), n); + let delta = (z - zz) * sum_of_powers_y * y_jn - z * zz * sum_of_powers_2 * z_j; + let t_check = ristretto::vartime::multiscalar_mul( + iter::once(zz * z_j) + .chain(iter::once(x)) + .chain(iter::once(x * x)) + .chain(iter::once(delta - self.t_x)) + .chain(iter::once(-self.t_x_blinding)), + iter::once(&self.value_commitment.V) + .chain(iter::once(&self.poly_commitment.T_1)) + .chain(iter::once(&self.poly_commitment.T_2)) + .chain(iter::once(&gen.pedersen_generators.B)) + .chain(iter::once(&gen.pedersen_generators.B_blinding)), + ); + if !t_check.is_identity() { + return Err("t check is not equal to zero"); + } + + Ok(()) + } +} diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs new file mode 100644 index 00000000..0ec4dc1a --- /dev/null +++ b/src/range_proof/mod.rs @@ -0,0 +1,700 @@ +#![allow(non_snake_case)] +#![doc(include = "../docs/range-proof-protocol.md")] + +use rand::Rng; +use std::iter; +use 
std::borrow::Borrow; + +use curve25519_dalek::ristretto; +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::scalar::Scalar; +use curve25519_dalek::traits::IsIdentity; + +use generators::{Generators, GeneratorsView}; +use inner_product_proof::InnerProductProof; +use proof_transcript::ProofTranscript; +use util; + +// Modules for MPC protocol + +pub mod dealer; +pub mod messages; +pub mod party; + +/// The `RangeProof` struct represents a single range proof. +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct RangeProof { + /// Commitment to the bits of the value + A: RistrettoPoint, + /// Commitment to the blinding factors + S: RistrettoPoint, + /// Commitment to the \\(t_1\\) coefficient of \\( t(x) \\) + T_1: RistrettoPoint, + /// Commitment to the \\(t_2\\) coefficient of \\( t(x) \\) + T_2: RistrettoPoint, + /// Evaluation of the polynomial \\(t(x)\\) at the challenge point \\(x\\) + t_x: Scalar, + /// Blinding factor for the synthetic commitment to \\(t(x)\\) + t_x_blinding: Scalar, + /// Blinding factor for the synthetic commitment to the inner-product arguments + e_blinding: Scalar, + /// Proof data for the inner-product argument. + ipp_proof: InnerProductProof, +} + +impl RangeProof { + /// Create a rangeproof for a given pair of value `v` and + /// blinding scalar `v_blinding`. + /// + /// XXX add doctests + pub fn prove_single( + generators: &Generators, + transcript: &mut ProofTranscript, + rng: &mut R, + v: u64, + v_blinding: &Scalar, + n: usize, + ) -> Result { + RangeProof::prove_multiple(generators, transcript, rng, &[v], &[*v_blinding], n) + } + + /// Create a rangeproof for a set of values. 
+ /// + /// XXX add doctests + pub fn prove_multiple( + generators: &Generators, + transcript: &mut ProofTranscript, + rng: &mut R, + values: &[u64], + blindings: &[Scalar], + n: usize, + ) -> Result { + use self::dealer::*; + use self::party::*; + + if values.len() != blindings.len() { + return Err("mismatched values and blindings len"); + } + + let dealer = Dealer::new(generators.all(), n, values.len(), transcript)?; + + let parties: Vec<_> = values + .iter() + .zip(blindings.iter()) + .map(|(&v, &v_blinding)| { + Party::new(v, v_blinding, n, &generators) + }) + // Collect the iterator of Results into a Result, then unwrap it + .collect::,_>>()?; + + let (parties, value_commitments): (Vec<_>, Vec<_>) = parties + .into_iter() + .enumerate() + .map(|(j, p)| p.assign_position(j, rng)) + .unzip(); + + let (dealer, value_challenge) = dealer.receive_value_commitments(&value_commitments)?; + + let (parties, poly_commitments): (Vec<_>, Vec<_>) = parties + .into_iter() + .map(|p| p.apply_challenge(&value_challenge, rng)) + .unzip(); + + let (dealer, poly_challenge) = dealer.receive_poly_commitments(&poly_commitments)?; + + let proof_shares: Vec<_> = parties + .into_iter() + .map(|p| p.apply_challenge(&poly_challenge)) + // Collect the iterator of Results into a Result, then unwrap it + .collect::,_>>()?; + + dealer.receive_trusted_shares(&proof_shares) + } + + /// Verifies a rangeproof for a given value commitment \\(V\\). + /// + /// This is a convenience wrapper around `verify` for the `m=1` case. + /// + /// XXX add doctests + pub fn verify_single( + &self, + V: &RistrettoPoint, + gens: GeneratorsView, + transcript: &mut ProofTranscript, + rng: &mut R, + n: usize, + ) -> Result<(), &'static str> { + self.verify(&[*V], gens, transcript, rng, n) + } + + /// Verifies an aggregated rangeproof for the given value commitments. 
+ /// + /// XXX add doctests + pub fn verify( + &self, + value_commitments: &[RistrettoPoint], + gens: GeneratorsView, + transcript: &mut ProofTranscript, + rng: &mut R, + n: usize, + ) -> Result<(), &'static str> { + RangeProof::verify_batch( + iter::once((self, value_commitments, n)), + gens, + transcript, + rng, + ) + } + + /// Verifies multiple range proofs at once. + /// If any range proof is invalid, the whole batch is invalid. + /// Proofs may use different ranges (`n`) or different number of aggregated commitments (`m`). + /// You must provide big enough view into generators (`gens`) that covers + /// the biggest proof + pub fn verify_batch<'a,'b,I,R,P,V>( + proofs: I, + gens: GeneratorsView, // must have enough points to cover max(m*n) + transcript: &mut ProofTranscript, + rng: &mut R + ) -> Result<(), &'static str> + where + R: Rng, + I: IntoIterator, + P: Borrow, + V: AsRef<[RistrettoPoint]> + { + let mut nm: usize = 0; + let batch = proofs.into_iter().map(|(p, vcs, n)| { + let m = vcs.as_ref().len(); + let v = p.borrow().prepare_verification(n, vcs, &mut transcript.clone(), rng); + nm = nm.max(n*m); + v + }).collect::>(); + + if gens.G.len() < nm { + return Err( + "The generators view does not have enough generators for the largest proof", + ); + } + + // First statement is used without a random factor + let mut pedersen_base_scalars: (Scalar, Scalar) = (Scalar::zero(), Scalar::zero()); + let mut g_scalars: Vec = iter::repeat(Scalar::zero()).take(nm).collect(); + let mut h_scalars: Vec = iter::repeat(Scalar::zero()).take(nm).collect(); + + let dynamic_base_scalars = batch.iter().flat_map(|v| { + v.dynamic_base_scalars.iter() + }); + let dynamic_bases = batch.iter().flat_map(|v| { + v.dynamic_bases.iter() + }); + + // All statements are added up. Each scalar in each statement + // already has a challenge pre-multiplied in `prepare_verification`. 
+ for verification in &batch { + + pedersen_base_scalars.0 += verification.pedersen_base_scalars.0; + pedersen_base_scalars.1 += verification.pedersen_base_scalars.1; + + // Note: these loops may be shorter than the total amount of scalars if `n*m < max({n*m})` + for (i, s) in verification.g_scalars.iter().enumerate() { + g_scalars[i] += s; + } + for (i, s) in verification.h_scalars.iter().enumerate() { + h_scalars[i] += s; + } + } + + let mega_check = ristretto::vartime::multiscalar_mul( + iter::once(&pedersen_base_scalars.0) + .chain(iter::once(&pedersen_base_scalars.1)) + .chain(g_scalars.iter()) + .chain(h_scalars.iter()) + .chain(dynamic_base_scalars), + iter::once(&gens.pedersen_generators.B) + .chain(iter::once(&gens.pedersen_generators.B_blinding)) + .chain(gens.G.iter().take(nm)) + .chain(gens.H.iter().take(nm)) + .chain(dynamic_bases), + ); + + if mega_check.is_identity() { + Ok(()) + } else { + Err("Verification failed") + } + } + + /// Prepares a `Verification` struct + /// that can be combined with others in a batch. + /// Note: all scalars are pre-multiplied by a random challenge. + fn prepare_verification( + &self, + n: usize, + value_commitments: V, + transcript: &mut ProofTranscript, + rng: &mut R, + ) -> Verification + where + R: Rng, + V: AsRef<[RistrettoPoint]> + { + // First, replay the "interactive" protocol using the proof + // data to recompute all challenges. 
+ + let m = value_commitments.as_ref().len(); + + transcript.commit_u64(n as u64); + transcript.commit_u64(m as u64); + + for V in value_commitments.as_ref().iter() { + transcript.commit(V.borrow().compress().as_bytes()); + } + transcript.commit(self.A.compress().as_bytes()); + transcript.commit(self.S.compress().as_bytes()); + + let y = transcript.challenge_scalar(); + let z = transcript.challenge_scalar(); + let zz = z * z; + let minus_z = -z; + + transcript.commit(self.T_1.compress().as_bytes()); + transcript.commit(self.T_2.compress().as_bytes()); + + let x = transcript.challenge_scalar(); + + transcript.commit(self.t_x.as_bytes()); + transcript.commit(self.t_x_blinding.as_bytes()); + transcript.commit(self.e_blinding.as_bytes()); + + let w = transcript.challenge_scalar(); + + // Challenge value for combining two statements within a rangeproof. + let c = Scalar::random(rng); + + let (x_sq, x_inv_sq, s) = self.ipp_proof.verification_scalars(transcript); + let s_inv = s.iter().rev(); + + let a = self.ipp_proof.a; + let b = self.ipp_proof.b; + + // Construct concat_z_and_2, an iterator of the values of + // z^0 * \vec(2)^n || z^1 * \vec(2)^n || ... || z^(m-1) * \vec(2)^n + let powers_of_2: Vec = util::exp_iter(Scalar::from_u64(2)).take(n).collect(); + let powers_of_z = util::exp_iter(z).take(m); + let concat_z_and_2 = + powers_of_z.flat_map(|exp_z| powers_of_2.iter().map(move |exp_2| exp_2 * exp_z)); + + let g = s.iter() + .map(|s_i| minus_z - a * s_i); + let h = s_inv + .zip(util::exp_iter(y.invert())) + .zip(concat_z_and_2) + .map(|((s_i_inv, exp_y_inv), z_and_2)| { + z + exp_y_inv * (zz * z_and_2 - b * s_i_inv) + }); + + let value_commitment_scalars = util::exp_iter(z) + .take(m) + .map(|z_exp| c * zz * z_exp); + + let basepoint_scalar = w * (self.t_x - a * b) + c * (delta(n, m, &y, &z) - self.t_x); + + // Challenge value for combining the complete range proof statement with other range proof statements. 
+ let batch_challenge = Scalar::random(rng); + + Verification { + pedersen_base_scalars: ( + batch_challenge*basepoint_scalar, + batch_challenge*(-self.e_blinding - c * self.t_x_blinding) + ), + g_scalars: g.map(|s| batch_challenge*s ).collect(), + h_scalars: h.map(|s| batch_challenge*s ).collect(), + dynamic_base_scalars: iter::once(Scalar::one()) + .chain(iter::once(x)) + .chain(value_commitment_scalars) + .chain(iter::once(c * x)) + .chain(iter::once(c * x * x)) + .chain(x_sq.iter().cloned()) + .chain(x_inv_sq.iter().cloned()) + .map(|s| batch_challenge*s ) + .collect(), + dynamic_bases: iter::once(&self.A) + .chain(iter::once(&self.S)) + .chain(value_commitments.as_ref().iter()) + .chain(iter::once(&self.T_1)) + .chain(iter::once(&self.T_2)) + .chain(self.ipp_proof.L_vec.iter()) + .chain(self.ipp_proof.R_vec.iter()) + .cloned() + .collect(), + } + } +} + +/// Represents a deferred computation to verify a single rangeproof. +/// Multiple instances can be verified more efficient as a batch using +/// `RangeProof::verify_batch` function. +struct Verification { + /// Pair of scalars multiplying pedersen bases `B`, `B_blinding`. + pedersen_base_scalars: (Scalar, Scalar), + + /// List of scalars for `n*m` `G` bases. These are separated from `h_scalars` + /// so we can easily pad them when verifying proofs with different `m`s. + g_scalars: Vec, + + /// List of scalars for `n*m` `H` bases. These are separated from `g_scalars` + /// so we can easily pad them when verifying proofs with different `m`s. + h_scalars: Vec, + + /// List of scalars for any number of dynamic bases. + dynamic_base_scalars: Vec, + + /// List of dynamic bases for the corresponding scalars. 
+ dynamic_bases: Vec, +} + + +/// Compute +/// \\[ +/// \delta(y,z) = (z - z^{2}) \langle 1, {\mathbf{y}}^{nm} \rangle + z^{3} \langle \mathbf{1}, {\mathbf{2}}^{nm} \rangle +/// \\] +fn delta(n: usize, m: usize, y: &Scalar, z: &Scalar) -> Scalar { + let sum_y = util::sum_of_powers(y, n * m); + let sum_2 = util::sum_of_powers(&Scalar::from_u64(2), n); + let sum_z = util::sum_of_powers(z, m); + + (z - z * z) * sum_y - z * z * z * sum_2 * sum_z +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::OsRng; + + use generators::PedersenGenerators; + + #[test] + fn test_delta() { + let mut rng = OsRng::new().unwrap(); + let y = Scalar::random(&mut rng); + let z = Scalar::random(&mut rng); + + // Choose n = 256 to ensure we overflow the group order during + // the computation, to check that that's done correctly + let n = 256; + + // code copied from previous implementation + let z2 = z * z; + let z3 = z2 * z; + let mut power_g = Scalar::zero(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + for _ in 0..n { + power_g += (z - z2) * exp_y - z3 * exp_2; + + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + assert_eq!(power_g, delta(n, 1, &y, &z),); + } + + /// Given a bitsize `n`, test the following: + /// + /// 1. Generate `m` random values and create a proof they are all in range; + /// 2. Serialize to wire format; + /// 3. Deserialize from wire format; + /// 4. Verify the proof. + fn singleparty_create_and_verify_helper(n: usize, m: usize) { + // Split the test into two scopes, so that it's explicit what + // data is shared between the prover and the verifier. 
+ + // Use bincode for serialization + use bincode; + + // Both prover and verifier have access to the generators and the proof + let generators = Generators::new(PedersenGenerators::default(), n, m); + + let (proof_bytes, value_commitments) = singleparty_create_helper(n, m); + + println!( + "Aggregated rangeproof of m={} proofs of n={} bits has size {} bytes", + m, + n, + proof_bytes.len(), + ); + + // Verifier's scope + { + // 3. Deserialize + let proof: RangeProof = bincode::deserialize(&proof_bytes).unwrap(); + + // 4. Verify with the same customization label as above + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + assert!( + proof + .verify( + &value_commitments, + generators.all(), + &mut transcript, + &mut rng, + n, + ) + .is_ok() + ); + } + } + + /// Generates and verifies a number of proofs in a batch + /// with the given pairs of `n,m` parameters (range in bits, number of commitments). + fn batch_verify_helper(nm: &[(usize, usize)]) { + use bincode; + + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let inputs = nm.iter() + .map(|(n, m)| { + let (p, vc) = singleparty_create_helper(*n, *m); + let proof = bincode::deserialize::(&p).unwrap(); + (proof, vc, *n) + }); + + let max_nm = nm.iter().map(|(n,m)| n * m).max().unwrap_or(0); + + // hackish split of `n*m` into `n*m, 1` because we do not want + // to compute more than 8 generators for the case such as ((4,1),(2,2)). + let generators = Generators::new(PedersenGenerators::default(), max_nm, 1); + + assert!(RangeProof::verify_batch(inputs, generators.all(), &mut transcript, &mut rng).is_ok()); + } + + + /// Generates an `n`-bit rangeproof for `m` commitments. + /// Returns serialized proof and the list of commitments. 
+ fn singleparty_create_helper(n: usize, m: usize) -> (Vec, Vec) { + // Split the test into two scopes, so that it's explicit what + // data is shared between the prover and the verifier. + + // Use bincode for serialization + use bincode; + + // Both prover and verifier have access to the generators and the proof + let generators = Generators::new(PedersenGenerators::default(), n, m); + + // Serialized proof data + let proof_bytes: Vec; + let value_commitments: Vec; + + // 1. Generate the proof + + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let (min, max) = (0u64, ((1u128 << n) - 1) as u64); + let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); + let blindings: Vec = (0..m).map(|_| Scalar::random(&mut rng)).collect(); + + let proof = RangeProof::prove_multiple( + &generators, + &mut transcript, + &mut rng, + &values, + &blindings, + n, + ).unwrap(); + + // 2. Serialize + proof_bytes = bincode::serialize(&proof).unwrap(); + + let pg = &generators.all().pedersen_generators; + + // XXX would be nice to have some convenience API for this + value_commitments = values + .iter() + .zip(blindings.iter()) + .map(|(&v, &v_blinding)| { + pg.commit(Scalar::from_u64(v), v_blinding) + }) + .collect(); + + (proof_bytes, value_commitments) + } + + #[test] + fn create_and_verify_n_32_m_1() { + singleparty_create_and_verify_helper(32, 1); + } + + #[test] + fn create_and_verify_n_32_m_2() { + singleparty_create_and_verify_helper(32, 2); + } + + #[test] + fn create_and_verify_n_32_m_4() { + singleparty_create_and_verify_helper(32, 4); + } + + #[test] + fn create_and_verify_n_32_m_8() { + singleparty_create_and_verify_helper(32, 8); + } + + #[test] + fn create_and_verify_n_64_m_1() { + singleparty_create_and_verify_helper(64, 1); + } + + #[test] + fn create_and_verify_n_64_m_2() { + singleparty_create_and_verify_helper(64, 2); + } + + #[test] + fn create_and_verify_n_64_m_4() { + 
singleparty_create_and_verify_helper(64, 4); + } + + #[test] + fn create_and_verify_n_64_m_8() { + singleparty_create_and_verify_helper(64, 8); + } + + #[test] + fn batch_verify_n_32_m_1() { + batch_verify_helper(&[(32, 1)]); + batch_verify_helper(&[(32, 1), (32, 1)]); + batch_verify_helper(&[(32, 1), (32, 1), (32, 1)]); + } + + #[test] + fn batch_verify_n_64_m_differ() { + batch_verify_helper(&[(32, 1), (32, 2)]); + batch_verify_helper(&[(32, 1), (32, 2), (32, 4)]); + } + + #[test] + fn batch_verify_n_differ_m_differ_total_64() { + batch_verify_helper(&[(64, 1), (32, 2), (16, 4)]); + } + + #[test] + fn batch_verify_n_differ_m_differ_total_256() { + batch_verify_helper(&[(16, 1), (32, 2), (64, 4)]); + } + + #[test] + fn detect_dishonest_party_during_aggregation() { + use self::dealer::*; + use self::party::*; + + // Simulate four parties, two of which will be dishonest and use a 64-bit value. + let m = 4; + let n = 32; + + let generators = Generators::new(PedersenGenerators::default(), n, m); + + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + // Parties 0, 2 are honest and use a 32-bit value + let v0 = rng.next_u32() as u64; + let v0_blinding = Scalar::random(&mut rng); + let party0 = Party::new(v0, v0_blinding, n, &generators).unwrap(); + + let v2 = rng.next_u32() as u64; + let v2_blinding = Scalar::random(&mut rng); + let party2 = Party::new(v2, v2_blinding, n, &generators).unwrap(); + + // Parties 1, 3 are dishonest and use a 64-bit value + let v1 = rng.next_u64(); + let v1_blinding = Scalar::random(&mut rng); + let party1 = Party::new(v1, v1_blinding, n, &generators).unwrap(); + + let v3 = rng.next_u64(); + let v3_blinding = Scalar::random(&mut rng); + let party3 = Party::new(v3, v3_blinding, n, &generators).unwrap(); + + let dealer = Dealer::new(generators.all(), n, m, &mut transcript).unwrap(); + + let (party0, value_com0) = party0.assign_position(0, &mut rng); + let (party1, value_com1) = 
party1.assign_position(1, &mut rng); + let (party2, value_com2) = party2.assign_position(2, &mut rng); + let (party3, value_com3) = party3.assign_position(3, &mut rng); + + let (dealer, value_challenge) = dealer + .receive_value_commitments(&[value_com0, value_com1, value_com2, value_com3]) + .unwrap(); + + let (party0, poly_com0) = party0.apply_challenge(&value_challenge, &mut rng); + let (party1, poly_com1) = party1.apply_challenge(&value_challenge, &mut rng); + let (party2, poly_com2) = party2.apply_challenge(&value_challenge, &mut rng); + let (party3, poly_com3) = party3.apply_challenge(&value_challenge, &mut rng); + + let (dealer, poly_challenge) = dealer + .receive_poly_commitments(&[poly_com0, poly_com1, poly_com2, poly_com3]) + .unwrap(); + + let share0 = party0.apply_challenge(&poly_challenge).unwrap(); + let share1 = party1.apply_challenge(&poly_challenge).unwrap(); + let share2 = party2.apply_challenge(&poly_challenge).unwrap(); + let share3 = party3.apply_challenge(&poly_challenge).unwrap(); + + match dealer.receive_shares(&mut rng, &[share0, share1, share2, share3]) { + Ok(_proof) => { + panic!("The proof was malformed, but it was not detected"); + } + Err(e) => { + // XXX when we have error types, check that it was party 1 that did it + assert_eq!(e, "proof failed to verify"); + } + } + } + + #[test] + fn detect_dishonest_dealer_during_aggregation() { + use self::dealer::*; + use self::party::*; + + // Simulate one party + let m = 1; + let n = 32; + + let generators = Generators::new(PedersenGenerators::default(), n, m); + + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let v0 = rng.next_u32() as u64; + let v0_blinding = Scalar::random(&mut rng); + let party0 = Party::new(v0, v0_blinding, n, &generators).unwrap(); + + let dealer = Dealer::new(generators.all(), n, m, &mut transcript).unwrap(); + + // Now do the protocol flow as normal.... 
+ + let (party0, value_com0) = party0.assign_position(0, &mut rng); + + let (dealer, value_challenge) = dealer + .receive_value_commitments(&[value_com0]) + .unwrap(); + + let (party0, poly_com0) = party0.apply_challenge(&value_challenge, &mut rng); + + let (_dealer, mut poly_challenge) = dealer + .receive_poly_commitments(&[poly_com0]) + .unwrap(); + + // But now simulate a malicious dealer choosing x = 0 + poly_challenge.x = Scalar::zero(); + + let maybe_share0 = party0.apply_challenge(&poly_challenge); + + // XXX when we have error types, check finer info than "was error" + assert!(maybe_share0.is_err()); + } +} diff --git a/src/range_proof/party.rs b/src/range_proof/party.rs new file mode 100644 index 00000000..0508d65c --- /dev/null +++ b/src/range_proof/party.rs @@ -0,0 +1,227 @@ +use curve25519_dalek::ristretto; +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::scalar::Scalar; +use generators::Generators; +use rand::Rng; +use std::iter; +use util; + +use super::messages::*; + +/// Party is an entry-point API for setting up a party. +pub struct Party {} + +impl Party { + pub fn new( + v: u64, + v_blinding: Scalar, + n: usize, + generators: &Generators, + ) -> Result { + if !n.is_power_of_two() || n > 64 { + return Err("n is not valid: must be a power of 2, and less than or equal to 64"); + } + let V = generators + .share(0) + .pedersen_generators + .commit(Scalar::from_u64(v), v_blinding); + + Ok(PartyAwaitingPosition { + generators: generators, + n, + v, + v_blinding, + V, + }) + } +} + +/// As party awaits its position, they only know their value and desired bit-size of the proof. +pub struct PartyAwaitingPosition<'a> { + generators: &'a Generators, + n: usize, + v: u64, + v_blinding: Scalar, + V: RistrettoPoint, +} + +impl<'a> PartyAwaitingPosition<'a> { + /// Assigns the position to a party, + /// at which point the party knows its generators. 
+ pub fn assign_position( + self, + j: usize, + mut rng: &mut R, + ) -> (PartyAwaitingValueChallenge<'a>, ValueCommitment) { + let gen_share = self.generators.share(j); + + let a_blinding = Scalar::random(&mut rng); + // Compute A = + + a_blinding * B_blinding + let mut A = gen_share.pedersen_generators.B_blinding * a_blinding; + + use subtle::{Choice, ConditionallyAssignable}; + for i in 0..self.n { + // If v_i = 0, we add a_L[i] * G[i] + a_R[i] * H[i] = - H[i] + // If v_i = 1, we add a_L[i] * G[i] + a_R[i] * H[i] = G[i] + let v_i = Choice::from(((self.v >> i) & 1) as u8); + let mut point = -gen_share.H[i]; + point.conditional_assign(&gen_share.G[i], v_i); + A += point; + } + + let s_blinding = Scalar::random(&mut rng); + let s_L: Vec = (0..self.n).map(|_| Scalar::random(&mut rng)).collect(); + let s_R: Vec = (0..self.n).map(|_| Scalar::random(&mut rng)).collect(); + + // Compute S = + + s_blinding * B_blinding + let S = ristretto::multiscalar_mul( + iter::once(&s_blinding).chain(s_L.iter()).chain(s_R.iter()), + iter::once(&gen_share.pedersen_generators.B_blinding) + .chain(gen_share.G.iter()) + .chain(gen_share.H.iter()), + ); + + // Return next state and all commitments + let value_commitment = ValueCommitment { V: self.V, A, S }; + let next_state = PartyAwaitingValueChallenge { + n: self.n, + v: self.v, + v_blinding: self.v_blinding, + generators: self.generators, + j, + value_commitment, + a_blinding, + s_blinding, + s_L, + s_R, + }; + (next_state, value_commitment) + } +} + +/// When party knows its position (`j`), it can produce commitments +/// to all bits of the value and necessary blinding factors. 
+pub struct PartyAwaitingValueChallenge<'a> { + n: usize, // bitsize of the range + v: u64, + v_blinding: Scalar, + + j: usize, + generators: &'a Generators, + value_commitment: ValueCommitment, + a_blinding: Scalar, + s_blinding: Scalar, + s_L: Vec, + s_R: Vec, +} + +impl<'a> PartyAwaitingValueChallenge<'a> { + pub fn apply_challenge( + self, + vc: &ValueChallenge, + rng: &mut R, + ) -> (PartyAwaitingPolyChallenge, PolyCommitment) { + let n = self.n; + let offset_y = util::scalar_exp_vartime(&vc.y, (self.j * n) as u64); + let offset_z = util::scalar_exp_vartime(&vc.z, self.j as u64); + + // Calculate t by calculating vectors l0, l1, r0, r1 and multiplying + let mut l_poly = util::VecPoly1::zero(n); + let mut r_poly = util::VecPoly1::zero(n); + + let zz = vc.z * vc.z; + let mut exp_y = offset_y; // start at y^j + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + for i in 0..n { + let a_L_i = Scalar::from_u64((self.v >> i) & 1); + let a_R_i = a_L_i - Scalar::one(); + + l_poly.0[i] = a_L_i - vc.z; + l_poly.1[i] = self.s_L[i]; + r_poly.0[i] = exp_y * (a_R_i + vc.z) + zz * offset_z * exp_2; + r_poly.1[i] = exp_y * self.s_R[i]; + + exp_y = exp_y * vc.y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + let t_poly = l_poly.inner_product(&r_poly); + + // Generate x by committing to T_1, T_2 (line 49-54) + let t_1_blinding = Scalar::random(rng); + let t_2_blinding = Scalar::random(rng); + let T_1 = self.generators + .share(self.j) + .pedersen_generators + .commit(t_poly.1, t_1_blinding); + let T_2 = self.generators + .share(self.j) + .pedersen_generators + .commit(t_poly.2, t_2_blinding); + + let poly_commitment = PolyCommitment { T_1, T_2 }; + + let papc = PartyAwaitingPolyChallenge { + value_commitment: self.value_commitment, + poly_commitment, + z: vc.z, + offset_z, + l_poly, + r_poly, + t_poly, + v_blinding: self.v_blinding, + a_blinding: self.a_blinding, + s_blinding: self.s_blinding, + t_1_blinding, + t_2_blinding, + }; + + (papc, 
poly_commitment) + } +} + +pub struct PartyAwaitingPolyChallenge { + value_commitment: ValueCommitment, + poly_commitment: PolyCommitment, + z: Scalar, + offset_z: Scalar, + l_poly: util::VecPoly1, + r_poly: util::VecPoly1, + t_poly: util::Poly2, + v_blinding: Scalar, + a_blinding: Scalar, + s_blinding: Scalar, + t_1_blinding: Scalar, + t_2_blinding: Scalar, +} + +impl PartyAwaitingPolyChallenge { + pub fn apply_challenge(self, pc: &PolyChallenge) -> Result { + // Prevent a malicious dealer from annihilating the blinding factors: + if pc.x == Scalar::zero() { + return Err("Poly challenge was zero, which would leak secrets, bailing out"); + } + + let t_blinding_poly = util::Poly2( + self.z * self.z * self.offset_z * self.v_blinding, + self.t_1_blinding, + self.t_2_blinding, + ); + + let t_x = self.t_poly.eval(pc.x); + let t_x_blinding = t_blinding_poly.eval(pc.x); + let e_blinding = self.a_blinding + self.s_blinding * &pc.x; + let l_vec = self.l_poly.eval(pc.x); + let r_vec = self.r_poly.eval(pc.x); + + Ok(ProofShare { + value_commitment: self.value_commitment, + poly_commitment: self.poly_commitment, + t_x_blinding, + t_x, + e_blinding, + l_vec, + r_vec, + }) + } +} diff --git a/src/util.rs b/src/util.rs new file mode 100644 index 00000000..007b725f --- /dev/null +++ b/src/util.rs @@ -0,0 +1,212 @@ +#![deny(missing_docs)] +#![allow(non_snake_case)] + +use curve25519_dalek::scalar::Scalar; +use inner_product_proof::inner_product; + +/// Represents a degree-1 vector polynomial \\(\mathbf{a} + \mathbf{b} \cdot x\\). +pub struct VecPoly1(pub Vec, pub Vec); + +/// Represents a degree-2 scalar polynomial \\(a + b \cdot x + c \cdot x^2\\) +pub struct Poly2(pub Scalar, pub Scalar, pub Scalar); + +/// Provides an iterator over the powers of a `Scalar`. +/// +/// This struct is created by the `exp_iter` function. 
+pub struct ScalarExp { + x: Scalar, + next_exp_x: Scalar, +} + +impl Iterator for ScalarExp { + type Item = Scalar; + + fn next(&mut self) -> Option { + let exp_x = self.next_exp_x; + self.next_exp_x *= self.x; + Some(exp_x) + } +} + +/// Return an iterator of the powers of `x`. +pub fn exp_iter(x: Scalar) -> ScalarExp { + let next_exp_x = Scalar::one(); + ScalarExp { x, next_exp_x } +} + +pub fn add_vec(a: &[Scalar], b: &[Scalar]) -> Vec { + let mut out = Vec::new(); + if a.len() != b.len() { + // throw some error + println!("lengths of vectors don't match for vector addition"); + } + for i in 0..a.len() { + out.push(a[i] + b[i]); + } + out +} + +impl VecPoly1 { + pub fn zero(n: usize) -> Self { + VecPoly1(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) + } + + pub fn inner_product(&self, rhs: &VecPoly1) -> Poly2 { + // Uses Karatsuba's method + let l = self; + let r = rhs; + + let t0 = inner_product(&l.0, &r.0); + let t2 = inner_product(&l.1, &r.1); + + let l0_plus_l1 = add_vec(&l.0, &l.1); + let r0_plus_r1 = add_vec(&r.0, &r.1); + + let t1 = inner_product(&l0_plus_l1, &r0_plus_r1) - t0 - t2; + + Poly2(t0, t1, t2) + } + + pub fn eval(&self, x: Scalar) -> Vec { + let n = self.0.len(); + let mut out = vec![Scalar::zero(); n]; + for i in 0..n { + out[i] += self.0[i] + self.1[i] * x; + } + out + } +} + +impl Poly2 { + pub fn eval(&self, x: Scalar) -> Scalar { + self.0 + x * (self.1 + x * self.2) + } +} + +/// Raises `x` to the power `n` using binary exponentiation, +/// with (1 to 2)*lg(n) scalar multiplications. +/// TODO: a consttime version of this would be awfully similar to a Montgomery ladder. +pub fn scalar_exp_vartime(x: &Scalar, mut n: u64) -> Scalar { + let mut result = Scalar::one(); + let mut aux = *x; // x, x^2, x^4, x^8, ... + while n > 0 { + let bit = n & 1; + if bit == 1 { + result = result * aux; + } + n = n >> 1; + aux = aux * aux; // FIXME: one unnecessary mult at the last step here! 
+ } + result +} + +/// Takes the sum of all the powers of `x`, up to `n` +/// If `n` is a power of 2, it uses the efficient algorithm with `2*lg n` multiplcations and additions. +/// If `n` is not a power of 2, it uses the slow algorithm with `n` multiplications and additions. +/// In the Bulletproofs case, all calls to `sum_of_powers` should have `n` as a power of 2. +pub fn sum_of_powers(x: &Scalar, n: usize) -> Scalar { + if !n.is_power_of_two() { + return sum_of_powers_slow(x, n); + } + if n == 0 || n == 1 { + return Scalar::from_u64(n as u64); + } + let mut m = n; + let mut result = Scalar::one() + x; + let mut factor = *x; + while m > 2 { + factor = factor * factor; + result = result + factor * result; + m = m / 2; + } + result +} + +// takes the sum of all of the powers of x, up to n +fn sum_of_powers_slow(x: &Scalar, n: usize) -> Scalar { + exp_iter(*x).take(n).fold(Scalar::zero(), |acc, x| acc + x) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn exp_2_is_powers_of_2() { + let exp_2: Vec<_> = exp_iter(Scalar::from_u64(2)).take(4).collect(); + + assert_eq!(exp_2[0], Scalar::from_u64(1)); + assert_eq!(exp_2[1], Scalar::from_u64(2)); + assert_eq!(exp_2[2], Scalar::from_u64(4)); + assert_eq!(exp_2[3], Scalar::from_u64(8)); + } + + #[test] + fn test_inner_product() { + let a = vec![ + Scalar::from_u64(1), + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + ]; + let b = vec![ + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + Scalar::from_u64(5), + ]; + assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); + } + + /// Raises `x` to the power `n`. 
+ fn scalar_exp_vartime_slow(x: &Scalar, n: u64) -> Scalar { + let mut result = Scalar::one(); + for _ in 0..n { + result = result * x; + } + result + } + + #[test] + fn test_scalar_exp() { + let x = Scalar::from_bits( + *b"\x84\xfc\xbcOx\x12\xa0\x06\xd7\x91\xd9z:'\xdd\x1e!CE\xf7\xb1\xb9Vz\x810sD\x96\x85\xb5\x07", + ); + assert_eq!(scalar_exp_vartime(&x, 0), Scalar::one()); + assert_eq!(scalar_exp_vartime(&x, 1), x); + assert_eq!(scalar_exp_vartime(&x, 2), x * x); + assert_eq!(scalar_exp_vartime(&x, 3), x * x * x); + assert_eq!(scalar_exp_vartime(&x, 4), x * x * x * x); + assert_eq!(scalar_exp_vartime(&x, 5), x * x * x * x * x); + assert_eq!(scalar_exp_vartime(&x, 64), scalar_exp_vartime_slow(&x, 64)); + assert_eq!( + scalar_exp_vartime(&x, 0b11001010), + scalar_exp_vartime_slow(&x, 0b11001010) + ); + } + + #[test] + fn test_sum_of_powers() { + let x = Scalar::from_u64(10); + assert_eq!(sum_of_powers_slow(&x, 0), sum_of_powers(&x, 0)); + assert_eq!(sum_of_powers_slow(&x, 1), sum_of_powers(&x, 1)); + assert_eq!(sum_of_powers_slow(&x, 2), sum_of_powers(&x, 2)); + assert_eq!(sum_of_powers_slow(&x, 4), sum_of_powers(&x, 4)); + assert_eq!(sum_of_powers_slow(&x, 8), sum_of_powers(&x, 8)); + assert_eq!(sum_of_powers_slow(&x, 16), sum_of_powers(&x, 16)); + assert_eq!(sum_of_powers_slow(&x, 32), sum_of_powers(&x, 32)); + assert_eq!(sum_of_powers_slow(&x, 64), sum_of_powers(&x, 64)); + } + + #[test] + fn test_sum_of_powers_slow() { + let x = Scalar::from_u64(10); + assert_eq!(sum_of_powers_slow(&x, 0), Scalar::zero()); + assert_eq!(sum_of_powers_slow(&x, 1), Scalar::one()); + assert_eq!(sum_of_powers_slow(&x, 2), Scalar::from_u64(11)); + assert_eq!(sum_of_powers_slow(&x, 3), Scalar::from_u64(111)); + assert_eq!(sum_of_powers_slow(&x, 4), Scalar::from_u64(1111)); + assert_eq!(sum_of_powers_slow(&x, 5), Scalar::from_u64(11111)); + assert_eq!(sum_of_powers_slow(&x, 6), Scalar::from_u64(111111)); + } +}