From 98b2c9afed88f4eff6ecce98149d3cfc889f8187 Mon Sep 17 00:00:00 2001 From: Cathie Yun Date: Thu, 1 Feb 2018 17:40:59 -0800 Subject: [PATCH 001/186] create readme --- README.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 00000000..33606f3c --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# ristretto-bulletproofs + +Implementing [bulletproofs](https://crypto.stanford.edu/bulletproofs/) using [ristretto](https://github.com/dalek-cryptography/ed25519-dalek). From e745b83e4d0314c5a04558793feadd004d9bf2c7 Mon Sep 17 00:00:00 2001 From: Cathie Date: Fri, 2 Feb 2018 14:48:56 -0800 Subject: [PATCH 002/186] Added comments, some more progress on generating t --- src/lib.rs | 51 +++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index e21b08f4..2b70e7f4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -8,7 +8,7 @@ use std::iter; use curve25519_dalek::ristretto::{RistrettoPoint}; use curve25519_dalek::ristretto; use curve25519_dalek::traits::Identity; -use sha2::{Digest, Sha256}; +use sha2::{Digest, Sha256, Sha512}; use curve25519_dalek::scalar::Scalar; use rand::OsRng; @@ -17,13 +17,21 @@ struct RangeProof { } +struct Degree3Poly { + pub t0: Scalar, + pub t1: Scalar, + pub t2: Scalar +} + impl RangeProof { pub fn generate_proof(v: u64, len: usize, a: &RistrettoPoint, b: &RistrettoPoint) -> RangeProof { let mut rng: OsRng = OsRng::new().unwrap(); + // Generate groups a, b (in the paper: groups g, h) let b_vec = make_generators(b, len); let a_vec = make_generators(a, len); + // Compute big_a (in the paper: A; line 36-39) let alpha = RistrettoPoint::random(&mut rng); let mut big_a = alpha.clone(); for i in 0..len { @@ -35,15 +43,28 @@ impl RangeProof { } } + // Compute big_s (in the paper: S; line 40-42) let points_iter = iter::once(a).chain(b_vec.iter()).chain(a_vec.iter()); let randomness: Vec<_> = (0..2*len+1).map(|_| Scalar::random(&mut rng)).collect(); let big_s = ristretto::multiscalar_mult(&randomness, points_iter); + // Save/label randomness (rho, s_L, s_R) to be used later let _rho = &randomness[0]; let _s_l = &randomness[1..len+1]; let _s_r = &randomness[len+1..2*len+1]; - let (_y, _z) = commit(&big_a, &big_s); + // Generate y, z by committing to A, S (line 43-45) + let (_y, z) = commit(&big_a, &big_s); + + // Calculate t (line 46) + let mut product = Degree3Poly::new(); + let z2 = z * z; + let z3 = z2 * z; + let k = -z2 + z3; // not correct yet + product.t0 = z + z2 + k; // not correct yet + + // Generate x by committing to T_1, T_2 (line 47-51) + // let (x, _) = commit(t1, t2); unimplemented!() } @@ -53,6 +74,16 @@ impl RangeProof { } } +impl Degree3Poly { + pub fn new() -> Self { + Self { + t0: Scalar::zero(), + t1: Scalar::zero(), + t2: Scalar::zero(), + } + } +} + pub fn make_generators(point: &RistrettoPoint, len: usize) -> Vec { @@ -66,17 +97,17 @@ pub fn make_generators(point: &RistrettoPoint, len: usize) generators } -pub fn commit(a: &RistrettoPoint, s: &RistrettoPoint) -> (RistrettoPoint, RistrettoPoint) { - let mut y_digest = Sha256::new(); +pub fn commit(a: &RistrettoPoint, b: &RistrettoPoint) -> (Scalar, Scalar) { + let mut y_digest = Sha512::new(); y_digest.input(a.compress().as_bytes()); - y_digest.input(s.compress().as_bytes()); - let y = RistrettoPoint::hash_from_bytes::(&y_digest.result()); + y_digest.input(b.compress().as_bytes()); + let y = Scalar::from_hash(y_digest); - let mut z_digest = Sha256::new(); + 
let mut z_digest = Sha512::new(); z_digest.input(a.compress().as_bytes()); - z_digest.input(s.compress().as_bytes()); - z_digest.input(y.compress().as_bytes()); - let z = RistrettoPoint::hash_from_bytes::(&z_digest.result()); + z_digest.input(b.compress().as_bytes()); + z_digest.input(y.as_bytes()); + let z = Scalar::from_hash(z_digest); (y, z) } From 327d21bf47f3c56ed80d694cb786ecedb3b71b9e Mon Sep 17 00:00:00 2001 From: Cathie Date: Fri, 2 Feb 2018 17:33:19 -0800 Subject: [PATCH 003/186] add helper functions --- README.md | 4 +++- src/lib.rs | 54 +++++++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 46 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 0ad97ec4..f56790c3 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,4 @@ # ristretto-bulletproofs -Implementing [bulletproofs](https://crypto.stanford.edu/bulletproofs/) using [ristretto](https://github.com/dalek-cryptography/ed25519-dalek). \ No newline at end of file +Implementing [bulletproofs](https://crypto.stanford.edu/bulletproofs/) using [ristretto](https://github.com/dalek-cryptography/ed25519-dalek). + +Step 1 of a larger proposed proposed plan, detailed [here](https://github.com/chain/research/issues/7). \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index 2b70e7f4..90c06956 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -5,6 +5,8 @@ extern crate sha2; extern crate test; extern crate rand; use std::iter; +use std::ops::Add; +use std::ops::Mul; use curve25519_dalek::ristretto::{RistrettoPoint}; use curve25519_dalek::ristretto; use curve25519_dalek::traits::Identity; @@ -18,9 +20,9 @@ struct RangeProof { } struct Degree3Poly { - pub t0: Scalar, - pub t1: Scalar, - pub t2: Scalar + pub d0: Scalar, + pub d1: Scalar, + pub d2: Scalar } impl RangeProof { @@ -50,18 +52,26 @@ impl RangeProof { // Save/label randomness (rho, s_L, s_R) to be used later let _rho = &randomness[0]; - let _s_l = &randomness[1..len+1]; + let s_l = &randomness[1..len+1]; let _s_r = &randomness[len+1..2*len+1]; // Generate y, z by committing to A, S (line 43-45) - let (_y, z) = commit(&big_a, &big_s); + let (y, z) = commit(&big_a, &big_s); // Calculate t (line 46) - let mut product = Degree3Poly::new(); + let a_l = Scalar::from_u64(v); let z2 = z * z; let z3 = z2 * z; - let k = -z2 + z3; // not correct yet - product.t0 = z + z2 + k; // not correct yet + let l0 = a_l - z; + let l1 = s_l; + let r0 = z2; + let mut r1: Vec = Vec::new(); // actually make this an iterator? 
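        // A sketch of the relation being built here, following the paper: with
        //   l(x) = (a_L - z*1^n) + s_L * x
        //   r(x) = y^n o (a_R + z*1^n + s_R * x) + z^2 * 2^n      (o = entry-wise product)
        // the polynomial t(x) = <l(x), r(x)> = t0 + t1*x + t2*x^2 has coefficients
        //   t0 = <l0, r0>,  t1 = <l0, r1> + <l1, r0>,  t2 = <l1, r1>.
        // Assuming l0, l1, r0, r1 are per-bit Vec<Scalar> values (and a borrowing
        // signature for the inner_product helper added below), the coefficients
        // could be assembled along the lines of
        //   t.d1 = inner_product(&l0, &r1) + inner_product(&l1, &r0);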
+ // calculate r1 + let mut t = Degree3Poly::new(); + t.d0 = z*y + (a_l - y)*z2 - z3; + // t.d1 = r0*l1 + l0*r1; + // t.d2 = r1*l1; + // Generate x by committing to T_1, T_2 (line 47-51) // let (x, _) = commit(t1, t2); @@ -77,13 +87,35 @@ impl RangeProof { impl Degree3Poly { pub fn new() -> Self { Self { - t0: Scalar::zero(), - t1: Scalar::zero(), - t2: Scalar::zero(), + d0: Scalar::zero(), + d1: Scalar::zero(), + d2: Scalar::zero(), } } } +pub fn hadamard_product(a: Vec, b: Vec) -> Vec { + let mut result = Vec::new(); + if a.len() != b.len() { + // throw some error + } + for i in 0..a.len() { + result[i] = a[i] * b[i]; + } + result +} + +pub fn inner_product(a: Vec, b: Vec) -> Scalar { + let mut result = Scalar::zero(); + if a.len() != b.len() { + // throw some error + } + for i in 0..a.len() { + result += a[i] * b[i]; + } + result +} + pub fn make_generators(point: &RistrettoPoint, len: usize) -> Vec { From f1087fc128cd1d1691fb2cbc682c0f025b0b2320 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Sat, 3 Feb 2018 10:09:56 -0800 Subject: [PATCH 004/186] syntax fix --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 90c06956..8503a297 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -150,7 +150,7 @@ mod tests { #[test] fn test_make_generators() { use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; - println!("{:?}", make_generators(RISTRETTO_BASEPOINT_POINT, 20)); + println!("{:?}", make_generators(&RISTRETTO_BASEPOINT_POINT, 20)); } } @@ -161,6 +161,6 @@ mod bench { #[bench] fn benchmark_make_generators(b: &mut Bencher) { use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; - b.iter(|| make_generators(RISTRETTO_BASEPOINT_POINT, 100)); + b.iter(|| make_generators(&RISTRETTO_BASEPOINT_POINT, 100)); } } From 79a701a6987a30f38c2bb63f96a15a03805cf7e5 Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 5 Feb 2018 14:04:40 -0800 Subject: [PATCH 005/186] Generate t1, t2 --- src/lib.rs | 52 +++++++++++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 8503a297..41e9cc6a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,12 +19,6 @@ struct RangeProof { } -struct Degree3Poly { - pub d0: Scalar, - pub d1: Scalar, - pub d2: Scalar -} - impl RangeProof { pub fn generate_proof(v: u64, len: usize, a: &RistrettoPoint, b: &RistrettoPoint) -> RangeProof { let mut rng: OsRng = OsRng::new().unwrap(); @@ -53,22 +47,36 @@ impl RangeProof { // Save/label randomness (rho, s_L, s_R) to be used later let _rho = &randomness[0]; let s_l = &randomness[1..len+1]; - let _s_r = &randomness[len+1..2*len+1]; + let s_r = &randomness[len+1..2*len+1]; // Generate y, z by committing to A, S (line 43-45) let (y, z) = commit(&big_a, &big_s); - // Calculate t (line 46) - let a_l = Scalar::from_u64(v); + // Calculate t1, t2 (line 46) + let mut t1 = Scalar::zero(); + let mut t2 = Scalar::zero(); + + let mut v_temp = v.clone(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 let z2 = z * z; - let z3 = z2 * z; - let l0 = a_l - z; - let l1 = s_l; - let r0 = z2; - let mut r1: Vec = Vec::new(); // actually make this an iterator? 
- // calculate r1 - let mut t = Degree3Poly::new(); - t.d0 = z*y + (a_l - y)*z2 - z3; + + for i in 0..len { + t1 += s_l[i] * exp_y * z + s_l[i] * z2 * exp_2 + s_r[i] * exp_y * (-z); + t2 += s_l[i] * exp_y * s_r[i]; + + // check if a_l is 0 or 1 + if v_temp & 1 == 0 { + t1 -= s_l[i] * exp_y; + } else { + t1 += s_r[i] * exp_y; + } + + v_temp = v_temp >> 1; // bit-shift v by one + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + // t.d1 = r0*l1 + l0*r1; // t.d2 = r1*l1; @@ -84,16 +92,6 @@ impl RangeProof { } } -impl Degree3Poly { - pub fn new() -> Self { - Self { - d0: Scalar::zero(), - d1: Scalar::zero(), - d2: Scalar::zero(), - } - } -} - pub fn hadamard_product(a: Vec, b: Vec) -> Vec { let mut result = Vec::new(); if a.len() != b.len() { From d1a385dd37f2dd0a87bb0890924282bc7448c808 Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 5 Feb 2018 14:33:12 -0800 Subject: [PATCH 006/186] fmt --- src/lib.rs | 249 +++++++++++++++++++++++++++-------------------------- 1 file changed, 129 insertions(+), 120 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 41e9cc6a..c58f4c01 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,158 +7,167 @@ extern crate rand; use std::iter; use std::ops::Add; use std::ops::Mul; -use curve25519_dalek::ristretto::{RistrettoPoint}; +use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; use curve25519_dalek::traits::Identity; use sha2::{Digest, Sha256, Sha512}; use curve25519_dalek::scalar::Scalar; use rand::OsRng; +struct Degree3Poly { + pub d0: Scalar, + pub d1: Scalar, + pub d2: Scalar, +} + +struct RangeProof {} -struct RangeProof { +impl RangeProof { + pub fn generate_proof( + v: u64, + len: usize, + a: &RistrettoPoint, + b: &RistrettoPoint, + ) -> RangeProof { + let mut rng: OsRng = OsRng::new().unwrap(); + + // Generate groups a, b (in the paper: groups g, h) + let b_vec = make_generators(b, len); + let a_vec = make_generators(a, len); + + // Compute big_a (in the paper: A; line 36-39) + let alpha = RistrettoPoint::random(&mut rng); + let mut big_a = alpha.clone(); + for i in 0..len { + let v_i = (v >> i) & 1; + if v_i == 0 { + big_a -= a_vec[i]; + } else { + big_a += b_vec[i]; + } + } + + // Compute big_s (in the paper: S; line 40-42) + let points_iter = iter::once(a).chain(b_vec.iter()).chain(a_vec.iter()); + let randomness: Vec<_> = (0..2 * len + 1).map(|_| Scalar::random(&mut rng)).collect(); + let big_s = ristretto::multiscalar_mult(&randomness, points_iter); + + // Save/label randomness (rho, s_L, s_R) to be used later + let _rho = &randomness[0]; + let s_l = &randomness[1..len + 1]; + let s_r = &randomness[len + 1..2 * len + 1]; + + // Generate y, z by committing to A, S (line 43-45) + let (y, z) = commit(&big_a, &big_s); + + // Calculate t1, t2 (line 46) + let mut t = Degree3Poly::new(); + let mut v_temp = v.clone(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + let z2 = z * z; + + for i in 0..len { + t.d1 += s_l[i] * exp_y * z + s_l[i] * z2 * exp_2 + s_r[i] * exp_y * (-z); + t.d2 += s_l[i] * exp_y * s_r[i]; + // check if a_l is 0 or 1 + if v_temp & 1 == 0 { + t.d1 -= s_l[i] * exp_y; + } else { + t.d1 += s_r[i] * exp_y; + } + v_temp = v_temp >> 1; // bit-shift v by one + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + // Generate x by committing to T_1, T_2 (line 47-51) + // let big_t_1 = b * t.d1 + h * tau_1 + // let (x, _) = commit(t1, t2); + + unimplemented!() + } + pub fn verify_proof() 
-> Result<(), ()> { + unimplemented!() + } } -impl RangeProof { - pub fn generate_proof(v: u64, len: usize, a: &RistrettoPoint, b: &RistrettoPoint) -> RangeProof { - let mut rng: OsRng = OsRng::new().unwrap(); - - // Generate groups a, b (in the paper: groups g, h) - let b_vec = make_generators(b, len); - let a_vec = make_generators(a, len); - - // Compute big_a (in the paper: A; line 36-39) - let alpha = RistrettoPoint::random(&mut rng); - let mut big_a = alpha.clone(); - for i in 0..len { - let v_i = (v >> i) & 1; - if v_i == 0 { - big_a -= a_vec[i]; - } else { - big_a += b_vec[i]; - } - } - - // Compute big_s (in the paper: S; line 40-42) - let points_iter = iter::once(a).chain(b_vec.iter()).chain(a_vec.iter()); - let randomness: Vec<_> = (0..2*len+1).map(|_| Scalar::random(&mut rng)).collect(); - let big_s = ristretto::multiscalar_mult(&randomness, points_iter); - - // Save/label randomness (rho, s_L, s_R) to be used later - let _rho = &randomness[0]; - let s_l = &randomness[1..len+1]; - let s_r = &randomness[len+1..2*len+1]; - - // Generate y, z by committing to A, S (line 43-45) - let (y, z) = commit(&big_a, &big_s); - - // Calculate t1, t2 (line 46) - let mut t1 = Scalar::zero(); - let mut t2 = Scalar::zero(); - - let mut v_temp = v.clone(); - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - let z2 = z * z; - - for i in 0..len { - t1 += s_l[i] * exp_y * z + s_l[i] * z2 * exp_2 + s_r[i] * exp_y * (-z); - t2 += s_l[i] * exp_y * s_r[i]; - - // check if a_l is 0 or 1 - if v_temp & 1 == 0 { - t1 -= s_l[i] * exp_y; - } else { - t1 += s_r[i] * exp_y; - } - - v_temp = v_temp >> 1; // bit-shift v by one - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - - // t.d1 = r0*l1 + l0*r1; - // t.d2 = r1*l1; - - - // Generate x by committing to T_1, T_2 (line 47-51) - // let (x, _) = commit(t1, t2); - - unimplemented!() - } - - pub fn verify_proof() -> Result<(), ()> { - unimplemented!() - } +impl Degree3Poly { + pub fn new() -> Self { + Self { + d0: Scalar::zero(), + d1: Scalar::zero(), + d2: Scalar::zero(), + } + } } pub fn hadamard_product(a: Vec, b: Vec) -> Vec { - let mut result = Vec::new(); - if a.len() != b.len() { - // throw some error - } - for i in 0..a.len() { - result[i] = a[i] * b[i]; - } - result + let mut result = Vec::new(); + if a.len() != b.len() { + // throw some error + } + for i in 0..a.len() { + result[i] = a[i] * b[i]; + } + result } pub fn inner_product(a: Vec, b: Vec) -> Scalar { - let mut result = Scalar::zero(); - if a.len() != b.len() { - // throw some error - } - for i in 0..a.len() { - result += a[i] * b[i]; - } - result + let mut result = Scalar::zero(); + if a.len() != b.len() { + // throw some error + } + for i in 0..a.len() { + result += a[i] * b[i]; + } + result } -pub fn make_generators(point: &RistrettoPoint, len: usize) - -> Vec -{ - let mut generators = vec![RistrettoPoint::identity(); len]; - - generators[0] = RistrettoPoint::hash_from_bytes::(point.compress().as_bytes()); - for i in 1..len { - let prev = generators[i-1].compress(); - generators[i] = RistrettoPoint::hash_from_bytes::(prev.as_bytes()); - } - generators +pub fn make_generators(point: &RistrettoPoint, len: usize) -> Vec { + let mut generators = vec![RistrettoPoint::identity(); len]; + + generators[0] = RistrettoPoint::hash_from_bytes::(point.compress().as_bytes()); + for i in 1..len { + let prev = generators[i - 1].compress(); + generators[i] = RistrettoPoint::hash_from_bytes::(prev.as_bytes()); + } + 
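    // Each generator in the chain is derived deterministically from the previous
    // one, so prover and verifier can rebuild the same vector from a single seed
    // point. A small usage sketch (assuming Sha256 as the digest, as elsewhere in
    // this file):
    //
    //   let gens = make_generators(&RISTRETTO_BASEPOINT_POINT, 4);
    //   // gens[i] == RistrettoPoint::hash_from_bytes::<Sha256>(gens[i - 1].compress().as_bytes())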
generators } pub fn commit(a: &RistrettoPoint, b: &RistrettoPoint) -> (Scalar, Scalar) { - let mut y_digest = Sha512::new(); - y_digest.input(a.compress().as_bytes()); - y_digest.input(b.compress().as_bytes()); - let y = Scalar::from_hash(y_digest); - - let mut z_digest = Sha512::new(); - z_digest.input(a.compress().as_bytes()); - z_digest.input(b.compress().as_bytes()); - z_digest.input(y.as_bytes()); - let z = Scalar::from_hash(z_digest); - - (y, z) + let mut y_digest = Sha512::new(); + y_digest.input(a.compress().as_bytes()); + y_digest.input(b.compress().as_bytes()); + let y = Scalar::from_hash(y_digest); + + let mut z_digest = Sha512::new(); + z_digest.input(a.compress().as_bytes()); + z_digest.input(b.compress().as_bytes()); + z_digest.input(y.as_bytes()); + let z = Scalar::from_hash(z_digest); + + (y, z) } #[cfg(test)] mod tests { - use super::*; + use super::*; #[test] fn test_make_generators() { - use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; - println!("{:?}", make_generators(&RISTRETTO_BASEPOINT_POINT, 20)); + use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; + println!("{:?}", make_generators(&RISTRETTO_BASEPOINT_POINT, 20)); } } mod bench { - use super::*; - use test::Bencher; + use super::*; + use test::Bencher; - #[bench] + #[bench] fn benchmark_make_generators(b: &mut Bencher) { - use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; - b.iter(|| make_generators(&RISTRETTO_BASEPOINT_POINT, 100)); + use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; + b.iter(|| make_generators(&RISTRETTO_BASEPOINT_POINT, 100)); } } From 242e5c6c8c230d2564dec12fe5eef62d69c3094b Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 5 Feb 2018 14:44:08 -0800 Subject: [PATCH 007/186] generate x --- src/lib.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index c58f4c01..2fa9bf28 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -82,8 +82,11 @@ impl RangeProof { } // Generate x by committing to T_1, T_2 (line 47-51) - // let big_t_1 = b * t.d1 + h * tau_1 - // let (x, _) = commit(t1, t2); + let tau_1 = Scalar::random(&mut rng); + let tau_2 = Scalar::random(&mut rng); + let big_t_1 = b * t.d1 + a * tau_1; + let big_t_2 = b * t.d2 + a * tau_2; + let (x, _) = commit(&big_t_1, &big_t_2); unimplemented!() } From 5c096e30adcda5e7e304cfee7749e2cd7f763430 Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 5 Feb 2018 16:23:07 -0800 Subject: [PATCH 008/186] Output range proof --- src/lib.rs | 61 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 2fa9bf28..48eb5de5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,13 +14,13 @@ use sha2::{Digest, Sha256, Sha512}; use curve25519_dalek::scalar::Scalar; use rand::OsRng; -struct Degree3Poly { - pub d0: Scalar, - pub d1: Scalar, - pub d2: Scalar, -} +struct Degree3Poly(Scalar, Scalar, Scalar); -struct RangeProof {} +struct RangeProof { + tau: Scalar, + mu: Scalar, + t: Scalar, +} impl RangeProof { pub fn generate_proof( @@ -36,8 +36,8 @@ impl RangeProof { let a_vec = make_generators(a, len); // Compute big_a (in the paper: A; line 36-39) - let alpha = RistrettoPoint::random(&mut rng); - let mut big_a = alpha.clone(); + let alpha = Scalar::random(&mut rng); + let mut big_a = b * alpha; for i in 0..len { let v_i = (v >> i) & 1; if v_i == 0 { @@ -53,42 +53,55 @@ impl RangeProof { let big_s = ristretto::multiscalar_mult(&randomness, points_iter); // Save/label randomness (rho, s_L, s_R) to be used 
later - let _rho = &randomness[0]; + let rho = &randomness[0]; let s_l = &randomness[1..len + 1]; let s_r = &randomness[len + 1..2 * len + 1]; // Generate y, z by committing to A, S (line 43-45) let (y, z) = commit(&big_a, &big_s); - // Calculate t1, t2 (line 46) + // Calculate t (line 46) let mut t = Degree3Poly::new(); let mut v_temp = v.clone(); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 let z2 = z * z; + let z3 = z2 * z; for i in 0..len { - t.d1 += s_l[i] * exp_y * z + s_l[i] * z2 * exp_2 + s_r[i] * exp_y * (-z); - t.d2 += s_l[i] * exp_y * s_r[i]; + t.0 += exp_y * (z - z2) - z3 * exp_2; + t.1 += s_l[i] * exp_y * z + s_l[i] * z2 * exp_2 + s_r[i] * exp_y * (-z); + t.2 += s_l[i] * exp_y * s_r[i]; // check if a_l is 0 or 1 if v_temp & 1 == 0 { - t.d1 -= s_l[i] * exp_y; + t.1 -= s_l[i] * exp_y; } else { - t.d1 += s_r[i] * exp_y; + t.0 += z2 * exp_2; + t.1 += s_r[i] * exp_y; } v_temp = v_temp >> 1; // bit-shift v by one exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } - // Generate x by committing to T_1, T_2 (line 47-51) + // Generate x by committing to big_t_1, big_t_2 (in the paper: T1, T2; line 47-51) let tau_1 = Scalar::random(&mut rng); let tau_2 = Scalar::random(&mut rng); - let big_t_1 = b * t.d1 + a * tau_1; - let big_t_2 = b * t.d2 + a * tau_2; - let (x, _) = commit(&big_t_1, &big_t_2); - - unimplemented!() + let big_t_1 = b * t.1 + a * tau_1; + let big_t_2 = b * t.2 + a * tau_2; + let (x, _) = commit(&big_t_1, &big_t_2); // TODO: use a different commit + + // Generate final values for proof (line 52-54) + let gamma = Scalar::random(&mut rng); + let tau_x = tau_1 * x + tau_2 * x * x + z2 * gamma; + let mu = alpha + rho * x; + let t_total = t.0 + t.1 * x + t.2 * x * x; + + RangeProof { + tau: tau_x, + mu: mu, + t: t_total, + } } pub fn verify_proof() -> Result<(), ()> { @@ -97,12 +110,8 @@ impl RangeProof { } impl Degree3Poly { - pub fn new() -> Self { - Self { - d0: Scalar::zero(), - d1: Scalar::zero(), - d2: Scalar::zero(), - } + pub fn new() -> Degree3Poly { + Degree3Poly(Scalar::zero(), Scalar::zero(), Scalar::zero()) } } From 75bd9f1475bb273db41ac650d66b00765e671b95 Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 5 Feb 2018 16:35:26 -0800 Subject: [PATCH 009/186] generate l, r --- src/lib.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/lib.rs b/src/lib.rs index 48eb5de5..77e50759 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,6 +20,8 @@ struct RangeProof { tau: Scalar, mu: Scalar, t: Scalar, + l: Scalar, // don't need if doing inner product proof + r: Scalar, // don't need if doing inner product proof } impl RangeProof { @@ -97,10 +99,36 @@ impl RangeProof { let mu = alpha + rho * x; let t_total = t.0 + t.1 * x + t.2 * x * x; + // Calculate l, r - which is only necessary if not doing IPP (line 55-57) + // Adding this in a seperate loop so we can remove it easily later + let mut v_temp = v.clone(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + let mut l = Scalar::zero(); + let mut r = Scalar::zero(); + + for i in 0..len { + let a_l = v_temp & 1; + + // is it ok to convert a_l to scalar? + l += Scalar::from_u64(a_l) - z + s_l[i] * x; + r += exp_y * (z + s_r[i] * x); + if a_l == 0 { + r -= exp_y + } + + v_temp = v_temp >> 1; // bit-shift v by one + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + // Send proof to verifier! 
(line 58) RangeProof { tau: tau_x, mu: mu, t: t_total, + l: l, + r: r, } } From 58e3b246c32baa312d76bacda5b396c2024fea4d Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 6 Feb 2018 11:31:06 -0800 Subject: [PATCH 010/186] Use the crates.io version of dalek, now that it's released. --- Cargo.toml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 8a65a178..d3fed8f7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,10 +1,9 @@ [package] -name = "ristretto-bp" +name = "ristretto-bulletproofs" version = "0.1.0" authors = ["Cathie "] [dependencies] -#curve25519-dalek = "^0.14" -curve25519-dalek = { git = "https://github.com/dalek-cryptography/curve25519-dalek", branch = "develop", features = ["nightly"]} +curve25519-dalek = { version = "^0.15", features = ["nightly"] } sha2 = "^0.7" -rand = "^0.4" \ No newline at end of file +rand = "^0.4" From cae7675dce28a5025bbb0a173ca94917ebc88a88 Mon Sep 17 00:00:00 2001 From: Cathie Date: Tue, 6 Feb 2018 14:29:48 -0800 Subject: [PATCH 011/186] Add tests --- src/lib.rs | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 77e50759..2bcf2671 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -22,6 +22,9 @@ struct RangeProof { t: Scalar, l: Scalar, // don't need if doing inner product proof r: Scalar, // don't need if doing inner product proof + + b: RistrettoPoint, + a: RistrettoPoint, } impl RangeProof { @@ -33,7 +36,7 @@ impl RangeProof { ) -> RangeProof { let mut rng: OsRng = OsRng::new().unwrap(); - // Generate groups a, b (in the paper: groups g, h) + // Generate groups b, a (in the paper: groups g, h) let b_vec = make_generators(b, len); let a_vec = make_generators(a, len); @@ -106,7 +109,6 @@ impl RangeProof { let mut exp_2 = Scalar::one(); // start at 2^0 = 1 let mut l = Scalar::zero(); let mut r = Scalar::zero(); - for i in 0..len { let a_l = v_temp & 1; @@ -122,17 +124,24 @@ impl RangeProof { exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } - // Send proof to verifier! (line 58) + // Generate proof! 
(line 58) RangeProof { tau: tau_x, mu: mu, t: t_total, l: l, r: r, + + b: *b, + a: *a, } } - pub fn verify_proof() -> Result<(), ()> { + pub fn verify_proof(&self) -> Result<(), ()> { + // line 60 + if self.t != self.l * self.r { + // throw some error + } unimplemented!() } } @@ -199,6 +208,14 @@ mod tests { use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; println!("{:?}", make_generators(&RISTRETTO_BASEPOINT_POINT, 20)); } + #[test] + fn test_t() { + let b = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let a = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + + let rp = RangeProof::generate_proof(153, 5, &b, &a); + assert_eq!(rp.t, rp.r * rp.l); + } } mod bench { From 78d6e2679790c4cf6f2e9e025c56c6faa83c86d0 Mon Sep 17 00:00:00 2001 From: Cathie Date: Tue, 6 Feb 2018 16:47:31 -0800 Subject: [PATCH 012/186] Starting verification --- src/lib.rs | 240 ++++++++++++++++++++++++++++------------------------- 1 file changed, 127 insertions(+), 113 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 2bcf2671..0aeb8c01 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -5,216 +5,230 @@ extern crate sha2; extern crate test; extern crate rand; use std::iter; -use std::ops::Add; -use std::ops::Mul; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; use curve25519_dalek::traits::Identity; use sha2::{Digest, Sha256, Sha512}; use curve25519_dalek::scalar::Scalar; -use rand::OsRng; +// use rand::OsRng; +use rand::SeedableRng; +use rand::StdRng; -struct Degree3Poly(Scalar, Scalar, Scalar); +struct Polynomial(Scalar, Scalar, Scalar); struct RangeProof { - tau: Scalar, + tau_x: Scalar, mu: Scalar, t: Scalar, - l: Scalar, // don't need if doing inner product proof - r: Scalar, // don't need if doing inner product proof - b: RistrettoPoint, - a: RistrettoPoint, + // don't need if doing inner product proof + l: Scalar, + r: Scalar, + + // committed values + big_v: RistrettoPoint, + big_a: RistrettoPoint, + big_s: RistrettoPoint, + big_t_1: RistrettoPoint, + big_t_2: RistrettoPoint, + + // public knowledge + n: usize, + g: RistrettoPoint, + h: RistrettoPoint, } impl RangeProof { + pub fn generate_proof( v: u64, - len: usize, - a: &RistrettoPoint, - b: &RistrettoPoint, + n: usize, + g: &RistrettoPoint, + h: &RistrettoPoint, ) -> RangeProof { - let mut rng: OsRng = OsRng::new().unwrap(); + // let mut rng: OsRng = OsRng::new().unwrap(); + let mut rng: StdRng = StdRng::from_seed(&[1, 2, 3, 4]); - // Generate groups b, a (in the paper: groups g, h) - let b_vec = make_generators(b, len); - let a_vec = make_generators(a, len); + // Setup: generate groups g & h, commit to v + let g_vec = make_generators(g, n); + let h_vec = make_generators(h, n); + let gamma = Scalar::random(&mut rng); + let big_v = h * gamma + g * Scalar::from_u64(v); // Compute big_a (in the paper: A; line 36-39) let alpha = Scalar::random(&mut rng); - let mut big_a = b * alpha; - for i in 0..len { + let mut big_a = h * alpha; + for i in 0..n { let v_i = (v >> i) & 1; if v_i == 0 { - big_a -= a_vec[i]; + big_a -= h_vec[i]; } else { - big_a += b_vec[i]; + big_a += g_vec[i]; } } // Compute big_s (in the paper: S; line 40-42) - let points_iter = iter::once(a).chain(b_vec.iter()).chain(a_vec.iter()); - let randomness: Vec<_> = (0..2 * len + 1).map(|_| Scalar::random(&mut rng)).collect(); + let points_iter = iter::once(h).chain(g_vec.iter()).chain(h_vec.iter()); + let randomness: Vec<_> = (0..2 * n + 1).map(|_| Scalar::random(&mut rng)).collect(); let big_s = ristretto::multiscalar_mult(&randomness, 
points_iter); // Save/label randomness (rho, s_L, s_R) to be used later let rho = &randomness[0]; - let s_l = &randomness[1..len + 1]; - let s_r = &randomness[len + 1..2 * len + 1]; + let s_l = &randomness[1..n + 1]; + let s_r = &randomness[n + 1..2 * n + 1]; // Generate y, z by committing to A, S (line 43-45) let (y, z) = commit(&big_a, &big_s); // Calculate t (line 46) - let mut t = Degree3Poly::new(); + let mut t = Polynomial::new(); + let mut l = Polynomial::new(); + let mut r = Polynomial::new(); + let mut v_temp = v.clone(); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 let z2 = z * z; - let z3 = z2 * z; - for i in 0..len { - t.0 += exp_y * (z - z2) - z3 * exp_2; - t.1 += s_l[i] * exp_y * z + s_l[i] * z2 * exp_2 + s_r[i] * exp_y * (-z); - t.2 += s_l[i] * exp_y * s_r[i]; - // check if a_l is 0 or 1 - if v_temp & 1 == 0 { - t.1 -= s_l[i] * exp_y; + for i in 0..n { + let a_l = v_temp & 1; + l.0 += -z; + l.1 += s_l[i]; + r.0 += exp_y * z + z2 * exp_2; + r.1 += exp_y * s_r[i]; + if a_l == 0 { + r.0 -= exp_y; } else { - t.0 += z2 * exp_2; - t.1 += s_r[i] * exp_y; + l.0 += Scalar::one(); } v_temp = v_temp >> 1; // bit-shift v by one exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } + t.0 = l.0 * r.0; + t.1 = l.1 * r.0 + l.0 * r.1; + t.2 = l.1 * r.1; + // Generate x by committing to big_t_1, big_t_2 (in the paper: T1, T2; line 47-51) let tau_1 = Scalar::random(&mut rng); let tau_2 = Scalar::random(&mut rng); - let big_t_1 = b * t.1 + a * tau_1; - let big_t_2 = b * t.2 + a * tau_2; + let big_t_1 = g * t.1 + h * tau_1; + let big_t_2 = g * t.2 + h * tau_2; let (x, _) = commit(&big_t_1, &big_t_2); // TODO: use a different commit // Generate final values for proof (line 52-54) - let gamma = Scalar::random(&mut rng); let tau_x = tau_1 * x + tau_2 * x * x + z2 * gamma; let mu = alpha + rho * x; let t_total = t.0 + t.1 * x + t.2 * x * x; - // Calculate l, r - which is only necessary if not doing IPP (line 55-57) - // Adding this in a seperate loop so we can remove it easily later - let mut v_temp = v.clone(); - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - let mut l = Scalar::zero(); - let mut r = Scalar::zero(); - for i in 0..len { - let a_l = v_temp & 1; - - // is it ok to convert a_l to scalar? - l += Scalar::from_u64(a_l) - z + s_l[i] * x; - r += exp_y * (z + s_r[i] * x); - if a_l == 0 { - r -= exp_y - } - - v_temp = v_temp >> 1; // bit-shift v by one - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - - // Generate proof! (line 58) + // Generate proof! 
(line 55-58) RangeProof { - tau: tau_x, + tau_x: tau_x, mu: mu, t: t_total, - l: l, - r: r, - - b: *b, - a: *a, + l: l.0 + l.1 * x, + r: r.0 + r.1 * x, + + big_v: big_v, + big_a: big_a, + big_s: big_s, + big_t_1: big_t_1, + big_t_2: big_t_2, + + n: n, + g: *g, + h: *h, } } - pub fn verify_proof(&self) -> Result<(), ()> { + pub fn verify_proof(&self) -> bool { + let (y, z) = commit(&self.big_a, &self.big_s); + let (x, _) = commit(&self.big_t_1, &self.big_t_2); + // line 60 if self.t != self.l * self.r { - // throw some error + println!("fails check on line 60: t != l * r"); + return false } - unimplemented!() - } -} -impl Degree3Poly { - pub fn new() -> Degree3Poly { - Degree3Poly(Scalar::zero(), Scalar::zero(), Scalar::zero()) - } -} + // line 61 + let z2 = z * z; + let z3 = z2 * z; + let mut power_g = Scalar::zero(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 -pub fn hadamard_product(a: Vec, b: Vec) -> Vec { - let mut result = Vec::new(); - if a.len() != b.len() { - // throw some error - } - for i in 0..a.len() { - result[i] = a[i] * b[i]; + for _ in 0..self.n { + power_g += -z2 * exp_y - z3 * exp_2 + z * exp_y; + + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + let t_check = self.g * power_g + self.big_v * z2 + self.big_t_1 * x + self.big_t_2 * x * x; + let t_commit = self.g * self.t + self.h * self.tau_x; + if t_commit != t_check { + println!("fails check on line 61"); + return false + } + + // line 62 + + return true } - result } -pub fn inner_product(a: Vec, b: Vec) -> Scalar { - let mut result = Scalar::zero(); - if a.len() != b.len() { - // throw some error +impl Polynomial { + pub fn new() -> Polynomial { + Polynomial(Scalar::zero(), Scalar::zero(), Scalar::zero()) } - for i in 0..a.len() { - result += a[i] * b[i]; - } - result } -pub fn make_generators(point: &RistrettoPoint, len: usize) -> Vec { - let mut generators = vec![RistrettoPoint::identity(); len]; +pub fn make_generators(point: &RistrettoPoint, n: usize) -> Vec { + let mut generators = vec![RistrettoPoint::identity(); n]; generators[0] = RistrettoPoint::hash_from_bytes::(point.compress().as_bytes()); - for i in 1..len { + for i in 1..n { let prev = generators[i - 1].compress(); generators[i] = RistrettoPoint::hash_from_bytes::(prev.as_bytes()); } generators } -pub fn commit(a: &RistrettoPoint, b: &RistrettoPoint) -> (Scalar, Scalar) { +pub fn commit(g: &RistrettoPoint, h: &RistrettoPoint) -> (Scalar, Scalar) { let mut y_digest = Sha512::new(); - y_digest.input(a.compress().as_bytes()); - y_digest.input(b.compress().as_bytes()); - let y = Scalar::from_hash(y_digest); + y_digest.input(g.compress().as_bytes()); + y_digest.input(h.compress().as_bytes()); + let c1 = Scalar::from_hash(y_digest); let mut z_digest = Sha512::new(); - z_digest.input(a.compress().as_bytes()); - z_digest.input(b.compress().as_bytes()); - z_digest.input(y.as_bytes()); - let z = Scalar::from_hash(z_digest); + z_digest.input(g.compress().as_bytes()); + z_digest.input(h.compress().as_bytes()); + z_digest.input(c1.as_bytes()); + let c2 = Scalar::from_hash(z_digest); - (y, z) + (c1, c2) } #[cfg(test)] mod tests { use super::*; #[test] - fn test_make_generators() { - use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; - println!("{:?}", make_generators(&RISTRETTO_BASEPOINT_POINT, 20)); + fn test_t() { + let g = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let h = RistrettoPoint::hash_from_bytes::("there".as_bytes()); + + let rp = 
RangeProof::generate_proof(153, 10, &g, &h); + assert_eq!(rp.t, rp.l * rp.r); } #[test] - fn test_t() { - let b = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let a = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + fn test_verify() { + let g = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let h = RistrettoPoint::hash_from_bytes::("there".as_bytes()); - let rp = RangeProof::generate_proof(153, 5, &b, &a); - assert_eq!(rp.t, rp.r * rp.l); + let rp = RangeProof::generate_proof(153, 10, &b, &a); + assert_eq!(rp.verify_proof(), true); } } From df128e3d796e740ccae136ca57311b404bf3dc5c Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 7 Feb 2018 16:51:58 -0800 Subject: [PATCH 013/186] messy but working proof --- src/lib.rs | 223 +++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 172 insertions(+), 51 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 0aeb8c01..884fce16 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -22,8 +22,8 @@ struct RangeProof { t: Scalar, // don't need if doing inner product proof - l: Scalar, - r: Scalar, + l: Vec, + r: Vec, // committed values big_v: RistrettoPoint, @@ -43,13 +43,14 @@ impl RangeProof { pub fn generate_proof( v: u64, n: usize, - g: &RistrettoPoint, - h: &RistrettoPoint, ) -> RangeProof { // let mut rng: OsRng = OsRng::new().unwrap(); let mut rng: StdRng = StdRng::from_seed(&[1, 2, 3, 4]); + // Setup: generate groups g & h, commit to v + let g = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let h = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let g_vec = make_generators(g, n); let h_vec = make_generators(h, n); let gamma = Scalar::random(&mut rng); @@ -69,66 +70,144 @@ impl RangeProof { // Compute big_s (in the paper: S; line 40-42) let points_iter = iter::once(h).chain(g_vec.iter()).chain(h_vec.iter()); - let randomness: Vec<_> = (0..2 * n + 1).map(|_| Scalar::random(&mut rng)).collect(); + let randomness: Vec<_> = (0..(1 + 2 * n)).map(|_| Scalar::random(&mut rng)).collect(); let big_s = ristretto::multiscalar_mult(&randomness, points_iter); // Save/label randomness (rho, s_L, s_R) to be used later let rho = &randomness[0]; - let s_l = &randomness[1..n + 1]; - let s_r = &randomness[n + 1..2 * n + 1]; + let s_l = &randomness[1..(n + 1)]; + let s_r = &randomness[(n + 1)..(1 + 2 * n)]; // Generate y, z by committing to A, S (line 43-45) let (y, z) = commit(&big_a, &big_s); // Calculate t (line 46) + + // APPROACH 1 TO CALCULATING T: + // calculate vectors l0, l1, r0, r1 and multiply + let mut l0 = vec![Scalar::zero(); n]; + let mut l1 = vec![Scalar::zero(); n]; + let mut r0 = vec![Scalar::zero(); n]; + let mut r1 = vec![Scalar::zero(); n]; let mut t = Polynomial::new(); - let mut l = Polynomial::new(); - let mut r = Polynomial::new(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + + for i in 0..n { + let v_i = (v >> i) & 1; + let a_l = Scalar::from_u64(v_i); + let a_r = a_l - Scalar::one(); + + l0[i] += a_l - z; + l1[i] += s_l[i]; + r0[i] += exp_y * (a_r + z) + z * z * exp_2; + r1[i] += exp_y * s_r[i]; + println!("v_i at position {:?}: {:?}", i, v_i); + // if v_i == 0 { + // r0[i] -= exp_y; + // } else { + // l0[i] += Scalar::one(); + // } + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } - let mut v_temp = v.clone(); + t.0 = inner_product(&l0, &r0); + t.1 = inner_product(&l0, &r1) + inner_product(&l1, &r0); + t.2 = inner_product(&l1, &r1); + println!("t0: {:?}", t.0); + println!("t1: 
{:?}", t.1); + println!("t2: {:?}", t.2); + + // let mut t2 = Polynomial::new(); + // for i in 0..n { + // t2.0 += l0[i] * r0[i]; + // t2.1 += l0[i] * r1[i] + l1[i] * r0[i]; + // t2.2 += l1[i] * r1[i]; + // } + + // println!("t2_0: {:?}", t2.0); + // println!("t2_1: {:?}", t2.1); + // println!("t2_2: {:?}", t2.2); + + /* + // APPROACH 2 TO CALCULATING T: + // calculate scalars t0, t1, t2 seperately + let mut t = Polynomial::new(); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 let z2 = z * z; + let z3 = z2 * z; for i in 0..n { - let a_l = v_temp & 1; - l.0 += -z; - l.1 += s_l[i]; - r.0 += exp_y * z + z2 * exp_2; - r.1 += exp_y * s_r[i]; - if a_l == 0 { - r.0 -= exp_y; + let v_i = (v >> i) & 1; + t.0 += exp_y * (z - z2) - z3 * exp_2; + t.1 += s_l[i] * exp_y * z + s_l[i] * z2 * exp_2 + s_r[i] * exp_y * (-z); + t.2 += s_l[i] * exp_y * s_r[i]; + // check if a_l is 0 or 1 + if v_i == 0 { + t.1 -= s_l[i] * exp_y; } else { - l.0 += Scalar::one(); + t.0 += z2 * exp_2; + t.1 += s_r[i] * exp_y; } - v_temp = v_temp >> 1; // bit-shift v by one exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } - - t.0 = l.0 * r.0; - t.1 = l.1 * r.0 + l.0 * r.1; - t.2 = l.1 * r.1; + println!("t0: {:?}", t.0); + println!("t1: {:?}", t.1); + println!("t2: {:?}", t.2); + */ // Generate x by committing to big_t_1, big_t_2 (in the paper: T1, T2; line 47-51) let tau_1 = Scalar::random(&mut rng); let tau_2 = Scalar::random(&mut rng); let big_t_1 = g * t.1 + h * tau_1; let big_t_2 = g * t.2 + h * tau_2; - let (x, _) = commit(&big_t_1, &big_t_2); // TODO: use a different commit + let (x, _) = commit(&big_t_1, &big_t_2); // TODO: use a different commit? // Generate final values for proof (line 52-54) - let tau_x = tau_1 * x + tau_2 * x * x + z2 * gamma; + let tau_x = tau_1 * x + tau_2 * x * x + z * z * gamma; let mu = alpha + rho * x; let t_total = t.0 + t.1 * x + t.2 * x * x; - // Generate proof! (line 55-58) + // Calculate l, r - which is only necessary if not doing IPP (line 55-57) + // Adding this in a seperate loop so we can remove it easily later + + let mut l = vec![Scalar::zero(); n]; + let mut r = vec![Scalar::zero(); n]; + + /* + // APPROACH 1 TO CALCULATING l, r + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + for i in 0..n { + let a_l = (v >> i) & 1; + + // is it ok to convert a_l to scalar? + l += Scalar::from_u64(a_l) - z + s_l[i] * x; + r += exp_y * (z + s_r[i] * x) + z * z * exp_2; + if a_l == 0 { + r -= exp_y + } + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + */ + + // APPROACH 2 TO CALCULATING l, r + for i in 0..n { + l[i] += l0[i] + l1[i] * x; + r[i] += r0[i] + r1[i] * x; + } + + // Generate proof! 
(line 58) RangeProof { tau_x: tau_x, mu: mu, t: t_total, - l: l.0 + l.1 * x, - r: r.0 + r.1 * x, + l: l, + r: r, big_v: big_v, big_a: big_a, @@ -147,7 +226,7 @@ impl RangeProof { let (x, _) = commit(&self.big_t_1, &self.big_t_2); // line 60 - if self.t != self.l * self.r { + if self.t != inner_product(&self.l, &self.r) { println!("fails check on line 60: t != l * r"); return false } @@ -166,6 +245,7 @@ impl RangeProof { exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } + let t_check = self.g * power_g + self.big_v * z2 + self.big_t_1 * x + self.big_t_2 * x * x; let t_commit = self.g * self.t + self.h * self.tau_x; if t_commit != t_check { @@ -173,8 +253,6 @@ impl RangeProof { return false } - // line 62 - return true } } @@ -196,39 +274,82 @@ pub fn make_generators(point: &RistrettoPoint, n: usize) -> Vec generators } -pub fn commit(g: &RistrettoPoint, h: &RistrettoPoint) -> (Scalar, Scalar) { - let mut y_digest = Sha512::new(); - y_digest.input(g.compress().as_bytes()); - y_digest.input(h.compress().as_bytes()); - let c1 = Scalar::from_hash(y_digest); +pub fn commit(v1: &RistrettoPoint, v2: &RistrettoPoint) -> (Scalar, Scalar) { + let mut c1_digest = Sha512::new(); + c1_digest.input(v1.compress().as_bytes()); + c1_digest.input(v2.compress().as_bytes()); + let c1 = Scalar::from_hash(c1_digest); - let mut z_digest = Sha512::new(); - z_digest.input(g.compress().as_bytes()); - z_digest.input(h.compress().as_bytes()); - z_digest.input(c1.as_bytes()); - let c2 = Scalar::from_hash(z_digest); + let mut c2_digest = Sha512::new(); + c2_digest.input(v1.compress().as_bytes()); + c2_digest.input(v2.compress().as_bytes()); + c2_digest.input(c1.as_bytes()); + let c2 = Scalar::from_hash(c2_digest); (c1, c2) } +pub fn inner_product(a: &Vec, b: &Vec) -> Scalar { + let mut result = Scalar::zero(); + if a.len() != b.len() { + // throw some error + } + for i in 0..a.len() { + result += a[i] * b[i]; + } + result +} + #[cfg(test)] mod tests { use super::*; #[test] + fn test_inner_product() { + let a = vec![Scalar::from_u64(1), Scalar::from_u64(2), Scalar::from_u64(3), Scalar::from_u64(4)]; + let b = vec![Scalar::from_u64(2), Scalar::from_u64(3), Scalar::from_u64(4), Scalar::from_u64(5)]; + assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); + } + #[test] fn test_t() { - let g = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let h = RistrettoPoint::hash_from_bytes::("there".as_bytes()); - - let rp = RangeProof::generate_proof(153, 10, &g, &h); - assert_eq!(rp.t, rp.l * rp.r); + let rp = RangeProof::generate_proof(1, 1); + assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); + let rp = RangeProof::generate_proof(1, 2); + assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); } #[test] - fn test_verify() { - let g = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let h = RistrettoPoint::hash_from_bytes::("there".as_bytes()); - - let rp = RangeProof::generate_proof(153, 10, &b, &a); + fn test_verify_one() { + let rp = RangeProof::generate_proof(0, 1); + assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(1, 1); + assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(2, 1); + assert_eq!(rp.verify_proof(), false); + let rp = RangeProof::generate_proof(3, 1); + assert_eq!(rp.verify_proof(), false); + } + #[test] + fn test_verify_two() { + let rp = RangeProof::generate_proof(0, 2); + assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(1, 2); + assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(3, 2); + 
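        // 3 == 2^2 - 1 is the largest value representable in 2 bits, so this proof
        // over the range [0, 2^2) should still verify; 4 and 8 below should not.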
assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(4, 2); + assert_eq!(rp.verify_proof(), false); + let rp = RangeProof::generate_proof(8, 2); + assert_eq!(rp.verify_proof(), false); + } + #[test] + fn test_verify_large() { + let rp = RangeProof::generate_proof(250, 8); + assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(300, 8); + assert_eq!(rp.verify_proof(), false); + let rp = RangeProof::generate_proof(1000000, 20); assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(1050000, 20); + assert_eq!(rp.verify_proof(), false); } } From 481ac92f9d0a9f17378f3b516c1bec8a8a3a5108 Mon Sep 17 00:00:00 2001 From: Cathie Date: Thu, 8 Feb 2018 12:10:53 -0800 Subject: [PATCH 014/186] implement last verification check --- src/lib.rs | 207 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 122 insertions(+), 85 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 884fce16..721d9ef3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,7 +14,8 @@ use curve25519_dalek::scalar::Scalar; use rand::SeedableRng; use rand::StdRng; -struct Polynomial(Scalar, Scalar, Scalar); +struct PolyDeg3(Scalar, Scalar, Scalar); +struct VecPoly2(Vec, Vec); struct RangeProof { tau_x: Scalar, @@ -48,7 +49,7 @@ impl RangeProof { let mut rng: StdRng = StdRng::from_seed(&[1, 2, 3, 4]); - // Setup: generate groups g & h, commit to v + // Setup: generate groups g & h, commit to v (line 34) let g = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let h = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let g_vec = make_generators(g, n); @@ -56,7 +57,7 @@ impl RangeProof { let gamma = Scalar::random(&mut rng); let big_v = h * gamma + g * Scalar::from_u64(v); - // Compute big_a (in the paper: A; line 36-39) + // Compute big_a (line 39-42) let alpha = Scalar::random(&mut rng); let mut big_a = h * alpha; for i in 0..n { @@ -68,7 +69,7 @@ impl RangeProof { } } - // Compute big_s (in the paper: S; line 40-42) + // Compute big_s (in the paper: S; line 43-45) let points_iter = iter::once(h).chain(g_vec.iter()).chain(h_vec.iter()); let randomness: Vec<_> = (0..(1 + 2 * n)).map(|_| Scalar::random(&mut rng)).collect(); let big_s = ristretto::multiscalar_mult(&randomness, points_iter); @@ -78,18 +79,16 @@ impl RangeProof { let s_l = &randomness[1..(n + 1)]; let s_r = &randomness[(n + 1)..(1 + 2 * n)]; - // Generate y, z by committing to A, S (line 43-45) + // Generate y, z by committing to A, S (line 46-48) let (y, z) = commit(&big_a, &big_s); - // Calculate t (line 46) + // Calculate t // APPROACH 1 TO CALCULATING T: // calculate vectors l0, l1, r0, r1 and multiply - let mut l0 = vec![Scalar::zero(); n]; - let mut l1 = vec![Scalar::zero(); n]; - let mut r0 = vec![Scalar::zero(); n]; - let mut r1 = vec![Scalar::zero(); n]; - let mut t = Polynomial::new(); + let mut l = VecPoly2::new(n); + let mut r = VecPoly2::new(n); + let mut t = PolyDeg3::new(); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 @@ -98,11 +97,10 @@ impl RangeProof { let a_l = Scalar::from_u64(v_i); let a_r = a_l - Scalar::one(); - l0[i] += a_l - z; - l1[i] += s_l[i]; - r0[i] += exp_y * (a_r + z) + z * z * exp_2; - r1[i] += exp_y * s_r[i]; - println!("v_i at position {:?}: {:?}", i, v_i); + l.0[i] += a_l - z; + l.1[i] += s_l[i]; + r.0[i] += exp_y * (a_r + z) + z * z * exp_2; + r.1[i] += exp_y * s_r[i]; // if v_i == 0 { // r0[i] -= exp_y; // } else { @@ -112,12 +110,9 @@ impl RangeProof { exp_2 = exp_2 + exp_2; // 2^i -> 
2^(i+1) } - t.0 = inner_product(&l0, &r0); - t.1 = inner_product(&l0, &r1) + inner_product(&l1, &r0); - t.2 = inner_product(&l1, &r1); - println!("t0: {:?}", t.0); - println!("t1: {:?}", t.1); - println!("t2: {:?}", t.2); + t.0 = inner_product(&l.0, &r.0); + t.1 = inner_product(&l.0, &r.1) + inner_product(&l.1, &r.0); + t.2 = inner_product(&l.1, &r.1); // let mut t2 = Polynomial::new(); // for i in 0..n { @@ -159,24 +154,21 @@ impl RangeProof { println!("t2: {:?}", t.2); */ - // Generate x by committing to big_t_1, big_t_2 (in the paper: T1, T2; line 47-51) + // Generate x by committing to big_t_1, big_t_2 (line 49-54) let tau_1 = Scalar::random(&mut rng); let tau_2 = Scalar::random(&mut rng); let big_t_1 = g * t.1 + h * tau_1; let big_t_2 = g * t.2 + h * tau_2; let (x, _) = commit(&big_t_1, &big_t_2); // TODO: use a different commit? - // Generate final values for proof (line 52-54) + // Generate final values for proof (line 55-60) let tau_x = tau_1 * x + tau_2 * x * x + z * z * gamma; let mu = alpha + rho * x; - let t_total = t.0 + t.1 * x + t.2 * x * x; + let t_hat = t.0 + t.1 * x + t.2 * x * x; // Calculate l, r - which is only necessary if not doing IPP (line 55-57) // Adding this in a seperate loop so we can remove it easily later - let mut l = vec![Scalar::zero(); n]; - let mut r = vec![Scalar::zero(); n]; - /* // APPROACH 1 TO CALCULATING l, r let mut exp_y = Scalar::one(); // start at y^0 = 1 @@ -196,18 +188,16 @@ impl RangeProof { */ // APPROACH 2 TO CALCULATING l, r - for i in 0..n { - l[i] += l0[i] + l1[i] * x; - r[i] += r0[i] + r1[i] * x; - } + let l_total = l.eval(x); + let r_total = r.eval(x); - // Generate proof! (line 58) + // Generate proof! (line 61) RangeProof { tau_x: tau_x, mu: mu, - t: t_total, - l: l, - r: r, + t: t_hat, + l: l_total, + r: r_total, big_v: big_v, big_a: big_a, @@ -221,48 +211,97 @@ impl RangeProof { } } + pub fn fancy_gen_proof(&self) -> RangeProof { + + + unimplemented!() + } + pub fn verify_proof(&self) -> bool { let (y, z) = commit(&self.big_a, &self.big_s); let (x, _) = commit(&self.big_t_1, &self.big_t_2); + let g_vec = make_generators(&self.g, self.n); + let mut hprime_vec = make_generators(&self.h, self.n); - // line 60 - if self.t != inner_product(&self.l, &self.r) { - println!("fails check on line 60: t != l * r"); - return false - } + // line 62: calculate hprime_vec + let mut exp_y = Scalar::one(); // start at y^0 = 1 + for i in 0..self.n { + hprime_vec[i] = hprime_vec[i] * Scalar::invert(&exp_y); + exp_y = exp_y * y; // y^i -> y^(i+1) + } - // line 61 + // line 63 let z2 = z * z; let z3 = z2 * z; let mut power_g = Scalar::zero(); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - for _ in 0..self.n { power_g += -z2 * exp_y - z3 * exp_2 + z * exp_y; exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } - - let t_check = self.g * power_g + self.big_v * z2 + self.big_t_1 * x + self.big_t_2 * x * x; let t_commit = self.g * self.t + self.h * self.tau_x; if t_commit != t_check { - println!("fails check on line 61"); + println!("fails check on line 63"); return false } + // line 64: calculate big_p + let mut big_p = self.big_a + self.big_s * x; + + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + for i in 0..self.n { + big_p -= g_vec[i] * z; // IS THIS RIGHT? 
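        // Checking the "IS THIS RIGHT?" question against the paper: the commitment
        // being rebuilt is P = A + x*S - z*<1^n, g_vec> + <z*y^n + z^2*2^n, hprime_vec>,
        // so subtracting z * g_vec[i] here and adding (z*y^i + z^2*2^i) * hprime_vec[i]
        // on the next line matches that definition; the per-index subtraction of z
        // does appear to be correct.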
+ big_p += hprime_vec[i] * (z * exp_y + z * z * exp_2); + + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + // line 65: check big_p against l, r + let mut big_p_check = self.h * self.mu; + for i in 0..self.n { + big_p_check += g_vec[i] * self.l[i] + hprime_vec[i] * self.r[i]; + } + if big_p != big_p_check { + println!("fails check on line 65: big_p != g * l + hprime * r"); + return false + } + + // line 66: check t = l * r + if self.t != inner_product(&self.l, &self.r) { + println!("fails check on line 66: t != l * r"); + return false + } + return true } } -impl Polynomial { - pub fn new() -> Polynomial { - Polynomial(Scalar::zero(), Scalar::zero(), Scalar::zero()) +impl PolyDeg3 { + pub fn new() -> PolyDeg3 { + PolyDeg3(Scalar::zero(), Scalar::zero(), Scalar::zero()) } } +impl VecPoly2 { + pub fn new(n: usize) -> VecPoly2 { + VecPoly2(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) + } + pub fn eval(&self, x: Scalar) -> Vec { + let n = self.0.len(); + let mut out = vec![Scalar::zero(); n]; + for i in 0..n { + out[i] += self.0[i] + self.1[i] * x; + } + out + } +} + pub fn make_generators(point: &RistrettoPoint, n: usize) -> Vec { let mut generators = vec![RistrettoPoint::identity(); n]; @@ -290,14 +329,15 @@ pub fn commit(v1: &RistrettoPoint, v2: &RistrettoPoint) -> (Scalar, Scalar) { } pub fn inner_product(a: &Vec, b: &Vec) -> Scalar { - let mut result = Scalar::zero(); + let mut out = Scalar::zero(); if a.len() != b.len() { // throw some error + println!("lengths of vectors don't match for inner product multiplication"); } for i in 0..a.len() { - result += a[i] * b[i]; + out += a[i] * b[i]; } - result + out } #[cfg(test)] @@ -317,40 +357,27 @@ mod tests { assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); } #[test] - fn test_verify_one() { - let rp = RangeProof::generate_proof(0, 1); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(1, 1); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(2, 1); - assert_eq!(rp.verify_proof(), false); - let rp = RangeProof::generate_proof(3, 1); - assert_eq!(rp.verify_proof(), false); - } - #[test] - fn test_verify_two() { - let rp = RangeProof::generate_proof(0, 2); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(1, 2); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(3, 2); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(4, 2); - assert_eq!(rp.verify_proof(), false); - let rp = RangeProof::generate_proof(8, 2); - assert_eq!(rp.verify_proof(), false); - } - #[test] - fn test_verify_large() { - let rp = RangeProof::generate_proof(250, 8); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(300, 8); - assert_eq!(rp.verify_proof(), false); - let rp = RangeProof::generate_proof(1000000, 20); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(1050000, 20); - assert_eq!(rp.verify_proof(), false); + fn test_verify() { + for n in &[1, 2, 4, 8, 16, 32] { + println!("n: {:?}", n); + let rp = RangeProof::generate_proof(0, *n); + assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(2u64.pow(*n as u32) - 1, *n); + assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(2u64.pow(*n as u32), *n); + assert_eq!(rp.verify_proof(), false); + let rp = RangeProof::generate_proof(2u64.pow(*n as u32) + 1, *n); + assert_eq!(rp.verify_proof(), false); + let rp = RangeProof::generate_proof(u64::max_value(), *n); + 
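            // u64::max_value() needs 64 bits to represent, so it lies outside the
            // range [0, 2^n) for every n in this loop and verification should fail.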
assert_eq!(rp.verify_proof(), false); + } } + // #[test] + // fn test_verify_rand() { + // for i in 0..1000 { + // let n = 32; + // } + // } } mod bench { @@ -362,4 +389,14 @@ mod bench { use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; b.iter(|| make_generators(&RISTRETTO_BASEPOINT_POINT, 100)); } + #[bench] + fn benchmark_make_proofs(b: &mut Bencher) { + for n in &[4, 8, 16, 32] { + b.iter(|| RangeProof::generate_proof(0, *n)); + b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32) - 1, *n)); + b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32), *n)); + b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32) + 1, *n)); + b.iter(|| RangeProof::generate_proof(u64::max_value(), *n)); + } + } } From e0577a3bc5a4be68d38132d1167efa8d4e65a8af Mon Sep 17 00:00:00 2001 From: Cathie Date: Thu, 8 Feb 2018 12:45:02 -0800 Subject: [PATCH 015/186] Switch to efficient proof gen --- src/lib.rs | 238 +++++++++++++++++++++++++++-------------------------- 1 file changed, 121 insertions(+), 117 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 721d9ef3..d40c6d38 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,9 +10,9 @@ use curve25519_dalek::ristretto; use curve25519_dalek::traits::Identity; use sha2::{Digest, Sha256, Sha512}; use curve25519_dalek::scalar::Scalar; -// use rand::OsRng; -use rand::SeedableRng; -use rand::StdRng; +use rand::{OsRng, Rng}; +// use rand::SeedableRng +// use rand::StdRng; struct PolyDeg3(Scalar, Scalar, Scalar); struct VecPoly2(Vec, Vec); @@ -23,7 +23,7 @@ struct RangeProof { t: Scalar, // don't need if doing inner product proof - l: Vec, + l: Vec, r: Vec, // committed values @@ -40,14 +40,10 @@ struct RangeProof { } impl RangeProof { - - pub fn generate_proof( - v: u64, - n: usize, - ) -> RangeProof { - // let mut rng: OsRng = OsRng::new().unwrap(); - let mut rng: StdRng = StdRng::from_seed(&[1, 2, 3, 4]); - + pub fn generate_proof(v: u64, n: usize) -> RangeProof { + let mut rng: OsRng = OsRng::new().unwrap(); + // useful for debugging: + // let mut rng: StdRng = StdRng::from_seed(&[1, 2, 3, 4]); // Setup: generate groups g & h, commit to v (line 34) let g = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); @@ -84,6 +80,7 @@ impl RangeProof { // Calculate t + /* // APPROACH 1 TO CALCULATING T: // calculate vectors l0, l1, r0, r1 and multiply let mut l = VecPoly2::new(n); @@ -113,29 +110,18 @@ impl RangeProof { t.0 = inner_product(&l.0, &r.0); t.1 = inner_product(&l.0, &r.1) + inner_product(&l.1, &r.0); t.2 = inner_product(&l.1, &r.1); + */ - // let mut t2 = Polynomial::new(); - // for i in 0..n { - // t2.0 += l0[i] * r0[i]; - // t2.1 += l0[i] * r1[i] + l1[i] * r0[i]; - // t2.2 += l1[i] * r1[i]; - // } - - // println!("t2_0: {:?}", t2.0); - // println!("t2_1: {:?}", t2.1); - // println!("t2_2: {:?}", t2.2); - - /* // APPROACH 2 TO CALCULATING T: // calculate scalars t0, t1, t2 seperately - let mut t = Polynomial::new(); + let mut t = PolyDeg3::new(); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 let z2 = z * z; let z3 = z2 * z; for i in 0..n { - let v_i = (v >> i) & 1; + let v_i = (v >> i) & 1; t.0 += exp_y * (z - z2) - z3 * exp_2; t.1 += s_l[i] * exp_y * z + s_l[i] * z2 * exp_2 + s_r[i] * exp_y * (-z); t.2 += s_l[i] * exp_y * s_r[i]; @@ -149,10 +135,6 @@ impl RangeProof { exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } - println!("t0: {:?}", t.0); - println!("t1: {:?}", t.1); - println!("t2: {:?}", t.2); - */ // Generate x by committing to big_t_1, big_t_2 
(line 49-54) let tau_1 = Scalar::random(&mut rng); @@ -169,27 +151,30 @@ impl RangeProof { // Calculate l, r - which is only necessary if not doing IPP (line 55-57) // Adding this in a seperate loop so we can remove it easily later - /* // APPROACH 1 TO CALCULATING l, r let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + let mut l_total = Vec::new(); + let mut r_total = Vec::new(); + for i in 0..n { - let a_l = (v >> i) & 1; + let a_l = (v >> i) & 1; // is it ok to convert a_l to scalar? - l += Scalar::from_u64(a_l) - z + s_l[i] * x; - r += exp_y * (z + s_r[i] * x) + z * z * exp_2; + l_total.push(Scalar::from_u64(a_l) - z + s_l[i] * x); + r_total.push(exp_y * (z + s_r[i] * x) + z * z * exp_2); if a_l == 0 { - r -= exp_y + r_total[i] -= exp_y } exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - */ - + } + + /* // APPROACH 2 TO CALCULATING l, r let l_total = l.eval(x); let r_total = r.eval(x); + */ // Generate proof! (line 61) RangeProof { @@ -199,7 +184,7 @@ impl RangeProof { l: l_total, r: r_total, - big_v: big_v, + big_v: big_v, big_a: big_a, big_s: big_s, big_t_1: big_t_1, @@ -211,42 +196,36 @@ impl RangeProof { } } - pub fn fancy_gen_proof(&self) -> RangeProof { - - - unimplemented!() - } - pub fn verify_proof(&self) -> bool { - let (y, z) = commit(&self.big_a, &self.big_s); - let (x, _) = commit(&self.big_t_1, &self.big_t_2); + let (y, z) = commit(&self.big_a, &self.big_s); + let (x, _) = commit(&self.big_t_1, &self.big_t_2); let g_vec = make_generators(&self.g, self.n); let mut hprime_vec = make_generators(&self.h, self.n); - // line 62: calculate hprime_vec - let mut exp_y = Scalar::one(); // start at y^0 = 1 - for i in 0..self.n { - hprime_vec[i] = hprime_vec[i] * Scalar::invert(&exp_y); - exp_y = exp_y * y; // y^i -> y^(i+1) - } + // line 62: calculate hprime_vec + let mut exp_y = Scalar::one(); // start at y^0 = 1 + for i in 0..self.n { + hprime_vec[i] = hprime_vec[i] * Scalar::invert(&exp_y); + exp_y = exp_y * y; // y^i -> y^(i+1) + } // line 63 let z2 = z * z; let z3 = z2 * z; let mut power_g = Scalar::zero(); - let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 for _ in 0..self.n { - power_g += -z2 * exp_y - z3 * exp_2 + z * exp_y; + power_g += -z2 * exp_y - z3 * exp_2 + z * exp_y; - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } let t_check = self.g * power_g + self.big_v * z2 + self.big_t_1 * x + self.big_t_2 * x * x; let t_commit = self.g * self.t + self.h * self.tau_x; if t_commit != t_check { - println!("fails check on line 63"); - return false + println!("fails check on line 63"); + return false; } // line 64: calculate big_p @@ -255,8 +234,8 @@ impl RangeProof { let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 for i in 0..self.n { - big_p -= g_vec[i] * z; // IS THIS RIGHT? - big_p += hprime_vec[i] * (z * exp_y + z * z * exp_2); + big_p -= g_vec[i] * z; // IS THIS RIGHT? 
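+            // (Answering the question above: the paper's check computes
+            // P = A + x*S - z*sum(g_i) + sum((z*y^i + z^2*2^i) * hprime_i),
+            // which is what this loop builds, so subtracting z * g_vec[i] appears to be right.)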
+ big_p += hprime_vec[i] * (z * exp_y + z * z * exp_2); exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) @@ -265,20 +244,20 @@ impl RangeProof { // line 65: check big_p against l, r let mut big_p_check = self.h * self.mu; for i in 0..self.n { - big_p_check += g_vec[i] * self.l[i] + hprime_vec[i] * self.r[i]; + big_p_check += g_vec[i] * self.l[i] + hprime_vec[i] * self.r[i]; } if big_p != big_p_check { - println!("fails check on line 65: big_p != g * l + hprime * r"); - return false + println!("fails check on line 65: big_p != g * l + hprime * r"); + return false; } // line 66: check t = l * r if self.t != inner_product(&self.l, &self.r) { - println!("fails check on line 66: t != l * r"); - return false + println!("fails check on line 66: t != l * r"); + return false; } - return true + return true; } } @@ -289,17 +268,17 @@ impl PolyDeg3 { } impl VecPoly2 { - pub fn new(n: usize) -> VecPoly2 { - VecPoly2(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) - } - pub fn eval(&self, x: Scalar) -> Vec { - let n = self.0.len(); - let mut out = vec![Scalar::zero(); n]; - for i in 0..n { - out[i] += self.0[i] + self.1[i] * x; - } - out - } + pub fn new(n: usize) -> VecPoly2 { + VecPoly2(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) + } + pub fn eval(&self, x: Scalar) -> Vec { + let n = self.0.len(); + let mut out = vec![Scalar::zero(); n]; + for i in 0..n { + out[i] += self.0[i] + self.1[i] * x; + } + out + } } pub fn make_generators(point: &RistrettoPoint, n: usize) -> Vec { @@ -329,15 +308,15 @@ pub fn commit(v1: &RistrettoPoint, v2: &RistrettoPoint) -> (Scalar, Scalar) { } pub fn inner_product(a: &Vec, b: &Vec) -> Scalar { - let mut out = Scalar::zero(); - if a.len() != b.len() { - // throw some error - println!("lengths of vectors don't match for inner product multiplication"); - } - for i in 0..a.len() { - out += a[i] * b[i]; - } - out + let mut out = Scalar::zero(); + if a.len() != b.len() { + // throw some error + println!("lengths of vectors don't match for inner product multiplication"); + } + for i in 0..a.len() { + out += a[i] * b[i]; + } + out } #[cfg(test)] @@ -345,9 +324,19 @@ mod tests { use super::*; #[test] fn test_inner_product() { - let a = vec![Scalar::from_u64(1), Scalar::from_u64(2), Scalar::from_u64(3), Scalar::from_u64(4)]; - let b = vec![Scalar::from_u64(2), Scalar::from_u64(3), Scalar::from_u64(4), Scalar::from_u64(5)]; - assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); + let a = vec![ + Scalar::from_u64(1), + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + ]; + let b = vec![ + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + Scalar::from_u64(5), + ]; + assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); } #[test] fn test_t() { @@ -357,27 +346,42 @@ mod tests { assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); } #[test] - fn test_verify() { - for n in &[1, 2, 4, 8, 16, 32] { - println!("n: {:?}", n); - let rp = RangeProof::generate_proof(0, *n); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(2u64.pow(*n as u32) - 1, *n); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(2u64.pow(*n as u32), *n); - assert_eq!(rp.verify_proof(), false); - let rp = RangeProof::generate_proof(2u64.pow(*n as u32) + 1, *n); - assert_eq!(rp.verify_proof(), false); - let rp = RangeProof::generate_proof(u64::max_value(), *n); - assert_eq!(rp.verify_proof(), false); - } + fn test_verify_simple() { + for n in &[1, 2, 4, 8, 16, 32] { + println!("n: {:?}", n); + 
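+            // 0 and 2^n - 1 are the endpoints of the valid range [0, 2^n) and should verify;
+            // 2^n, 2^n + 1 and u64::MAX fall outside it and should be rejected.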
let rp = RangeProof::generate_proof(0, *n); + assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(2u64.pow(*n as u32) - 1, *n); + assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(2u64.pow(*n as u32), *n); + assert_eq!(rp.verify_proof(), false); + let rp = RangeProof::generate_proof(2u64.pow(*n as u32) + 1, *n); + assert_eq!(rp.verify_proof(), false); + let rp = RangeProof::generate_proof(u64::max_value(), *n); + assert_eq!(rp.verify_proof(), false); + } + } + #[test] + fn test_verify_rand_big() { + for i in 0..50 { + let mut rng: OsRng = OsRng::new().unwrap(); + let v: u64 = rng.next_u64(); + println!("v: {:?}", v); + let rp = RangeProof::generate_proof(v, 32); + let expected = v <= 2u64.pow(32); + assert_eq!(rp.verify_proof(), expected); + } + } + #[test] + fn test_verify_rand_small() { + for i in 0..50 { + let mut rng: OsRng = OsRng::new().unwrap(); + let v: u32 = rng.next_u32(); + println!("v: {:?}", v); + let rp = RangeProof::generate_proof(v as u64, 32); + assert_eq!(rp.verify_proof(), true); + } } - // #[test] - // fn test_verify_rand() { - // for i in 0..1000 { - // let n = 32; - // } - // } } mod bench { @@ -391,12 +395,12 @@ mod bench { } #[bench] fn benchmark_make_proofs(b: &mut Bencher) { - for n in &[4, 8, 16, 32] { - b.iter(|| RangeProof::generate_proof(0, *n)); - b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32) - 1, *n)); - b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32), *n)); - b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32) + 1, *n)); - b.iter(|| RangeProof::generate_proof(u64::max_value(), *n)); - } + for n in &[4, 8, 16, 32] { + b.iter(|| RangeProof::generate_proof(0, *n)); + b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32) - 1, *n)); + b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32), *n)); + b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32) + 1, *n)); + b.iter(|| RangeProof::generate_proof(u64::max_value(), *n)); + } } } From 03ccaf8000beeb438c74e960c759706cadb4d278 Mon Sep 17 00:00:00 2001 From: Cathie Date: Fri, 9 Feb 2018 12:10:58 -0800 Subject: [PATCH 016/186] speeding up verification --- src/lib.rs | 72 ++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 51 insertions(+), 21 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index d40c6d38..146b41ac 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,7 +10,7 @@ use curve25519_dalek::ristretto; use curve25519_dalek::traits::Identity; use sha2::{Digest, Sha256, Sha512}; use curve25519_dalek::scalar::Scalar; -use rand::{OsRng, Rng}; +use rand::OsRng; // use rand::SeedableRng // use rand::StdRng; @@ -96,7 +96,7 @@ impl RangeProof { l.0[i] += a_l - z; l.1[i] += s_l[i]; - r.0[i] += exp_y * (a_r + z) + z * z * exp_2; + r.0[i] += exp_y * (a_r + z) + z2 * exp_2; r.1[i] += exp_y * s_r[i]; // if v_i == 0 { // r0[i] -= exp_y; @@ -144,7 +144,7 @@ impl RangeProof { let (x, _) = commit(&big_t_1, &big_t_2); // TODO: use a different commit? // Generate final values for proof (line 55-60) - let tau_x = tau_1 * x + tau_2 * x * x + z * z * gamma; + let tau_x = tau_1 * x + tau_2 * x * x + z2 * gamma; let mu = alpha + rho * x; let t_hat = t.0 + t.1 * x + t.2 * x * x; @@ -162,7 +162,7 @@ impl RangeProof { // is it ok to convert a_l to scalar? 
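            // (It is: a_l is a single bit, so Scalar::from_u64(a_l) is exactly zero or one.)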
l_total.push(Scalar::from_u64(a_l) - z + s_l[i] * x); - r_total.push(exp_y * (z + s_r[i] * x) + z * z * exp_2); + r_total.push(exp_y * (z + s_r[i] * x) + z2 * exp_2); if a_l == 0 { r_total[i] -= exp_y } @@ -233,24 +233,42 @@ impl RangeProof { let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + let mut sum_g_vec = RistrettoPoint::identity(); + + for i in 0..self.n { + sum_g_vec += g_vec[i]; + } + big_p -= sum_g_vec * z; + for i in 0..self.n { - big_p -= g_vec[i] * z; // IS THIS RIGHT? - big_p += hprime_vec[i] * (z * exp_y + z * z * exp_2); + // big_p -= g_vec[i] * z; + big_p += hprime_vec[i] * (z * exp_y + z2 * exp_2); exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } + /* + // Compute big_s (in the paper: S; line 43-45) + let points_iter = iter::once(h).chain(g_vec.iter()).chain(h_vec.iter()); + let randomness: Vec<_> = (0..(1 + 2 * n)).map(|_| Scalar::random(&mut rng)).collect(); + let big_s = ristretto::multiscalar_mult(&randomness, points_iter); +*/ // line 65: check big_p against l, r let mut big_p_check = self.h * self.mu; - for i in 0..self.n { - big_p_check += g_vec[i] * self.l[i] + hprime_vec[i] * self.r[i]; - } - if big_p != big_p_check { - println!("fails check on line 65: big_p != g * l + hprime * r"); - return false; - } + { + let points_iter = g_vec.iter().chain(hprime_vec.iter()); + let scalars_iter = self.l.iter().chain(self.r.iter()); + big_p_check += ristretto::multiscalar_mult(scalars_iter, points_iter); + // for i in 0..self.n { + // big_p_check += g_vec[i] * self.l[i] + hprime_vec[i] * self.r[i]; + // } + if big_p != big_p_check { + println!("fails check on line 65: big_p != g * l + hprime * r"); + return false; + } + } // line 66: check t = l * r if self.t != inner_product(&self.l, &self.r) { println!("fails check on line 66: t != l * r"); @@ -394,13 +412,25 @@ mod bench { b.iter(|| make_generators(&RISTRETTO_BASEPOINT_POINT, 100)); } #[bench] - fn benchmark_make_proofs(b: &mut Bencher) { - for n in &[4, 8, 16, 32] { - b.iter(|| RangeProof::generate_proof(0, *n)); - b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32) - 1, *n)); - b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32), *n)); - b.iter(|| RangeProof::generate_proof(2u64.pow(*n as u32) + 1, *n)); - b.iter(|| RangeProof::generate_proof(u64::max_value(), *n)); - } + fn benchmark_make_proofs_64(b: &mut Bencher) { + let mut rng: OsRng = OsRng::new().unwrap(); + b.iter(|| RangeProof::generate_proof(rng.next_u64(), 64)); + } + #[bench] + fn benchmark_make_proofs_32(b: &mut Bencher) { + let mut rng: OsRng = OsRng::new().unwrap(); + b.iter(|| RangeProof::generate_proof(rng.next_u32() as u64, 32)); + } + #[bench] + fn benchmark_verify_proof_64(b: &mut Bencher) { + let mut rng: OsRng = OsRng::new().unwrap(); + let rp = RangeProof::generate_proof(rng.next_u64(), 64); + b.iter(|| rp.verify_proof()); + } + #[bench] + fn benchmark_verify_proof_32(b: &mut Bencher) { + let mut rng: OsRng = OsRng::new().unwrap(); + let rp = RangeProof::generate_proof(rng.next_u32() as u64, 32); + b.iter(|| rp.verify_proof()); } } From af66f59219d760dc22cb87549cfb8def1a0fd246 Mon Sep 17 00:00:00 2001 From: Cathie Date: Fri, 9 Feb 2018 17:04:19 -0800 Subject: [PATCH 017/186] it compiles now --- Cargo.toml | 4 ++ src/lib.rs | 148 ++++++++++++++++++++++------------------------------- 2 files changed, 64 insertions(+), 88 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d3fed8f7..1711048a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,3 +7,7 @@ 
authors = ["Cathie "] curve25519-dalek = { version = "^0.15", features = ["nightly"] } sha2 = "^0.7" rand = "^0.4" + +[features] +yolocrypto = ["curve25519-dalek/yolocrypto"] +std = ["curve25519-dalek/std"] \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index 146b41ac..131099ad 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,9 +10,7 @@ use curve25519_dalek::ristretto; use curve25519_dalek::traits::Identity; use sha2::{Digest, Sha256, Sha512}; use curve25519_dalek::scalar::Scalar; -use rand::OsRng; -// use rand::SeedableRng -// use rand::StdRng; +use rand::{OsRng, Rng}; struct PolyDeg3(Scalar, Scalar, Scalar); struct VecPoly2(Vec, Vec); @@ -78,48 +76,12 @@ impl RangeProof { // Generate y, z by committing to A, S (line 46-48) let (y, z) = commit(&big_a, &big_s); - // Calculate t - - /* - // APPROACH 1 TO CALCULATING T: - // calculate vectors l0, l1, r0, r1 and multiply - let mut l = VecPoly2::new(n); - let mut r = VecPoly2::new(n); - let mut t = PolyDeg3::new(); - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - - for i in 0..n { - let v_i = (v >> i) & 1; - let a_l = Scalar::from_u64(v_i); - let a_r = a_l - Scalar::one(); - - l.0[i] += a_l - z; - l.1[i] += s_l[i]; - r.0[i] += exp_y * (a_r + z) + z2 * exp_2; - r.1[i] += exp_y * s_r[i]; - // if v_i == 0 { - // r0[i] -= exp_y; - // } else { - // l0[i] += Scalar::one(); - // } - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - - t.0 = inner_product(&l.0, &r.0); - t.1 = inner_product(&l.0, &r.1) + inner_product(&l.1, &r.0); - t.2 = inner_product(&l.1, &r.1); - */ - - // APPROACH 2 TO CALCULATING T: - // calculate scalars t0, t1, t2 seperately + // Calculate t by calculating scalars t0, t1, t2 seperately let mut t = PolyDeg3::new(); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 let z2 = z * z; let z3 = z2 * z; - for i in 0..n { let v_i = (v >> i) & 1; t.0 += exp_y * (z - z2) - z3 * exp_2; @@ -136,6 +98,35 @@ impl RangeProof { exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } + /* + // alternative approach to calculating t: calculate vectors l0, l1, r0, r1 and multiply + let mut l = VecPoly2::new(n); + let mut r = VecPoly2::new(n); + let z2 = z * z; + let mut t = PolyDeg3::new(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + for i in 0..n { + let v_i = (v >> i) & 1; + + l.0[i] -= z; + l.1[i] += s_l[i]; + r.0[i] += exp_y * z + z2 * exp_2; + r.1[i] += exp_y * s_r[i]; + if v_i == 0 { + r.0[i] -= exp_y; + } else { + l.0[i] += Scalar::one(); + } + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + t.0 = inner_product(&l.0, &r.0); + t.1 = inner_product(&l.0, &r.1) + inner_product(&l.1, &r.0); + t.2 = inner_product(&l.1, &r.1); + */ + + // Generate x by committing to big_t_1, big_t_2 (line 49-54) let tau_1 = Scalar::random(&mut rng); let tau_2 = Scalar::random(&mut rng); @@ -150,8 +141,6 @@ impl RangeProof { // Calculate l, r - which is only necessary if not doing IPP (line 55-57) // Adding this in a seperate loop so we can remove it easily later - - // APPROACH 1 TO CALCULATING l, r let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 let mut l_total = Vec::new(); @@ -159,19 +148,19 @@ impl RangeProof { for i in 0..n { let a_l = (v >> i) & 1; - - // is it ok to convert a_l to scalar? 
- l_total.push(Scalar::from_u64(a_l) - z + s_l[i] * x); + l_total.push(-z + s_l[i] * x); r_total.push(exp_y * (z + s_r[i] * x) + z2 * exp_2); if a_l == 0 { - r_total[i] -= exp_y + r_total[i] -= exp_y; + } else { + l_total[i] += Scalar::one(); } exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } /* - // APPROACH 2 TO CALCULATING l, r + // alternative approach to calculating l, r: calculate from computed l0, l1, r0, r1 let l_total = l.eval(x); let r_total = r.eval(x); */ @@ -202,21 +191,14 @@ impl RangeProof { let g_vec = make_generators(&self.g, self.n); let mut hprime_vec = make_generators(&self.h, self.n); - // line 62: calculate hprime_vec - let mut exp_y = Scalar::one(); // start at y^0 = 1 - for i in 0..self.n { - hprime_vec[i] = hprime_vec[i] * Scalar::invert(&exp_y); - exp_y = exp_y * y; // y^i -> y^(i+1) - } - - // line 63 + // line 63: check that t = t0 + t1 * x + t2 * x * x let z2 = z * z; let z3 = z2 * z; let mut power_g = Scalar::zero(); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 for _ in 0..self.n { - power_g += -z2 * exp_y - z3 * exp_2 + z * exp_y; + power_g += (z - z2) * exp_y - z3 * exp_2; exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) @@ -228,48 +210,38 @@ impl RangeProof { return false; } - // line 64: calculate big_p - let mut big_p = self.big_a + self.big_s * x; - - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + // line 62: calculate hprime + // line 64: compute commitment to l, r let mut sum_g_vec = RistrettoPoint::identity(); - for i in 0..self.n { sum_g_vec += g_vec[i]; } + let mut big_p = self.big_a + self.big_s * x; big_p -= sum_g_vec * z; + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + let inverse_y = Scalar::invert(&y); + let mut inv_exp_y = Scalar::one(); // start at y^-0 = 1 for i in 0..self.n { - // big_p -= g_vec[i] * z; + hprime_vec[i] = hprime_vec[i] * inv_exp_y; big_p += hprime_vec[i] * (z * exp_y + z2 * exp_2); - exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + inv_exp_y = inv_exp_y * inverse_y; // y^(-i) * y^(-1) -> y^(-(i+1)) } - /* - // Compute big_s (in the paper: S; line 43-45) - let points_iter = iter::once(h).chain(g_vec.iter()).chain(h_vec.iter()); - let randomness: Vec<_> = (0..(1 + 2 * n)).map(|_| Scalar::random(&mut rng)).collect(); - let big_s = ristretto::multiscalar_mult(&randomness, points_iter); -*/ - // line 65: check big_p against l, r + // line 65: check that l, r are correct let mut big_p_check = self.h * self.mu; - { - - let points_iter = g_vec.iter().chain(hprime_vec.iter()); - let scalars_iter = self.l.iter().chain(self.r.iter()); - big_p_check += ristretto::multiscalar_mult(scalars_iter, points_iter); - // for i in 0..self.n { - // big_p_check += g_vec[i] * self.l[i] + hprime_vec[i] * self.r[i]; - // } - if big_p != big_p_check { - println!("fails check on line 65: big_p != g * l + hprime * r"); - return false; - } + let points_iter = g_vec.iter().chain(hprime_vec.iter()); + let scalars_iter = self.l.iter().chain(self.r.iter()); + big_p_check += ristretto::multiscalar_mult(scalars_iter, points_iter); + if big_p != big_p_check { + println!("fails check on line 65: big_p != g * l + hprime * r"); + return false; } - // line 66: check t = l * r + + // line 66: check that t is correct if self.t != inner_product(&self.l, &self.r) { println!("fails check on line 66: t != l * 
r"); return false; @@ -381,7 +353,7 @@ mod tests { } #[test] fn test_verify_rand_big() { - for i in 0..50 { + for _ in 0..50 { let mut rng: OsRng = OsRng::new().unwrap(); let v: u64 = rng.next_u64(); println!("v: {:?}", v); @@ -392,7 +364,7 @@ mod tests { } #[test] fn test_verify_rand_small() { - for i in 0..50 { + for _ in 0..50 { let mut rng: OsRng = OsRng::new().unwrap(); let v: u32 = rng.next_u32(); println!("v: {:?}", v); @@ -412,12 +384,12 @@ mod bench { b.iter(|| make_generators(&RISTRETTO_BASEPOINT_POINT, 100)); } #[bench] - fn benchmark_make_proofs_64(b: &mut Bencher) { + fn benchmark_make_proof_64(b: &mut Bencher) { let mut rng: OsRng = OsRng::new().unwrap(); b.iter(|| RangeProof::generate_proof(rng.next_u64(), 64)); } #[bench] - fn benchmark_make_proofs_32(b: &mut Bencher) { + fn benchmark_make_proof_32(b: &mut Bencher) { let mut rng: OsRng = OsRng::new().unwrap(); b.iter(|| RangeProof::generate_proof(rng.next_u32() as u64, 32)); } From 84eade74eeb6d0fc4fa0ef5a2858af3442294d5d Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Sun, 11 Feb 2018 22:16:07 -0800 Subject: [PATCH 018/186] added fiatshamir api --- Cargo.toml | 1 + src/fiatshamir.rs | 91 +++++++++++++++++++++++++++++++++++++++++++++++ src/lib.rs | 16 +++++++-- 3 files changed, 105 insertions(+), 3 deletions(-) create mode 100644 src/fiatshamir.rs diff --git a/Cargo.toml b/Cargo.toml index 1711048a..8f9dcadf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,7 @@ authors = ["Cathie "] curve25519-dalek = { version = "^0.15", features = ["nightly"] } sha2 = "^0.7" rand = "^0.4" +tiny-keccak = "^1.4" [features] yolocrypto = ["curve25519-dalek/yolocrypto"] diff --git a/src/fiatshamir.rs b/src/fiatshamir.rs new file mode 100644 index 00000000..9ec78054 --- /dev/null +++ b/src/fiatshamir.rs @@ -0,0 +1,91 @@ +use curve25519_dalek::ristretto::CompressedRistretto; +use curve25519_dalek::scalar::Scalar; +use tiny_keccak::Keccak; + +pub trait Element { + fn as_bytes(&self) -> &[u8]; +} + +pub trait FS { + /// Commits the encoded element. + /// Element is not length-prefixed or otherwise disambiguated in the input stream, + /// so all protocol parameters must be pre-committed in a form of a label or prior commitments. + fn commit(&mut self, element: &E); + + /// Returns another challenge scalar from the current state. 
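+    ///
+    /// A minimal usage sketch (the point names are illustrative):
+    /// `fs.commit(&A.compress()); fs.commit(&S.compress()); let y = fs.challenge(); let z = fs.challenge();`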
+ fn challenge(&mut self) -> Scalar; +} + +enum SpongeState { + Absorbing, + Squeezing, +} + +pub struct ShakeFS { + hash: Keccak, + state: SpongeState, +} + +impl ShakeFS { + pub fn new>(label: S) -> Self { + let mut hash = Keccak::new_shake128(); + hash.update(label.as_ref()); + hash.pad(); + hash.fill_block(); + ShakeFS { + hash: hash, + state: SpongeState::Absorbing, + } + } +} + +impl FS for ShakeFS { + fn commit(&mut self, element: &E) { + match self.state { + SpongeState::Absorbing => {} + SpongeState::Squeezing => { + // no padding because squeeze phase does not insert data + self.hash.fill_block(); + self.state = SpongeState::Absorbing; + } + } + self.hash.absorb(element.as_bytes()); + } + fn challenge(&mut self) -> Scalar { + match self.state { + SpongeState::Absorbing => { + self.hash.pad(); + self.hash.fill_block(); + self.state = SpongeState::Squeezing; + } + SpongeState::Squeezing => {} + } + let mut buf = [0u8; 64]; + self.hash.squeeze(&mut buf); + Scalar::from_bytes_mod_order_wide(&buf) + } +} + +impl Element for CompressedRistretto { + fn as_bytes(&self) -> &[u8] { + CompressedRistretto::as_bytes(self) + } +} + +impl Element for Scalar { + fn as_bytes(&self) -> &[u8] { + Scalar::as_bytes(self) + } +} + +impl Element for [u8; 1] { + fn as_bytes(&self) -> &[u8] { + &self[..] + } +} + +impl Element for [u8; 8] { + fn as_bytes(&self) -> &[u8] { + &self[..] + } +} diff --git a/src/lib.rs b/src/lib.rs index 131099ad..b9d643b2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,21 +1,28 @@ #![feature(test)] +#![feature(non_ascii_idents)] +#![allow(non_snake_case)] +extern crate core; extern crate curve25519_dalek; extern crate sha2; extern crate test; extern crate rand; +extern crate tiny_keccak; + +mod fiatshamir; + use std::iter; +use sha2::{Digest, Sha256, Sha512}; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; use curve25519_dalek::traits::Identity; -use sha2::{Digest, Sha256, Sha512}; use curve25519_dalek::scalar::Scalar; -use rand::{OsRng, Rng}; +use rand::OsRng; struct PolyDeg3(Scalar, Scalar, Scalar); struct VecPoly2(Vec, Vec); -struct RangeProof { +pub struct RangeProof { tau_x: Scalar, mu: Scalar, t: Scalar, @@ -312,6 +319,8 @@ pub fn inner_product(a: &Vec, b: &Vec) -> Scalar { #[cfg(test)] mod tests { use super::*; + use rand::Rng; + #[test] fn test_inner_product() { let a = vec![ @@ -376,6 +385,7 @@ mod tests { mod bench { use super::*; + use rand::Rng; use test::Bencher; #[bench] From a57bee58570505037d42aee21c87527f1fff8200 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Mon, 12 Feb 2018 15:11:04 -0800 Subject: [PATCH 019/186] Feature: random oracle API (#2) Add a random oracle API --- Cargo.toml | 3 + src/fiatshamir.rs | 91 ------------------------ src/lib.rs | 3 +- src/random_oracle.rs | 166 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 170 insertions(+), 93 deletions(-) delete mode 100644 src/fiatshamir.rs create mode 100644 src/random_oracle.rs diff --git a/Cargo.toml b/Cargo.toml index 8f9dcadf..c71a8b23 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,9 @@ sha2 = "^0.7" rand = "^0.4" tiny-keccak = "^1.4" +[dev-dependencies] +hex = "^0.3" + [features] yolocrypto = ["curve25519-dalek/yolocrypto"] std = ["curve25519-dalek/std"] \ No newline at end of file diff --git a/src/fiatshamir.rs b/src/fiatshamir.rs deleted file mode 100644 index 9ec78054..00000000 --- a/src/fiatshamir.rs +++ /dev/null @@ -1,91 +0,0 @@ -use curve25519_dalek::ristretto::CompressedRistretto; -use curve25519_dalek::scalar::Scalar; -use 
tiny_keccak::Keccak; - -pub trait Element { - fn as_bytes(&self) -> &[u8]; -} - -pub trait FS { - /// Commits the encoded element. - /// Element is not length-prefixed or otherwise disambiguated in the input stream, - /// so all protocol parameters must be pre-committed in a form of a label or prior commitments. - fn commit(&mut self, element: &E); - - /// Returns another challenge scalar from the current state. - fn challenge(&mut self) -> Scalar; -} - -enum SpongeState { - Absorbing, - Squeezing, -} - -pub struct ShakeFS { - hash: Keccak, - state: SpongeState, -} - -impl ShakeFS { - pub fn new>(label: S) -> Self { - let mut hash = Keccak::new_shake128(); - hash.update(label.as_ref()); - hash.pad(); - hash.fill_block(); - ShakeFS { - hash: hash, - state: SpongeState::Absorbing, - } - } -} - -impl FS for ShakeFS { - fn commit(&mut self, element: &E) { - match self.state { - SpongeState::Absorbing => {} - SpongeState::Squeezing => { - // no padding because squeeze phase does not insert data - self.hash.fill_block(); - self.state = SpongeState::Absorbing; - } - } - self.hash.absorb(element.as_bytes()); - } - fn challenge(&mut self) -> Scalar { - match self.state { - SpongeState::Absorbing => { - self.hash.pad(); - self.hash.fill_block(); - self.state = SpongeState::Squeezing; - } - SpongeState::Squeezing => {} - } - let mut buf = [0u8; 64]; - self.hash.squeeze(&mut buf); - Scalar::from_bytes_mod_order_wide(&buf) - } -} - -impl Element for CompressedRistretto { - fn as_bytes(&self) -> &[u8] { - CompressedRistretto::as_bytes(self) - } -} - -impl Element for Scalar { - fn as_bytes(&self) -> &[u8] { - Scalar::as_bytes(self) - } -} - -impl Element for [u8; 1] { - fn as_bytes(&self) -> &[u8] { - &self[..] - } -} - -impl Element for [u8; 8] { - fn as_bytes(&self) -> &[u8] { - &self[..] - } -} diff --git a/src/lib.rs b/src/lib.rs index b9d643b2..0704c47f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,4 @@ #![feature(test)] -#![feature(non_ascii_idents)] #![allow(non_snake_case)] extern crate core; @@ -9,7 +8,7 @@ extern crate test; extern crate rand; extern crate tiny_keccak; -mod fiatshamir; +mod random_oracle; use std::iter; use sha2::{Digest, Sha256, Sha512}; diff --git a/src/random_oracle.rs b/src/random_oracle.rs new file mode 100644 index 00000000..5a65b8b3 --- /dev/null +++ b/src/random_oracle.rs @@ -0,0 +1,166 @@ +use curve25519_dalek::scalar::Scalar; +use tiny_keccak::Keccak; + +#[derive(Clone)] +pub struct RandomOracle { + hash: Keccak, + state: SpongeState, +} + +impl RandomOracle { + /// Instantiates a new random oracle with a given label that + /// will be committed as a first message with a padding. + pub fn new(label: &[u8]) -> Self { + let mut ro = RandomOracle { + hash: Keccak::new_shake128(), + state: SpongeState::Absorbing, + }; + ro.commit(label); + // makes sure the label is disambiguated from the rest of the messages. + ro.pad(); + ro + } + + /// Sends a message to a random oracle. + /// Each message must be less than 256 bytes long. + pub fn commit(&mut self, message: &[u8]) { + self.set_state(SpongeState::Absorbing); + let len = message.len(); + if len > 255 { + panic!("Committed message must be less than 256 bytes!"); + } + // we use 1-byte length prefix, hence the limitation on the message size. + let lenprefix = [len as u8; 1]; + self.hash.absorb(&lenprefix); + self.hash.absorb(message); + } + + /// Extracts an arbitrary-sized number of bytes as a challenge. 
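+    ///
+    /// A usage sketch (the buffer size here is arbitrary):
+    /// `let mut out = [0u8; 32]; ro.challenge_bytes(&mut out);`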
+ pub fn challenge_bytes(&mut self, mut output: &mut [u8]) { + self.set_state(SpongeState::Squeezing); + self.hash.squeeze(&mut output); + } + + /// Gets a challenge in a form of a scalar by squeezing + /// 64 bytes and reducing them to a scalar. + pub fn challenge_scalar(&mut self) -> Scalar { + let mut buf = [0u8; 64]; + self.challenge_bytes(&mut buf); + Scalar::from_bytes_mod_order_wide(&buf) + } + + /// Ensures that the state is correct. + /// Does necessary padding+permutation if needed to transition from one state to another. + fn set_state(&mut self, newstate: SpongeState) { + if self.state != newstate { + self.pad(); + self.state = newstate; + } + } + + /// Pad separates the prior operations by a full permutation. + /// Each incoming message is length-prefixed anyway, but padding + /// enables pre-computing and re-using the oracle state. + fn pad(&mut self) { + // tiny_keccak's API is not very clear, + // so we'd probably need to fork and either document it, or tweak to make it more sensible. + // 1. pad() only adds keccak padding, but does not advance internal offset and + // does not perform a permutation round. + // 2. fill_block() does not pad, but resets the internal offset and does a permutation round. + match self.state { + SpongeState::Absorbing => { + self.hash.pad(); + self.hash.fill_block(); + } + SpongeState::Squeezing => { + // in the squeezing state we are not feeding messages, + // only reading portions of a state, so padding does not make sense. + // what we need is to perform computation and reset the internal offset to zero. + self.hash.fill_block(); + } + } + } +} + +#[derive(Clone,PartialEq)] +enum SpongeState { + Absorbing, + Squeezing, +} + +#[cfg(test)] +mod tests { + extern crate hex; + use super::*; + + #[test] + fn usage_example() { + let mut ro = RandomOracle::new(b"TestProtocol"); + ro.commit(b"msg1"); + ro.commit(b"msg2"); + { + let mut challenge1 = [0u8; 8]; + ro.challenge_bytes(&mut challenge1); + assert_eq!(hex::encode(challenge1), "7f04fadac332ce45"); + } + { + let mut challenge2 = [0u8; 200]; + ro.challenge_bytes(&mut challenge2); + } + { + let mut challenge3 = [0u8; 8]; + ro.challenge_bytes(&mut challenge3); + assert_eq!(hex::encode(challenge3), "2cd86eb9913c0dc7"); + } + ro.commit(b"msg3"); + { + let mut challenge4 = [0u8; 8]; + ro.challenge_bytes(&mut challenge4); + assert_eq!(hex::encode(challenge4), "383c7fc8d7bf8ad3"); + } + } + + #[test] + fn disambiguation() { + { + let mut ro = RandomOracle::new(b"TestProtocol"); + ro.commit(b"msg1msg2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "42023e04ad4f232c"); + } + } + { + let mut ro = RandomOracle::new(b"TestProtocol"); + ro.commit(b"msg1"); + ro.commit(b"msg2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "7f04fadac332ce45"); + } + } + { + let mut ro = RandomOracle::new(b"TestProtocol"); + ro.commit(b"msg"); + ro.commit(b"1msg2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "dbbd832ca1fd3c2f"); + } + } + { + let mut ro = RandomOracle::new(b"TestProtocol"); + ro.commit(b"ms"); + ro.commit(b"g1ms"); + ro.commit(b"g2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "18860c017b1d28ec"); + } + } + } +} From 103e9e7166bcefc95fa798a39b8c6a4544af728e Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 12 Feb 2018 16:05:22 -0800 Subject: [PATCH 020/186] Remove alternative ways of calculating t0, t1, t2 --- src/lib.rs | 50 
-------------------------------------------------- 1 file changed, 50 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 0704c47f..876a0d8e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,7 +19,6 @@ use curve25519_dalek::scalar::Scalar; use rand::OsRng; struct PolyDeg3(Scalar, Scalar, Scalar); -struct VecPoly2(Vec, Vec); pub struct RangeProof { tau_x: Scalar, @@ -104,35 +103,6 @@ impl RangeProof { exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } - /* - // alternative approach to calculating t: calculate vectors l0, l1, r0, r1 and multiply - let mut l = VecPoly2::new(n); - let mut r = VecPoly2::new(n); - let z2 = z * z; - let mut t = PolyDeg3::new(); - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - for i in 0..n { - let v_i = (v >> i) & 1; - - l.0[i] -= z; - l.1[i] += s_l[i]; - r.0[i] += exp_y * z + z2 * exp_2; - r.1[i] += exp_y * s_r[i]; - if v_i == 0 { - r.0[i] -= exp_y; - } else { - l.0[i] += Scalar::one(); - } - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - t.0 = inner_product(&l.0, &r.0); - t.1 = inner_product(&l.0, &r.1) + inner_product(&l.1, &r.0); - t.2 = inner_product(&l.1, &r.1); - */ - - // Generate x by committing to big_t_1, big_t_2 (line 49-54) let tau_1 = Scalar::random(&mut rng); let tau_2 = Scalar::random(&mut rng); @@ -165,12 +135,6 @@ impl RangeProof { exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } - /* - // alternative approach to calculating l, r: calculate from computed l0, l1, r0, r1 - let l_total = l.eval(x); - let r_total = r.eval(x); - */ - // Generate proof! (line 61) RangeProof { tau_x: tau_x, @@ -263,20 +227,6 @@ impl PolyDeg3 { } } -impl VecPoly2 { - pub fn new(n: usize) -> VecPoly2 { - VecPoly2(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) - } - pub fn eval(&self, x: Scalar) -> Vec { - let n = self.0.len(); - let mut out = vec![Scalar::zero(); n]; - for i in 0..n { - out[i] += self.0[i] + self.1[i] * x; - } - out - } -} - pub fn make_generators(point: &RistrettoPoint, n: usize) -> Vec { let mut generators = vec![RistrettoPoint::identity(); n]; From aa9cf89b1ee17143ccf2a83f071f4f4d375b7936 Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 12 Feb 2018 16:38:45 -0800 Subject: [PATCH 021/186] Adding scalar invert func & test --- src/lib.rs | 1 + src/scalar.rs | 99 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 100 insertions(+) create mode 100644 src/scalar.rs diff --git a/src/lib.rs b/src/lib.rs index 876a0d8e..a8c08799 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,6 +9,7 @@ extern crate rand; extern crate tiny_keccak; mod random_oracle; +mod scalar; use std::iter; use sha2::{Digest, Sha256, Sha512}; diff --git a/src/scalar.rs b/src/scalar.rs new file mode 100644 index 00000000..ec915af4 --- /dev/null +++ b/src/scalar.rs @@ -0,0 +1,99 @@ +use curve25519_dalek::scalar::Scalar; + +pub fn batch_invert(inputs: &mut [Scalar]) { + // First, compute the product of all inputs using a product + // tree: + // + // Inputs: [x_0, x_1, x_2] + // + // Tree: + // + // x_0*x_1*x_2*1 tree[1] + // / \ + // x_0*x_1 x_2*1 tree[2,3] + // / \ / \ + // x_0 x_1 x_2 1 tree[4,5,6,7] + // + // The leaves of the tree are the inputs. We store the tree in + // an array of length 2*n, similar to a binary heap. + // + // To initialize the tree, set every node to 1, then fill in + // the leaf nodes with the input variables. Finally, set every + // non-leaf node to be the product of its children. 
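+    //
+    // A small worked example (with hypothetical inputs): for [2, 3, 4] the padded
+    // leaves are [2, 3, 4, 1], the internal nodes are [6, 4], and the root is 24,
+    // so allinv = 1/24. Walking up from the leaf holding 2 multiplies allinv by
+    // the siblings 3 and then 4, giving (1/24) * 3 * 4 = 1/2, the inverse of 2.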
+ let n = inputs.len().next_power_of_two(); + let mut tree = vec![Scalar::one(); 2*n]; + tree[n..n+inputs.len()].copy_from_slice(inputs); + for i in (1..n).rev() { + tree[i] = &tree[2*i] * &tree[2*i+1]; + } + + // The root of the tree is the product of all inputs, and is + // stored at index 1. Compute its inverse. + let allinv = tree[1].invert(); + + // To compute y_i = 1/x_i, start at the i-th leaf node of the + // tree, and walk up to the root of the tree, multiplying + // `allinv` by each sibling. This computes + // + // y_i = y * (all x_j except x_i) + // + // using lg(n) multiplications for each y_i, taking n*lg(n) in + // total. + for i in 0..inputs.len() { + let mut inv = allinv; + let mut node = n + i; + while node > 1 { + inv *= &tree[node ^ 1]; + node = node >> 1; + } + inputs[i] = inv; + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn batch_invert_matches_nonbatched() { + let W = Scalar::from_bits( + [ + 0x84, 0xfc, 0xbc, 0x4f, 0x78, 0x12, 0xa0, 0x06, + 0xd7, 0x91, 0xd9, 0x7a, 0x3a, 0x27, 0xdd, 0x1e, + 0x21, 0x43, 0x45, 0xf7, 0xb1, 0xb9, 0x56, 0x7a, + 0x81, 0x30, 0x73, 0x44, 0x96, 0x85, 0xb5, 0x07, + ] + ); + let X = Scalar::from_bits( + [ + 0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84, + 0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2, 0x7d, 0x52, + 0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44, + 0xd4, 0x49, 0xf4, 0xa8, 0x79, 0xd9, 0xf2, 0x04, + ] + ); + let Y = Scalar::from_bits( + [ + 0x90, 0x76, 0x33, 0xfe, 0x1c, 0x4b, 0x66, 0xa4, + 0xa2, 0x8d, 0x2d, 0xd7, 0x67, 0x83, 0x86, 0xc3, + 0x53, 0xd0, 0xde, 0x54, 0x55, 0xd4, 0xfc, 0x9d, + 0xe8, 0xef, 0x7a, 0xc3, 0x1f, 0x35, 0xbb, 0x05, + ] + ); + let Z = Scalar::from_bits( + [ + 0x05, 0x9d, 0x3e, 0x0b, 0x09, 0x26, 0x50, 0x3d, + 0xa3, 0x84, 0xa1, 0x3c, 0x92, 0x7a, 0xc2, 0x06, + 0x41, 0x98, 0xcf, 0x34, 0x3a, 0x24, 0xd5, 0xb7, + 0xeb, 0x33, 0x6a, 0x2d, 0xfc, 0x11, 0x21, 0x0b, + ] + ); + + let list = vec![W, X, Y, Z, W*Y, X*Z, Y*Y, W*Z]; + let mut inv_list = list.clone(); + batch_invert(&mut inv_list[..]); + for i in 0..8 { + assert_eq!(list[i].invert(), inv_list[i]); + } + } +} \ No newline at end of file From 02dc9b5ce74594686a3ddc4bd8aaf08056435b25 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Tue, 13 Feb 2018 13:20:27 -0800 Subject: [PATCH 022/186] move range proofs to a separate file (#3) --- Cargo.toml | 3 +- README.md | 19 ++- src/lib.rs | 365 +------------------------------------------ src/random_oracle.rs | 9 +- src/range_proof.rs | 358 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 388 insertions(+), 366 deletions(-) create mode 100644 src/range_proof.rs diff --git a/Cargo.toml b/Cargo.toml index c71a8b23..86e25491 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,4 +14,5 @@ hex = "^0.3" [features] yolocrypto = ["curve25519-dalek/yolocrypto"] -std = ["curve25519-dalek/std"] \ No newline at end of file +std = ["curve25519-dalek/std"] +bench = [] diff --git a/README.md b/README.md index f56790c3..45cc9e46 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,19 @@ -# ristretto-bulletproofs +# Ristretto Bulletproofs + Implementing [bulletproofs](https://crypto.stanford.edu/bulletproofs/) using [ristretto](https://github.com/dalek-cryptography/ed25519-dalek). -Step 1 of a larger proposed proposed plan, detailed [here](https://github.com/chain/research/issues/7). \ No newline at end of file +Step 1 of a larger proposed proposed plan, detailed [here](https://github.com/chain/research/issues/7). 
+ +## Development + +Run tests: + +``` +cargo test +``` + +Run benchmarks: + +``` +cargo bench --features="bench" +``` diff --git a/src/lib.rs b/src/lib.rs index 876a0d8e..0b4500f4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,367 +1,14 @@ -#![feature(test)] -#![allow(non_snake_case)] +#![cfg_attr(feature = "bench", feature(test))] -extern crate core; extern crate curve25519_dalek; extern crate sha2; -extern crate test; extern crate rand; extern crate tiny_keccak; -mod random_oracle; - -use std::iter; -use sha2::{Digest, Sha256, Sha512}; -use curve25519_dalek::ristretto::RistrettoPoint; -use curve25519_dalek::ristretto; -use curve25519_dalek::traits::Identity; -use curve25519_dalek::scalar::Scalar; -use rand::OsRng; - -struct PolyDeg3(Scalar, Scalar, Scalar); - -pub struct RangeProof { - tau_x: Scalar, - mu: Scalar, - t: Scalar, - - // don't need if doing inner product proof - l: Vec, - r: Vec, - - // committed values - big_v: RistrettoPoint, - big_a: RistrettoPoint, - big_s: RistrettoPoint, - big_t_1: RistrettoPoint, - big_t_2: RistrettoPoint, - - // public knowledge - n: usize, - g: RistrettoPoint, - h: RistrettoPoint, -} - -impl RangeProof { - pub fn generate_proof(v: u64, n: usize) -> RangeProof { - let mut rng: OsRng = OsRng::new().unwrap(); - // useful for debugging: - // let mut rng: StdRng = StdRng::from_seed(&[1, 2, 3, 4]); - - // Setup: generate groups g & h, commit to v (line 34) - let g = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let h = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); - let g_vec = make_generators(g, n); - let h_vec = make_generators(h, n); - let gamma = Scalar::random(&mut rng); - let big_v = h * gamma + g * Scalar::from_u64(v); - - // Compute big_a (line 39-42) - let alpha = Scalar::random(&mut rng); - let mut big_a = h * alpha; - for i in 0..n { - let v_i = (v >> i) & 1; - if v_i == 0 { - big_a -= h_vec[i]; - } else { - big_a += g_vec[i]; - } - } - - // Compute big_s (in the paper: S; line 43-45) - let points_iter = iter::once(h).chain(g_vec.iter()).chain(h_vec.iter()); - let randomness: Vec<_> = (0..(1 + 2 * n)).map(|_| Scalar::random(&mut rng)).collect(); - let big_s = ristretto::multiscalar_mult(&randomness, points_iter); - - // Save/label randomness (rho, s_L, s_R) to be used later - let rho = &randomness[0]; - let s_l = &randomness[1..(n + 1)]; - let s_r = &randomness[(n + 1)..(1 + 2 * n)]; - - // Generate y, z by committing to A, S (line 46-48) - let (y, z) = commit(&big_a, &big_s); - - // Calculate t by calculating scalars t0, t1, t2 seperately - let mut t = PolyDeg3::new(); - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - let z2 = z * z; - let z3 = z2 * z; - for i in 0..n { - let v_i = (v >> i) & 1; - t.0 += exp_y * (z - z2) - z3 * exp_2; - t.1 += s_l[i] * exp_y * z + s_l[i] * z2 * exp_2 + s_r[i] * exp_y * (-z); - t.2 += s_l[i] * exp_y * s_r[i]; - // check if a_l is 0 or 1 - if v_i == 0 { - t.1 -= s_l[i] * exp_y; - } else { - t.0 += z2 * exp_2; - t.1 += s_r[i] * exp_y; - } - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - - // Generate x by committing to big_t_1, big_t_2 (line 49-54) - let tau_1 = Scalar::random(&mut rng); - let tau_2 = Scalar::random(&mut rng); - let big_t_1 = g * t.1 + h * tau_1; - let big_t_2 = g * t.2 + h * tau_2; - let (x, _) = commit(&big_t_1, &big_t_2); // TODO: use a different commit? 
- - // Generate final values for proof (line 55-60) - let tau_x = tau_1 * x + tau_2 * x * x + z2 * gamma; - let mu = alpha + rho * x; - let t_hat = t.0 + t.1 * x + t.2 * x * x; - - // Calculate l, r - which is only necessary if not doing IPP (line 55-57) - // Adding this in a seperate loop so we can remove it easily later - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - let mut l_total = Vec::new(); - let mut r_total = Vec::new(); - - for i in 0..n { - let a_l = (v >> i) & 1; - l_total.push(-z + s_l[i] * x); - r_total.push(exp_y * (z + s_r[i] * x) + z2 * exp_2); - if a_l == 0 { - r_total[i] -= exp_y; - } else { - l_total[i] += Scalar::one(); - } - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - - // Generate proof! (line 61) - RangeProof { - tau_x: tau_x, - mu: mu, - t: t_hat, - l: l_total, - r: r_total, - - big_v: big_v, - big_a: big_a, - big_s: big_s, - big_t_1: big_t_1, - big_t_2: big_t_2, - - n: n, - g: *g, - h: *h, - } - } - - pub fn verify_proof(&self) -> bool { - let (y, z) = commit(&self.big_a, &self.big_s); - let (x, _) = commit(&self.big_t_1, &self.big_t_2); - let g_vec = make_generators(&self.g, self.n); - let mut hprime_vec = make_generators(&self.h, self.n); - - // line 63: check that t = t0 + t1 * x + t2 * x * x - let z2 = z * z; - let z3 = z2 * z; - let mut power_g = Scalar::zero(); - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - for _ in 0..self.n { - power_g += (z - z2) * exp_y - z3 * exp_2; - - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - let t_check = self.g * power_g + self.big_v * z2 + self.big_t_1 * x + self.big_t_2 * x * x; - let t_commit = self.g * self.t + self.h * self.tau_x; - if t_commit != t_check { - println!("fails check on line 63"); - return false; - } - - // line 62: calculate hprime - // line 64: compute commitment to l, r - let mut sum_g_vec = RistrettoPoint::identity(); - for i in 0..self.n { - sum_g_vec += g_vec[i]; - } - let mut big_p = self.big_a + self.big_s * x; - big_p -= sum_g_vec * z; - - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - let inverse_y = Scalar::invert(&y); - let mut inv_exp_y = Scalar::one(); // start at y^-0 = 1 - for i in 0..self.n { - hprime_vec[i] = hprime_vec[i] * inv_exp_y; - big_p += hprime_vec[i] * (z * exp_y + z2 * exp_2); - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - inv_exp_y = inv_exp_y * inverse_y; // y^(-i) * y^(-1) -> y^(-(i+1)) - } - - // line 65: check that l, r are correct - let mut big_p_check = self.h * self.mu; - let points_iter = g_vec.iter().chain(hprime_vec.iter()); - let scalars_iter = self.l.iter().chain(self.r.iter()); - big_p_check += ristretto::multiscalar_mult(scalars_iter, points_iter); - if big_p != big_p_check { - println!("fails check on line 65: big_p != g * l + hprime * r"); - return false; - } - - // line 66: check that t is correct - if self.t != inner_product(&self.l, &self.r) { - println!("fails check on line 66: t != l * r"); - return false; - } - - return true; - } -} - -impl PolyDeg3 { - pub fn new() -> PolyDeg3 { - PolyDeg3(Scalar::zero(), Scalar::zero(), Scalar::zero()) - } -} - -pub fn make_generators(point: &RistrettoPoint, n: usize) -> Vec { - let mut generators = vec![RistrettoPoint::identity(); n]; - - generators[0] = RistrettoPoint::hash_from_bytes::(point.compress().as_bytes()); - for i in 
1..n { - let prev = generators[i - 1].compress(); - generators[i] = RistrettoPoint::hash_from_bytes::(prev.as_bytes()); - } - generators -} - -pub fn commit(v1: &RistrettoPoint, v2: &RistrettoPoint) -> (Scalar, Scalar) { - let mut c1_digest = Sha512::new(); - c1_digest.input(v1.compress().as_bytes()); - c1_digest.input(v2.compress().as_bytes()); - let c1 = Scalar::from_hash(c1_digest); - - let mut c2_digest = Sha512::new(); - c2_digest.input(v1.compress().as_bytes()); - c2_digest.input(v2.compress().as_bytes()); - c2_digest.input(c1.as_bytes()); - let c2 = Scalar::from_hash(c2_digest); - - (c1, c2) -} - -pub fn inner_product(a: &Vec, b: &Vec) -> Scalar { - let mut out = Scalar::zero(); - if a.len() != b.len() { - // throw some error - println!("lengths of vectors don't match for inner product multiplication"); - } - for i in 0..a.len() { - out += a[i] * b[i]; - } - out -} - -#[cfg(test)] -mod tests { - use super::*; - use rand::Rng; - - #[test] - fn test_inner_product() { - let a = vec![ - Scalar::from_u64(1), - Scalar::from_u64(2), - Scalar::from_u64(3), - Scalar::from_u64(4), - ]; - let b = vec![ - Scalar::from_u64(2), - Scalar::from_u64(3), - Scalar::from_u64(4), - Scalar::from_u64(5), - ]; - assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); - } - #[test] - fn test_t() { - let rp = RangeProof::generate_proof(1, 1); - assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); - let rp = RangeProof::generate_proof(1, 2); - assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); - } - #[test] - fn test_verify_simple() { - for n in &[1, 2, 4, 8, 16, 32] { - println!("n: {:?}", n); - let rp = RangeProof::generate_proof(0, *n); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(2u64.pow(*n as u32) - 1, *n); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(2u64.pow(*n as u32), *n); - assert_eq!(rp.verify_proof(), false); - let rp = RangeProof::generate_proof(2u64.pow(*n as u32) + 1, *n); - assert_eq!(rp.verify_proof(), false); - let rp = RangeProof::generate_proof(u64::max_value(), *n); - assert_eq!(rp.verify_proof(), false); - } - } - #[test] - fn test_verify_rand_big() { - for _ in 0..50 { - let mut rng: OsRng = OsRng::new().unwrap(); - let v: u64 = rng.next_u64(); - println!("v: {:?}", v); - let rp = RangeProof::generate_proof(v, 32); - let expected = v <= 2u64.pow(32); - assert_eq!(rp.verify_proof(), expected); - } - } - #[test] - fn test_verify_rand_small() { - for _ in 0..50 { - let mut rng: OsRng = OsRng::new().unwrap(); - let v: u32 = rng.next_u32(); - println!("v: {:?}", v); - let rp = RangeProof::generate_proof(v as u64, 32); - assert_eq!(rp.verify_proof(), true); - } - } -} +#[cfg(all(test, feature = "bench"))] +extern crate test; -mod bench { - use super::*; - use rand::Rng; - use test::Bencher; +mod random_oracle; +mod range_proof; - #[bench] - fn benchmark_make_generators(b: &mut Bencher) { - use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; - b.iter(|| make_generators(&RISTRETTO_BASEPOINT_POINT, 100)); - } - #[bench] - fn benchmark_make_proof_64(b: &mut Bencher) { - let mut rng: OsRng = OsRng::new().unwrap(); - b.iter(|| RangeProof::generate_proof(rng.next_u64(), 64)); - } - #[bench] - fn benchmark_make_proof_32(b: &mut Bencher) { - let mut rng: OsRng = OsRng::new().unwrap(); - b.iter(|| RangeProof::generate_proof(rng.next_u32() as u64, 32)); - } - #[bench] - fn benchmark_verify_proof_64(b: &mut Bencher) { - let mut rng: OsRng = OsRng::new().unwrap(); - let rp = RangeProof::generate_proof(rng.next_u64(), 64); - b.iter(|| 
rp.verify_proof()); - } - #[bench] - fn benchmark_verify_proof_32(b: &mut Bencher) { - let mut rng: OsRng = OsRng::new().unwrap(); - let rp = RangeProof::generate_proof(rng.next_u32() as u64, 32); - b.iter(|| rp.verify_proof()); - } -} +pub use range_proof::*; diff --git a/src/random_oracle.rs b/src/random_oracle.rs index 5a65b8b3..a37d10ed 100644 --- a/src/random_oracle.rs +++ b/src/random_oracle.rs @@ -41,7 +41,7 @@ impl RandomOracle { self.hash.squeeze(&mut output); } - /// Gets a challenge in a form of a scalar by squeezing + /// Gets a challenge in a form of a scalar by squeezing /// 64 bytes and reducing them to a scalar. pub fn challenge_scalar(&mut self) -> Scalar { let mut buf = [0u8; 64]; @@ -62,11 +62,12 @@ impl RandomOracle { /// Each incoming message is length-prefixed anyway, but padding /// enables pre-computing and re-using the oracle state. fn pad(&mut self) { - // tiny_keccak's API is not very clear, + // tiny_keccak's API is not very clear, // so we'd probably need to fork and either document it, or tweak to make it more sensible. // 1. pad() only adds keccak padding, but does not advance internal offset and // does not perform a permutation round. - // 2. fill_block() does not pad, but resets the internal offset and does a permutation round. + // 2. fill_block() does not pad, but resets the internal offset + // and does a permutation round. match self.state { SpongeState::Absorbing => { self.hash.pad(); @@ -82,7 +83,7 @@ impl RandomOracle { } } -#[derive(Clone,PartialEq)] +#[derive(Clone, PartialEq)] enum SpongeState { Absorbing, Squeezing, diff --git a/src/range_proof.rs b/src/range_proof.rs new file mode 100644 index 00000000..902f7d43 --- /dev/null +++ b/src/range_proof.rs @@ -0,0 +1,358 @@ +#![allow(non_snake_case)] + +use std::iter; +use sha2::{Digest, Sha256, Sha512}; +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::ristretto; +use curve25519_dalek::traits::Identity; +use curve25519_dalek::scalar::Scalar; +use rand::OsRng; + +struct PolyDeg3(Scalar, Scalar, Scalar); + +pub struct RangeProof { + tau_x: Scalar, + mu: Scalar, + t: Scalar, + + // don't need if doing inner product proof + l: Vec, + r: Vec, + + // committed values + big_v: RistrettoPoint, + big_a: RistrettoPoint, + big_s: RistrettoPoint, + big_t_1: RistrettoPoint, + big_t_2: RistrettoPoint, + + // public knowledge + n: usize, + g: RistrettoPoint, + h: RistrettoPoint, +} + +impl RangeProof { + pub fn generate_proof(v: u64, n: usize) -> RangeProof { + let mut rng: OsRng = OsRng::new().unwrap(); + // useful for debugging: + // let mut rng: StdRng = StdRng::from_seed(&[1, 2, 3, 4]); + + // Setup: generate groups g & h, commit to v (line 34) + let g = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let h = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); + let g_vec = make_generators(g, n); + let h_vec = make_generators(h, n); + let gamma = Scalar::random(&mut rng); + let big_v = h * gamma + g * Scalar::from_u64(v); + + // Compute big_a (line 39-42) + let alpha = Scalar::random(&mut rng); + let mut big_a = h * alpha; + for i in 0..n { + let v_i = (v >> i) & 1; + if v_i == 0 { + big_a -= h_vec[i]; + } else { + big_a += g_vec[i]; + } + } + + // Compute big_s (in the paper: S; line 43-45) + let points_iter = iter::once(h).chain(g_vec.iter()).chain(h_vec.iter()); + let randomness: Vec<_> = (0..(1 + 2 * n)).map(|_| Scalar::random(&mut rng)).collect(); + let big_s = ristretto::multiscalar_mult(&randomness, points_iter); + + // Save/label randomness (rho, s_L, s_R) to be 
used later + let rho = &randomness[0]; + let s_l = &randomness[1..(n + 1)]; + let s_r = &randomness[(n + 1)..(1 + 2 * n)]; + + // Generate y, z by committing to A, S (line 46-48) + let (y, z) = commit(&big_a, &big_s); + + // Calculate t by calculating scalars t0, t1, t2 seperately + let mut t = PolyDeg3::new(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + let z2 = z * z; + let z3 = z2 * z; + for i in 0..n { + let v_i = (v >> i) & 1; + t.0 += exp_y * (z - z2) - z3 * exp_2; + t.1 += s_l[i] * exp_y * z + s_l[i] * z2 * exp_2 + s_r[i] * exp_y * (-z); + t.2 += s_l[i] * exp_y * s_r[i]; + // check if a_l is 0 or 1 + if v_i == 0 { + t.1 -= s_l[i] * exp_y; + } else { + t.0 += z2 * exp_2; + t.1 += s_r[i] * exp_y; + } + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + // Generate x by committing to big_t_1, big_t_2 (line 49-54) + let tau_1 = Scalar::random(&mut rng); + let tau_2 = Scalar::random(&mut rng); + let big_t_1 = g * t.1 + h * tau_1; + let big_t_2 = g * t.2 + h * tau_2; + let (x, _) = commit(&big_t_1, &big_t_2); // TODO: use a different commit? + + // Generate final values for proof (line 55-60) + let tau_x = tau_1 * x + tau_2 * x * x + z2 * gamma; + let mu = alpha + rho * x; + let t_hat = t.0 + t.1 * x + t.2 * x * x; + + // Calculate l, r - which is only necessary if not doing IPP (line 55-57) + // Adding this in a seperate loop so we can remove it easily later + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + let mut l_total = Vec::new(); + let mut r_total = Vec::new(); + + for i in 0..n { + let a_l = (v >> i) & 1; + l_total.push(-z + s_l[i] * x); + r_total.push(exp_y * (z + s_r[i] * x) + z2 * exp_2); + if a_l == 0 { + r_total[i] -= exp_y; + } else { + l_total[i] += Scalar::one(); + } + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + // Generate proof! 
(line 61) + RangeProof { + tau_x: tau_x, + mu: mu, + t: t_hat, + l: l_total, + r: r_total, + + big_v: big_v, + big_a: big_a, + big_s: big_s, + big_t_1: big_t_1, + big_t_2: big_t_2, + + n: n, + g: *g, + h: *h, + } + } + + pub fn verify_proof(&self) -> bool { + let (y, z) = commit(&self.big_a, &self.big_s); + let (x, _) = commit(&self.big_t_1, &self.big_t_2); + let g_vec = make_generators(&self.g, self.n); + let mut hprime_vec = make_generators(&self.h, self.n); + + // line 63: check that t = t0 + t1 * x + t2 * x * x + let z2 = z * z; + let z3 = z2 * z; + let mut power_g = Scalar::zero(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + for _ in 0..self.n { + power_g += (z - z2) * exp_y - z3 * exp_2; + + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + let t_check = self.g * power_g + self.big_v * z2 + self.big_t_1 * x + self.big_t_2 * x * x; + let t_commit = self.g * self.t + self.h * self.tau_x; + if t_commit != t_check { + //println!("fails check on line 63"); + return false; + } + + // line 62: calculate hprime + // line 64: compute commitment to l, r + let mut sum_g_vec = RistrettoPoint::identity(); + for i in 0..self.n { + sum_g_vec += g_vec[i]; + } + let mut big_p = self.big_a + self.big_s * x; + big_p -= sum_g_vec * z; + + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + let inverse_y = Scalar::invert(&y); + let mut inv_exp_y = Scalar::one(); // start at y^-0 = 1 + for i in 0..self.n { + hprime_vec[i] = hprime_vec[i] * inv_exp_y; + big_p += hprime_vec[i] * (z * exp_y + z2 * exp_2); + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + inv_exp_y = inv_exp_y * inverse_y; // y^(-i) * y^(-1) -> y^(-(i+1)) + } + + // line 65: check that l, r are correct + let mut big_p_check = self.h * self.mu; + let points_iter = g_vec.iter().chain(hprime_vec.iter()); + let scalars_iter = self.l.iter().chain(self.r.iter()); + big_p_check += ristretto::multiscalar_mult(scalars_iter, points_iter); + if big_p != big_p_check { + //println!("fails check on line 65: big_p != g * l + hprime * r"); + return false; + } + + // line 66: check that t is correct + if self.t != inner_product(&self.l, &self.r) { + //println!("fails check on line 66: t != l * r"); + return false; + } + + return true; + } +} + +impl PolyDeg3 { + pub fn new() -> PolyDeg3 { + PolyDeg3(Scalar::zero(), Scalar::zero(), Scalar::zero()) + } +} + +pub fn make_generators(point: &RistrettoPoint, n: usize) -> Vec { + let mut generators = vec![RistrettoPoint::identity(); n]; + + generators[0] = RistrettoPoint::hash_from_bytes::(point.compress().as_bytes()); + for i in 1..n { + let prev = generators[i - 1].compress(); + generators[i] = RistrettoPoint::hash_from_bytes::(prev.as_bytes()); + } + generators +} + +pub fn commit(v1: &RistrettoPoint, v2: &RistrettoPoint) -> (Scalar, Scalar) { + let mut c1_digest = Sha512::new(); + c1_digest.input(v1.compress().as_bytes()); + c1_digest.input(v2.compress().as_bytes()); + let c1 = Scalar::from_hash(c1_digest); + + let mut c2_digest = Sha512::new(); + c2_digest.input(v1.compress().as_bytes()); + c2_digest.input(v2.compress().as_bytes()); + c2_digest.input(c1.as_bytes()); + let c2 = Scalar::from_hash(c2_digest); + + (c1, c2) +} + +pub fn inner_product(a: &Vec, b: &Vec) -> Scalar { + let mut out = Scalar::zero(); + if a.len() != b.len() { + // throw some error + //println!("lengths of vectors don't match for inner product 
multiplication"); + } + for i in 0..a.len() { + out += a[i] * b[i]; + } + out +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::Rng; + + #[test] + fn test_inner_product() { + let a = vec![ + Scalar::from_u64(1), + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + ]; + let b = vec![ + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + Scalar::from_u64(5), + ]; + assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); + } + #[test] + fn test_t() { + let rp = RangeProof::generate_proof(1, 1); + assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); + let rp = RangeProof::generate_proof(1, 2); + assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); + } + #[test] + fn test_verify_simple() { + for n in &[1, 2, 4, 8, 16, 32] { + //println!("n: {:?}", n); + let rp = RangeProof::generate_proof(0, *n); + assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(2u64.pow(*n as u32) - 1, *n); + assert_eq!(rp.verify_proof(), true); + let rp = RangeProof::generate_proof(2u64.pow(*n as u32), *n); + assert_eq!(rp.verify_proof(), false); + let rp = RangeProof::generate_proof(2u64.pow(*n as u32) + 1, *n); + assert_eq!(rp.verify_proof(), false); + let rp = RangeProof::generate_proof(u64::max_value(), *n); + assert_eq!(rp.verify_proof(), false); + } + } + #[test] + fn test_verify_rand_big() { + for _ in 0..50 { + let mut rng: OsRng = OsRng::new().unwrap(); + let v: u64 = rng.next_u64(); + //println!("v: {:?}", v); + let rp = RangeProof::generate_proof(v, 32); + let expected = v <= 2u64.pow(32); + assert_eq!(rp.verify_proof(), expected); + } + } + #[test] + fn test_verify_rand_small() { + for _ in 0..50 { + let mut rng: OsRng = OsRng::new().unwrap(); + let v: u32 = rng.next_u32(); + //println!("v: {:?}", v); + let rp = RangeProof::generate_proof(v as u64, 32); + assert_eq!(rp.verify_proof(), true); + } + } +} + +#[cfg(all(test, feature = "bench"))] +mod bench { + use super::*; + use rand::Rng; + use test::Bencher; + + #[bench] + fn benchmark_make_generators(b: &mut Bencher) { + use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; + b.iter(|| make_generators(&RISTRETTO_BASEPOINT_POINT, 100)); + } + #[bench] + fn benchmark_make_proof_64(b: &mut Bencher) { + let mut rng: OsRng = OsRng::new().unwrap(); + b.iter(|| RangeProof::generate_proof(rng.next_u64(), 64)); + } + #[bench] + fn benchmark_make_proof_32(b: &mut Bencher) { + let mut rng: OsRng = OsRng::new().unwrap(); + b.iter(|| RangeProof::generate_proof(rng.next_u32() as u64, 32)); + } + #[bench] + fn benchmark_verify_proof_64(b: &mut Bencher) { + let mut rng: OsRng = OsRng::new().unwrap(); + let rp = RangeProof::generate_proof(rng.next_u64(), 64); + b.iter(|| rp.verify_proof()); + } + #[bench] + fn benchmark_verify_proof_32(b: &mut Bencher) { + let mut rng: OsRng = OsRng::new().unwrap(); + let rp = RangeProof::generate_proof(rng.next_u32() as u64, 32); + b.iter(|| rp.verify_proof()); + } +} From bbffac1e1fe4cd5569e0456aa86deead76d5c213 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Tue, 13 Feb 2018 15:52:38 -0800 Subject: [PATCH 023/186] added testfile --- Testfile | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 Testfile diff --git a/Testfile b/Testfile new file mode 100644 index 00000000..efcfd7b8 --- /dev/null +++ b/Testfile @@ -0,0 +1,2 @@ +rustfmt: rustfmt --version && cargo fmt -- --write-mode=diff +cargotest: cargo test From b142b053846c765c57967fe6861264786fe7737f Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 14 Feb 2018 10:42:28 -0800 Subject: [PATCH 024/186] Change calculation 
for t0, t1, t2 --- src/range_proof.rs | 77 +++++++++++++++++++++++++++++----------------- 1 file changed, 49 insertions(+), 28 deletions(-) diff --git a/src/range_proof.rs b/src/range_proof.rs index 902f7d43..4b639f8f 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -10,6 +10,9 @@ use rand::OsRng; struct PolyDeg3(Scalar, Scalar, Scalar); +struct VecPoly2(Vec, Vec); + + pub struct RangeProof { tau_x: Scalar, mu: Scalar, @@ -71,27 +74,34 @@ impl RangeProof { // Generate y, z by committing to A, S (line 46-48) let (y, z) = commit(&big_a, &big_s); - // Calculate t by calculating scalars t0, t1, t2 seperately + // Calculate t by calculating vectors l0, l1, r0, r1 and multiplying + let mut l = VecPoly2::new(n); + let mut r = VecPoly2::new(n); + let z2 = z * z; let mut t = PolyDeg3::new(); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - let z2 = z * z; - let z3 = z2 * z; for i in 0..n { let v_i = (v >> i) & 1; - t.0 += exp_y * (z - z2) - z3 * exp_2; - t.1 += s_l[i] * exp_y * z + s_l[i] * z2 * exp_2 + s_r[i] * exp_y * (-z); - t.2 += s_l[i] * exp_y * s_r[i]; - // check if a_l is 0 or 1 + l.0[i] -= z; + l.1[i] += s_l[i]; + r.0[i] += exp_y * z + z2 * exp_2; + r.1[i] += exp_y * s_r[i]; if v_i == 0 { - t.1 -= s_l[i] * exp_y; + r.0[i] -= exp_y; } else { - t.0 += z2 * exp_2; - t.1 += s_r[i] * exp_y; + l.0[i] += Scalar::one(); } exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } + t.0 = inner_product(&l.0, &r.0); + t.2 = inner_product(&l.1, &r.1); + // use Karatsuba algorithm to find t.1 = l.0*r.1 + l.1*r.0 + let l_add = add_vec(&l.0, &l.1); + let r_add = add_vec(&r.0, &r.1); + let l_r_mul = inner_product(&l_add, &r_add); + t.1 = l_r_mul - t.0 - t.2; // Generate x by committing to big_t_1, big_t_2 (line 49-54) let tau_1 = Scalar::random(&mut rng); @@ -107,23 +117,8 @@ impl RangeProof { // Calculate l, r - which is only necessary if not doing IPP (line 55-57) // Adding this in a seperate loop so we can remove it easily later - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - let mut l_total = Vec::new(); - let mut r_total = Vec::new(); - - for i in 0..n { - let a_l = (v >> i) & 1; - l_total.push(-z + s_l[i] * x); - r_total.push(exp_y * (z + s_r[i] * x) + z2 * exp_2); - if a_l == 0 { - r_total[i] -= exp_y; - } else { - l_total[i] += Scalar::one(); - } - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } + let l_total = l.eval(x); + let r_total = r.eval(x); // Generate proof! 
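The Karatsuba step above recovers the middle coefficient with a single extra inner product: since l(x) = l.0 + l.1*x and r(x) = r.0 + r.1*x, t1 = <l.0, r.1> + <l.1, r.0>, which equals <l.0 + l.1, r.0 + r.1> - t0 - t2. A direct cross-check, as a sketch assuming the same l, r, t and inner_product bindings as above:

    let t1_direct = inner_product(&l.0, &r.1) + inner_product(&l.1, &r.0);
    debug_assert_eq!(t.1, t1_direct);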
(line 61) RangeProof { @@ -217,6 +212,20 @@ impl PolyDeg3 { } } +impl VecPoly2 { + pub fn new(n: usize) -> VecPoly2 { + VecPoly2(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) + } + pub fn eval(&self, x: Scalar) -> Vec { + let n = self.0.len(); + let mut out = vec![Scalar::zero(); n]; + for i in 0..n { + out[i] += self.0[i] + self.1[i] * x; + } + out + } +} + pub fn make_generators(point: &RistrettoPoint, n: usize) -> Vec { let mut generators = vec![RistrettoPoint::identity(); n]; @@ -247,7 +256,7 @@ pub fn inner_product(a: &Vec, b: &Vec) -> Scalar { let mut out = Scalar::zero(); if a.len() != b.len() { // throw some error - //println!("lengths of vectors don't match for inner product multiplication"); + println!("lengths of vectors don't match for inner product multiplication"); } for i in 0..a.len() { out += a[i] * b[i]; @@ -255,6 +264,18 @@ pub fn inner_product(a: &Vec, b: &Vec) -> Scalar { out } +pub fn add_vec(a: &Vec, b: &Vec) -> Vec { + let mut out = Vec::new(); + if a.len() != b.len() { + // throw some error + println!("lengths of vectors don't match for vector addition"); + } + for i in 0..a.len() { + out.push(a[i] + b[i]); + } + out +} + #[cfg(test)] mod tests { use super::*; From 411481f4906b040b229ccfdd57cd2c471a128437 Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 14 Feb 2018 15:06:51 -0800 Subject: [PATCH 025/186] add ipp outline --- src/inner_product_proof.rs | 67 ++++++++++++++++++++++++++++++++++++++ src/lib.rs | 1 + 2 files changed, 68 insertions(+) create mode 100644 src/inner_product_proof.rs diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs new file mode 100644 index 00000000..19303c5f --- /dev/null +++ b/src/inner_product_proof.rs @@ -0,0 +1,67 @@ +use std::iter; +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::ristretto; +use curve25519_dalek::traits::Identity; +use curve25519_dalek::scalar::Scalar; + +pub struct Prover { + +} + +pub struct Proof { + g_vec: Vec, + h_vec: Vec, + u: RistrettoPoint, + p: RistrettoPoint, + + l_vec: Vec, + r_vec: Vec, + a_final: Scalar, + b_final: Scalar, +} + +impl Prover { + pub fn prove( + g_vec: Vec, + h_vec: Vec, + u: RistrettoPoint, + p: RistrettoPoint, + a: Vec, + b: Vec, + ) -> Proof { + let g_prime = g_vec.clone(); + let h_prime = h_vec.clone(); + let p_prime = p.clone(); + let n = g_prime.len(); // change to ln(g.len()) + let l_vec = Vec::with_capacity(n); + let r_vec = Vec::with_capacity(n); + let (a_final, b_final) = Prover::recurse(g_prime, h_prime, u, p_prime, &l_vec, &r_vec, a, b); + Proof { + g_vec: g_vec, + h_vec: h_vec, + u: u, + p: p, + l_vec: l_vec, + r_vec: r_vec, + a_final: a_final, + b_final: b_final, + } + } + + fn recurse( + g_vec: Vec, + h_vec: Vec, + u: RistrettoPoint, + p: RistrettoPoint, + l_vec: &Vec, + r_vec: &Vec, + a: Vec, + b: Vec, + ) -> (Scalar, Scalar) { + let n = g_vec.len(); + if n == 1 { + return (a[0], b[0]); + } + (a[0], b[0]) + } +} \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index 0b4500f4..21551956 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,5 +10,6 @@ extern crate test; mod random_oracle; mod range_proof; +mod inner_product_proof; pub use range_proof::*; From 3c9dbef82cd9f8f5ec530f2dfbe636015978bb53 Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 14 Feb 2018 15:17:05 -0800 Subject: [PATCH 026/186] Work on recurse function --- src/inner_product_proof.rs | 19 ++++++++++++++++++- src/range_proof.rs | 4 ++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs 
index 19303c5f..f5780e6b 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -3,6 +3,8 @@ use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; use curve25519_dalek::traits::Identity; use curve25519_dalek::scalar::Scalar; +use range_proof::inner_product; +use range_proof::commit; // replace with the random oracle pub struct Prover { @@ -58,10 +60,25 @@ impl Prover { a: Vec, b: Vec, ) -> (Scalar, Scalar) { - let n = g_vec.len(); + let mut n = g_vec.len(); if n == 1 { return (a[0], b[0]); } + n = n/2; + let c_l = inner_product(&a[..n], &b[n..]); + let c_r = inner_product(&a[n..], &b[..n]); + + let l_points_iter = g_vec[n..].iter().chain(h_vec[..n].iter()).chain(iter::once(&u)); + let l_scalars_iter = a[..n].iter().chain(b[n..].iter()).chain(iter::once(&c_l)); + let big_l = ristretto::multiscalar_mult(l_scalars_iter, l_points_iter); + + let r_points_iter = g_vec[..n].iter().chain(h_vec[n..].iter()).chain(iter::once(&u)); + let r_scalars_iter = a[n..].iter().chain(b[..n].iter()).chain(iter::once(&c_r)); + let big_r = ristretto::multiscalar_mult(r_scalars_iter, r_points_iter); + + // TODO: use random oracle for the challenge instead + // TODO: store big_l, big_r in l_vec, r_vec + let (x, _) = commit(&big_l, &big_r); (a[0], b[0]) } } \ No newline at end of file diff --git a/src/range_proof.rs b/src/range_proof.rs index 4b639f8f..85b10bbc 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -252,7 +252,7 @@ pub fn commit(v1: &RistrettoPoint, v2: &RistrettoPoint) -> (Scalar, Scalar) { (c1, c2) } -pub fn inner_product(a: &Vec, b: &Vec) -> Scalar { +pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { let mut out = Scalar::zero(); if a.len() != b.len() { // throw some error @@ -264,7 +264,7 @@ pub fn inner_product(a: &Vec, b: &Vec) -> Scalar { out } -pub fn add_vec(a: &Vec, b: &Vec) -> Vec { +pub fn add_vec(a: &[Scalar], b: &[Scalar]) -> Vec { let mut out = Vec::new(); if a.len() != b.len() { // throw some error From 65cc1e88187643e0fa47fcd4664d3defd10fcc68 Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 14 Feb 2018 15:29:27 -0800 Subject: [PATCH 027/186] change to loop instead of recursive call --- src/inner_product_proof.rs | 80 ++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 46 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index f5780e6b..1252a07d 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -1,7 +1,6 @@ use std::iter; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; -use curve25519_dalek::traits::Identity; use curve25519_dalek::scalar::Scalar; use range_proof::inner_product; use range_proof::commit; // replace with the random oracle @@ -28,57 +27,46 @@ impl Prover { h_vec: Vec, u: RistrettoPoint, p: RistrettoPoint, - a: Vec, - b: Vec, - ) -> Proof { + mut a: Vec, + mut b: Vec, + ) -> Option { let g_prime = g_vec.clone(); let h_prime = h_vec.clone(); let p_prime = p.clone(); - let n = g_prime.len(); // change to ln(g.len()) - let l_vec = Vec::with_capacity(n); - let r_vec = Vec::with_capacity(n); - let (a_final, b_final) = Prover::recurse(g_prime, h_prime, u, p_prime, &l_vec, &r_vec, a, b); - Proof { - g_vec: g_vec, - h_vec: h_vec, - u: u, - p: p, - l_vec: l_vec, - r_vec: r_vec, - a_final: a_final, - b_final: b_final, - } - } + let ln_n = g_vec.len(); // change to ln(g.len()) + let l_vec = Vec::with_capacity(ln_n); + let r_vec = Vec::with_capacity(ln_n); - fn recurse( - g_vec: Vec, - h_vec: Vec, - u: 
RistrettoPoint, - p: RistrettoPoint, - l_vec: &Vec, - r_vec: &Vec, - a: Vec, - b: Vec, - ) -> (Scalar, Scalar) { - let mut n = g_vec.len(); - if n == 1 { - return (a[0], b[0]); - } - n = n/2; - let c_l = inner_product(&a[..n], &b[n..]); - let c_r = inner_product(&a[n..], &b[..n]); + for j in ln_n..1 { + let mut n = g_vec.len(); + if n == 1 { + return Some(Proof { + g_vec: g_vec, + h_vec: h_vec, + u: u, + p: p, + l_vec: l_vec, + r_vec: r_vec, + a_final: a[0], + b_final: b[0], + }) + } + n = n/2; + let c_l = inner_product(&a[..n], &b[n..]); + let c_r = inner_product(&a[n..], &b[..n]); - let l_points_iter = g_vec[n..].iter().chain(h_vec[..n].iter()).chain(iter::once(&u)); - let l_scalars_iter = a[..n].iter().chain(b[n..].iter()).chain(iter::once(&c_l)); - let big_l = ristretto::multiscalar_mult(l_scalars_iter, l_points_iter); + let l_points_iter = g_prime[n..].iter().chain(h_prime[..n].iter()).chain(iter::once(&u)); + let l_scalars_iter = a[..n].iter().chain(b[n..].iter()).chain(iter::once(&c_l)); + let big_l = ristretto::multiscalar_mult(l_scalars_iter, l_points_iter); - let r_points_iter = g_vec[..n].iter().chain(h_vec[n..].iter()).chain(iter::once(&u)); - let r_scalars_iter = a[n..].iter().chain(b[..n].iter()).chain(iter::once(&c_r)); - let big_r = ristretto::multiscalar_mult(r_scalars_iter, r_points_iter); + let r_points_iter = g_prime[..n].iter().chain(h_prime[n..].iter()).chain(iter::once(&u)); + let r_scalars_iter = a[n..].iter().chain(b[..n].iter()).chain(iter::once(&c_r)); + let big_r = ristretto::multiscalar_mult(r_scalars_iter, r_points_iter); - // TODO: use random oracle for the challenge instead - // TODO: store big_l, big_r in l_vec, r_vec - let (x, _) = commit(&big_l, &big_r); - (a[0], b[0]) + // TODO: use random oracle for the challenge instead + // TODO: store big_l, big_r in l_vec, r_vec + let (x, _) = commit(&big_l, &big_r); + } + None } } \ No newline at end of file From 1bd3cb69816f3690661887d00e67991e4fe27358 Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 14 Feb 2018 16:25:17 -0800 Subject: [PATCH 028/186] first pass on proving --- src/inner_product_proof.rs | 105 ++++++++++++++++++++++--------------- 1 file changed, 63 insertions(+), 42 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 1252a07d..b470fd3c 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -1,3 +1,5 @@ +#![allow(non_snake_case)] + use std::iter; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; @@ -10,10 +12,10 @@ pub struct Prover { } pub struct Proof { - g_vec: Vec, - h_vec: Vec, - u: RistrettoPoint, - p: RistrettoPoint, + // g_vec: Vec, + // h_vec: Vec, + // u: RistrettoPoint, + // p: RistrettoPoint, l_vec: Vec, r_vec: Vec, @@ -23,50 +25,69 @@ pub struct Proof { impl Prover { pub fn prove( - g_vec: Vec, - h_vec: Vec, - u: RistrettoPoint, - p: RistrettoPoint, - mut a: Vec, - mut b: Vec, - ) -> Option { - let g_prime = g_vec.clone(); - let h_prime = h_vec.clone(); - let p_prime = p.clone(); - let ln_n = g_vec.len(); // change to ln(g.len()) - let l_vec = Vec::with_capacity(ln_n); - let r_vec = Vec::with_capacity(ln_n); + mut G_vec: Vec, + mut H_vec: Vec, + Q: RistrettoPoint, + mut P: RistrettoPoint, + mut a_vec: Vec, + mut b_vec: Vec, + ) -> Proof { + let G = &mut G_vec[..]; + let H = &mut H_vec[..]; + let a = &mut a_vec[..]; + let b = &mut b_vec[..]; - for j in ln_n..1 { - let mut n = g_vec.len(); - if n == 1 { - return Some(Proof { - g_vec: g_vec, - h_vec: h_vec, - u: u, - p: p, - l_vec: l_vec, - r_vec: 
r_vec, - a_final: a[0], - b_final: b[0], - }) - } + let mut n = G.len(); + let lg_n = n.next_power_of_two().trailing_zeros() as usize; + let mut L_vec = Vec::with_capacity(lg_n); + let mut R_vec = Vec::with_capacity(lg_n); + + while n != 1 { n = n/2; - let c_l = inner_product(&a[..n], &b[n..]); - let c_r = inner_product(&a[n..], &b[..n]); + let (a_l, a_r) = a.split_at_mut(n); + let (b_l, b_r) = b.split_at_mut(n); + + let (G_l, G_r) = G.split_at_mut(n); + let (H_l, H_r) = H.split_at_mut(n); + + let c_l = inner_product(&a_l, &b_r); + let c_r = inner_product(&a_r, &b_l); - let l_points_iter = g_prime[n..].iter().chain(h_prime[..n].iter()).chain(iter::once(&u)); - let l_scalars_iter = a[..n].iter().chain(b[n..].iter()).chain(iter::once(&c_l)); - let big_l = ristretto::multiscalar_mult(l_scalars_iter, l_points_iter); + let L = ristretto::multiscalar_mult( + a_l.iter().chain(b_r.iter()).chain(iter::once(&c_l)), + G_r.iter().chain(H_l.iter()).chain(iter::once(&Q)) + ); - let r_points_iter = g_prime[..n].iter().chain(h_prime[n..].iter()).chain(iter::once(&u)); - let r_scalars_iter = a[n..].iter().chain(b[..n].iter()).chain(iter::once(&c_r)); - let big_r = ristretto::multiscalar_mult(r_scalars_iter, r_points_iter); + let R = ristretto::multiscalar_mult( + a_r.iter().chain(b_l.iter()).chain(iter::once(&c_r)), + G_l.iter().chain(H_r.iter()).chain(iter::once(&Q)) + ); + + L_vec.push(L); + R_vec.push(R); // TODO: use random oracle for the challenge instead - // TODO: store big_l, big_r in l_vec, r_vec - let (x, _) = commit(&big_l, &big_r); + let (x, _) = commit(&L, &R); + let x_inv = x.invert(); + + for i in 0..n { + a_l[i] = a_l[i] * x + a_r[i] * x_inv; + b_l[i] = b_l[i] * x_inv + b_r[i] * x; + G_l[i] = ristretto::multiscalar_mult(&[x_inv, x], &[G_l[i], G_r[i]]); + H_l[i] = ristretto::multiscalar_mult(&[x, x_inv], &[H_l[i], H_r[i]]); + } + P += ristretto::multiscalar_mult(&[x*x, x_inv*x_inv], &[L, R]); + a = a_l; + b = b_l; + G = G_l; + H = H_l; + } + debug_assert_eq!(a.len(), 1); + return Proof { + l_vec: L_vec, + r_vec: R_vec, + a_final: a[0], + b_final: b[0], } - None } } \ No newline at end of file From f27d2128b3ec7b34a9c365c445f5cf12b6d49f81 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Wed, 14 Feb 2018 17:30:43 -0800 Subject: [PATCH 029/186] assuming nightly, so we dont need a bench feature (#7) --- Cargo.toml | 1 - README.md | 2 +- src/lib.rs | 4 ++-- src/range_proof.rs | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 86e25491..30ef520f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,4 +15,3 @@ hex = "^0.3" [features] yolocrypto = ["curve25519-dalek/yolocrypto"] std = ["curve25519-dalek/std"] -bench = [] diff --git a/README.md b/README.md index 45cc9e46..bfca986c 100644 --- a/README.md +++ b/README.md @@ -15,5 +15,5 @@ cargo test Run benchmarks: ``` -cargo bench --features="bench" +cargo bench ``` diff --git a/src/lib.rs b/src/lib.rs index 0b4500f4..3cecd8d3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,11 +1,11 @@ -#![cfg_attr(feature = "bench", feature(test))] +#![feature(test)] extern crate curve25519_dalek; extern crate sha2; extern crate rand; extern crate tiny_keccak; -#[cfg(all(test, feature = "bench"))] +#[cfg(test)] extern crate test; mod random_oracle; diff --git a/src/range_proof.rs b/src/range_proof.rs index 4b639f8f..ed4963b7 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -343,7 +343,7 @@ mod tests { } } -#[cfg(all(test, feature = "bench"))] +#[cfg(test)] mod bench { use super::*; use rand::Rng; From 
d2d774df32f9a9fc48afe21f36d6ed91347e5ced Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 14 Feb 2018 17:40:04 -0800 Subject: [PATCH 030/186] Finish prover for inner product proof --- src/inner_product_proof.rs | 58 ++++++++++++++++++++++++++++++-------- src/lib.rs | 4 +-- 2 files changed, 49 insertions(+), 13 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index b470fd3c..3383bd59 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -1,4 +1,5 @@ #![allow(non_snake_case)] +#![feature(nll)] use std::iter; use curve25519_dalek::ristretto::RistrettoPoint; @@ -6,17 +7,14 @@ use curve25519_dalek::ristretto; use curve25519_dalek::scalar::Scalar; use range_proof::inner_product; use range_proof::commit; // replace with the random oracle +use range_proof::make_generators; +use sha2::Sha256; pub struct Prover { } pub struct Proof { - // g_vec: Vec, - // h_vec: Vec, - // u: RistrettoPoint, - // p: RistrettoPoint, - l_vec: Vec, r_vec: Vec, a_final: Scalar, @@ -27,15 +25,15 @@ impl Prover { pub fn prove( mut G_vec: Vec, mut H_vec: Vec, - Q: RistrettoPoint, mut P: RistrettoPoint, + Q: RistrettoPoint, mut a_vec: Vec, mut b_vec: Vec, ) -> Proof { - let G = &mut G_vec[..]; - let H = &mut H_vec[..]; - let a = &mut a_vec[..]; - let b = &mut b_vec[..]; + let mut G = &mut G_vec[..]; + let mut H = &mut H_vec[..]; + let mut a = &mut a_vec[..]; + let mut b = &mut b_vec[..]; let mut n = G.len(); let lg_n = n.next_power_of_two().trailing_zeros() as usize; @@ -46,9 +44,16 @@ impl Prover { n = n/2; let (a_l, a_r) = a.split_at_mut(n); let (b_l, b_r) = b.split_at_mut(n); - let (G_l, G_r) = G.split_at_mut(n); let (H_l, H_r) = H.split_at_mut(n); + // let a_l = &a[0..n]; + // let a_r = &a[n..n*2]; + // let b_l = &b[0..n]; + // let b_r = &b[n..n*2]; + // let G_l = &G[0..n]; + // let G_r = &G[n..n*2]; + // let H_l = &H[0..n]; + // let H_r = &H[n..n*2]; let c_l = inner_product(&a_l, &b_r); let c_r = inner_product(&a_r, &b_l); @@ -90,4 +95,35 @@ impl Prover { b_final: b[0], } } +} + +#[cfg(test)] +mod tests { + use super::*; + #[test] + fn test_prover_basic() { + + } +} + +#[cfg(test)] +mod bench { + + use super::*; + use test::Bencher; + + #[bench] + fn benchmark_prover_basic(b: &mut Bencher) { + let n = 64; + let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); + let G_vec = make_generators(G, n); + let H_vec = make_generators(H, n); + let Q = RistrettoPoint::hash_from_bytes::("more".as_bytes()); + let P = RistrettoPoint::hash_from_bytes::("points".as_bytes()); + let a_vec = vec![Scalar::from_u64(1); n]; + let b_vec = vec![Scalar::from_u64(2); n]; + + b.iter(|| Prover::prove(G_vec.clone(), H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone())); + } } \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index 21551956..740a817e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,11 +1,11 @@ #![cfg_attr(feature = "bench", feature(test))] +#![feature(nll)] +#![feature(test)] extern crate curve25519_dalek; extern crate sha2; extern crate rand; extern crate tiny_keccak; - -#[cfg(all(test, feature = "bench"))] extern crate test; mod random_oracle; From b63b0f5c1fd0dc1e841766759dd4adc17a64acd7 Mon Sep 17 00:00:00 2001 From: Cathie Date: Thu, 15 Feb 2018 11:59:18 -0800 Subject: [PATCH 031/186] rename variables in range proof to match new symbols --- src/range_proof.rs | 114 ++++++++++++++++++++++----------------------- 1 file changed, 57 insertions(+), 57 deletions(-) diff --git 
a/src/range_proof.rs b/src/range_proof.rs index ed4963b7..923dc60f 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -14,8 +14,8 @@ struct VecPoly2(Vec, Vec); pub struct RangeProof { - tau_x: Scalar, - mu: Scalar, + t_x_blinding: Scalar, + e_blinding: Scalar, t: Scalar, // don't need if doing inner product proof @@ -23,11 +23,11 @@ pub struct RangeProof { r: Vec, // committed values - big_v: RistrettoPoint, - big_a: RistrettoPoint, - big_s: RistrettoPoint, - big_t_1: RistrettoPoint, - big_t_2: RistrettoPoint, + V: RistrettoPoint, + A: RistrettoPoint, + S: RistrettoPoint, + T_1: RistrettoPoint, + T_2: RistrettoPoint, // public knowledge n: usize, @@ -41,38 +41,38 @@ impl RangeProof { // useful for debugging: // let mut rng: StdRng = StdRng::from_seed(&[1, 2, 3, 4]); - // Setup: generate groups g & h, commit to v (line 34) - let g = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let h = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); - let g_vec = make_generators(g, n); - let h_vec = make_generators(h, n); - let gamma = Scalar::random(&mut rng); - let big_v = h * gamma + g * Scalar::from_u64(v); - - // Compute big_a (line 39-42) - let alpha = Scalar::random(&mut rng); - let mut big_a = h * alpha; + // Setup (line 34) + let B = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let B_blinding = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); + let G = make_generators(B, n); + let H = make_generators(B_blinding, n); + let v_blinding = Scalar::random(&mut rng); + let V = B_blinding * v_blinding + B * Scalar::from_u64(v); + + // Compute A (line 39-42) + let a_blinding = Scalar::random(&mut rng); + let mut A = B_blinding * a_blinding; for i in 0..n { let v_i = (v >> i) & 1; if v_i == 0 { - big_a -= h_vec[i]; + A -= H[i]; } else { - big_a += g_vec[i]; + A += G[i]; } } - // Compute big_s (in the paper: S; line 43-45) - let points_iter = iter::once(h).chain(g_vec.iter()).chain(h_vec.iter()); + // Compute S (in the paper: S; line 43-45) + let points_iter = iter::once(B_blinding).chain(G.iter()).chain(H.iter()); let randomness: Vec<_> = (0..(1 + 2 * n)).map(|_| Scalar::random(&mut rng)).collect(); - let big_s = ristretto::multiscalar_mult(&randomness, points_iter); + let S = ristretto::multiscalar_mult(&randomness, points_iter); - // Save/label randomness (rho, s_L, s_R) to be used later - let rho = &randomness[0]; - let s_l = &randomness[1..(n + 1)]; - let s_r = &randomness[(n + 1)..(1 + 2 * n)]; + // Save/label randomness (s_blinding, s_a, s_b) to be used later + let s_blinding = &randomness[0]; + let s_a = &randomness[1..(n + 1)]; + let s_b = &randomness[(n + 1)..(1 + 2 * n)]; // Generate y, z by committing to A, S (line 46-48) - let (y, z) = commit(&big_a, &big_s); + let (y, z) = commit(&A, &S); // Calculate t by calculating vectors l0, l1, r0, r1 and multiplying let mut l = VecPoly2::new(n); @@ -84,9 +84,9 @@ impl RangeProof { for i in 0..n { let v_i = (v >> i) & 1; l.0[i] -= z; - l.1[i] += s_l[i]; + l.1[i] += s_a[i]; r.0[i] += exp_y * z + z2 * exp_2; - r.1[i] += exp_y * s_r[i]; + r.1[i] += exp_y * s_b[i]; if v_i == 0 { r.0[i] -= exp_y; } else { @@ -103,16 +103,16 @@ impl RangeProof { let l_r_mul = inner_product(&l_add, &r_add); t.1 = l_r_mul - t.0 - t.2; - // Generate x by committing to big_t_1, big_t_2 (line 49-54) - let tau_1 = Scalar::random(&mut rng); - let tau_2 = Scalar::random(&mut rng); - let big_t_1 = g * t.1 + h * tau_1; - let big_t_2 = g * t.2 + h * tau_2; - let (x, _) = commit(&big_t_1, &big_t_2); // TODO: use a different commit? 
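The TODO here notes that only the first output of commit() is used for this challenge. A single-output variant would be straightforward; a sketch, assuming the same Sha512/Digest imports and compressed-point hashing as the existing commit() (the name commit_one is illustrative only):

    #[allow(non_snake_case)]
    fn commit_one(A: &RistrettoPoint, B: &RistrettoPoint) -> Scalar {
        let mut digest = Sha512::new();
        digest.input(A.compress().as_bytes());
        digest.input(B.compress().as_bytes());
        Scalar::from_hash(digest)
    }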
+ // Generate x by committing to T_1, T_2 (line 49-54) + let t_1_blinding = Scalar::random(&mut rng); + let t_2_blinding = Scalar::random(&mut rng); + let T_1 = B * t.1 + B_blinding * t_1_blinding; + let T_2 = B * t.2 + B_blinding * t_2_blinding; + let (x, _) = commit(&T_1, &T_2); // TODO: use a different commit? // Generate final values for proof (line 55-60) - let tau_x = tau_1 * x + tau_2 * x * x + z2 * gamma; - let mu = alpha + rho * x; + let t_x_blinding = t_1_blinding * x + t_2_blinding * x * x + z2 * v_blinding; + let e_blinding = a_blinding + s_blinding * x; let t_hat = t.0 + t.1 * x + t.2 * x * x; // Calculate l, r - which is only necessary if not doing IPP (line 55-57) @@ -122,28 +122,28 @@ impl RangeProof { // Generate proof! (line 61) RangeProof { - tau_x: tau_x, - mu: mu, + t_x_blinding: t_x_blinding, + e_blinding: e_blinding, t: t_hat, l: l_total, r: r_total, - big_v: big_v, - big_a: big_a, - big_s: big_s, - big_t_1: big_t_1, - big_t_2: big_t_2, + V: V, + A: A, + S: S, + T_1: T_1, + T_2: T_2, n: n, - g: *g, - h: *h, + g: *B, + h: *B_blinding, } } pub fn verify_proof(&self) -> bool { - let (y, z) = commit(&self.big_a, &self.big_s); - let (x, _) = commit(&self.big_t_1, &self.big_t_2); - let g_vec = make_generators(&self.g, self.n); + let (y, z) = commit(&self.A, &self.S); + let (x, _) = commit(&self.T_1, &self.T_2); + let G = make_generators(&self.g, self.n); let mut hprime_vec = make_generators(&self.h, self.n); // line 63: check that t = t0 + t1 * x + t2 * x * x @@ -158,8 +158,8 @@ impl RangeProof { exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } - let t_check = self.g * power_g + self.big_v * z2 + self.big_t_1 * x + self.big_t_2 * x * x; - let t_commit = self.g * self.t + self.h * self.tau_x; + let t_check = self.g * power_g + self.V * z2 + self.T_1 * x + self.T_2 * x * x; + let t_commit = self.g * self.t + self.h * self.t_x_blinding; if t_commit != t_check { //println!("fails check on line 63"); return false; @@ -167,12 +167,12 @@ impl RangeProof { // line 62: calculate hprime // line 64: compute commitment to l, r - let mut sum_g_vec = RistrettoPoint::identity(); + let mut sum_G = RistrettoPoint::identity(); for i in 0..self.n { - sum_g_vec += g_vec[i]; + sum_G += G[i]; } - let mut big_p = self.big_a + self.big_s * x; - big_p -= sum_g_vec * z; + let mut big_p = self.A + self.S * x; + big_p -= sum_G * z; let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 @@ -188,7 +188,7 @@ impl RangeProof { // line 65: check that l, r are correct let mut big_p_check = self.h * self.mu; - let points_iter = g_vec.iter().chain(hprime_vec.iter()); + let points_iter = G.iter().chain(hprime_vec.iter()); let scalars_iter = self.l.iter().chain(self.r.iter()); big_p_check += ristretto::multiscalar_mult(scalars_iter, points_iter); if big_p != big_p_check { From 5d91fcc83d2c0d3ac3f6d99732df5e0f7ee2e742 Mon Sep 17 00:00:00 2001 From: Cathie Date: Thu, 15 Feb 2018 12:09:01 -0800 Subject: [PATCH 032/186] update comments --- src/range_proof.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/range_proof.rs b/src/range_proof.rs index 923dc60f..1bb8a3c1 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -41,7 +41,7 @@ impl RangeProof { // useful for debugging: // let mut rng: StdRng = StdRng::from_seed(&[1, 2, 3, 4]); - // Setup (line 34) + // Setup: generate points, commit to v (in the paper: g, h, bold(g), bolg(h); line 34) let B = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); 
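V is the Pedersen commitment to the value v introduced in the previous patch (V = B_blinding * v_blinding + B * Scalar::from_u64(v)). A minimal sketch of the opening check, assuming those bindings; the helper name opens_to is illustrative only:

    #[allow(non_snake_case)]
    fn opens_to(V: &RistrettoPoint, v: u64, v_blinding: Scalar,
                B: &RistrettoPoint, B_blinding: &RistrettoPoint) -> bool {
        *V == B_blinding * v_blinding + B * Scalar::from_u64(v)
    }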
let B_blinding = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G = make_generators(B, n); @@ -61,12 +61,12 @@ impl RangeProof { } } - // Compute S (in the paper: S; line 43-45) + // Compute S (line 43-45) let points_iter = iter::once(B_blinding).chain(G.iter()).chain(H.iter()); let randomness: Vec<_> = (0..(1 + 2 * n)).map(|_| Scalar::random(&mut rng)).collect(); let S = ristretto::multiscalar_mult(&randomness, points_iter); - // Save/label randomness (s_blinding, s_a, s_b) to be used later + // Save/label randomness to be used later (in the paper: rho, s_L, s_R) let s_blinding = &randomness[0]; let s_a = &randomness[1..(n + 1)]; let s_b = &randomness[(n + 1)..(1 + 2 * n)]; From a466494f4d168ee529918ad04a6f9ce48b5502c9 Mon Sep 17 00:00:00 2001 From: Cathie Date: Thu, 15 Feb 2018 12:10:54 -0800 Subject: [PATCH 033/186] fix errors --- src/range_proof.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/range_proof.rs b/src/range_proof.rs index 1bb8a3c1..e1e1ab52 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -31,8 +31,8 @@ pub struct RangeProof { // public knowledge n: usize, - g: RistrettoPoint, - h: RistrettoPoint, + B: RistrettoPoint, + B_blinding: RistrettoPoint, } impl RangeProof { @@ -135,16 +135,16 @@ impl RangeProof { T_2: T_2, n: n, - g: *B, - h: *B_blinding, + B: *B, + B_blinding: *B_blinding, } } pub fn verify_proof(&self) -> bool { let (y, z) = commit(&self.A, &self.S); let (x, _) = commit(&self.T_1, &self.T_2); - let G = make_generators(&self.g, self.n); - let mut hprime_vec = make_generators(&self.h, self.n); + let G = make_generators(&self.B, self.n); + let mut hprime_vec = make_generators(&self.B_blinding, self.n); // line 63: check that t = t0 + t1 * x + t2 * x * x let z2 = z * z; @@ -158,8 +158,8 @@ impl RangeProof { exp_y = exp_y * y; // y^i -> y^(i+1) exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } - let t_check = self.g * power_g + self.V * z2 + self.T_1 * x + self.T_2 * x * x; - let t_commit = self.g * self.t + self.h * self.t_x_blinding; + let t_check = self.B * power_g + self.V * z2 + self.T_1 * x + self.T_2 * x * x; + let t_commit = self.B * self.t + self.B_blinding * self.t_x_blinding; if t_commit != t_check { //println!("fails check on line 63"); return false; @@ -187,7 +187,7 @@ impl RangeProof { } // line 65: check that l, r are correct - let mut big_p_check = self.h * self.mu; + let mut big_p_check = self.B_blinding * self.e_blinding; let points_iter = G.iter().chain(hprime_vec.iter()); let scalars_iter = self.l.iter().chain(self.r.iter()); big_p_check += ristretto::multiscalar_mult(scalars_iter, points_iter); From 198d83758c193404191f1ffbcdd3de7b52997aaa Mon Sep 17 00:00:00 2001 From: Cathie Date: Thu, 15 Feb 2018 16:41:56 -0800 Subject: [PATCH 034/186] Update test names, add more tests --- src/inner_product_proof.rs | 64 +++++++++++++++++++++++++++++++------- src/range_proof.rs | 18 +++++------ 2 files changed, 61 insertions(+), 21 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 3383bd59..b9a8a514 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -9,7 +9,6 @@ use range_proof::inner_product; use range_proof::commit; // replace with the random oracle use range_proof::make_generators; use sha2::Sha256; - pub struct Prover { } @@ -45,15 +44,7 @@ impl Prover { let (a_l, a_r) = a.split_at_mut(n); let (b_l, b_r) = b.split_at_mut(n); let (G_l, G_r) = G.split_at_mut(n); - let (H_l, H_r) = H.split_at_mut(n); - // let a_l = &a[0..n]; - // let a_r 
= &a[n..n*2]; - // let b_l = &b[0..n]; - // let b_r = &b[n..n*2]; - // let G_l = &G[0..n]; - // let G_r = &G[n..n*2]; - // let H_l = &H[0..n]; - // let H_r = &H[n..n*2]; + let (H_l, H_r) = H.split_at_mut(n); let c_l = inner_product(&a_l, &b_r); let c_r = inner_product(&a_r, &b_l); @@ -81,6 +72,7 @@ impl Prover { G_l[i] = ristretto::multiscalar_mult(&[x_inv, x], &[G_l[i], G_r[i]]); H_l[i] = ristretto::multiscalar_mult(&[x, x_inv], &[H_l[i], H_r[i]]); } + P += ristretto::multiscalar_mult(&[x*x, x_inv*x_inv], &[L, R]); a = a_l; b = b_l; @@ -101,8 +93,42 @@ impl Prover { mod tests { use super::*; #[test] - fn test_prover_basic() { + fn make_ipp_64() { + let n = 64; + let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); + let G_vec = make_generators(G, n); + let H_vec = make_generators(H, n); + let Q = RistrettoPoint::hash_from_bytes::("more".as_bytes()); + let P = RistrettoPoint::hash_from_bytes::("points".as_bytes()); + let a_vec = vec![Scalar::from_u64(1); n]; + let b_vec = vec![Scalar::from_u64(2); n]; + + let proof = Prover::prove(G_vec.clone(), H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone()); + + assert_eq!(proof.a_final.as_bytes(), + &[61, 162, 237, 210, 105, 26, 179, 39, 111, 70, 186, 58, 83, 18, 46, 189, 41, 225, 70, 190, 73, 180, 43, 17, 86, 38, 166, 174, 31, 71, 100, 4]); + assert_eq!(proof.b_final.as_bytes(), + &[122, 68, 219, 165, 211, 52, 102, 79, 222, 140, 116, 117, 166, 36, 92, 122, 83, 194, 141, 124, 147, 104, 87, 34, 172, 76, 76, 93, 63, 142, 200, 8]); + } + #[test] + fn make_ipp_32() { + let n = 32; + let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); + let G_vec = make_generators(G, n); + let H_vec = make_generators(H, n); + let Q = RistrettoPoint::hash_from_bytes::("more".as_bytes()); + let P = RistrettoPoint::hash_from_bytes::("points".as_bytes()); + let a_vec = vec![Scalar::from_u64(1); n]; + let b_vec = vec![Scalar::from_u64(2); n]; + let proof = Prover::prove(G_vec.clone(), H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone()); + + assert_eq!(proof.a_final.as_bytes(), + &[108, 163, 168, 218, 202, 249, 219, 101, 99, 124, 105, 179, 50, 105, 192, 39, 195, 72, 222, 43, 160, 80, 14, 59, 46, 245, 156, 102, 39, 63, 166, 10]); + assert_eq!(proof.b_final.as_bytes(), + &[235, 114, 91, 88, 123, 144, 165, 115, 240, 91, 219, 195, 134, 216, 161, 58, 134, 145, 188, 87, 64, 161, 28, 118, 92, 234, 57, 205, 78, 126, 76, 5]); } } @@ -113,7 +139,7 @@ mod bench { use test::Bencher; #[bench] - fn benchmark_prover_basic(b: &mut Bencher) { + fn make_ipp_64(b: &mut Bencher) { let n = 64; let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); @@ -124,6 +150,20 @@ mod bench { let a_vec = vec![Scalar::from_u64(1); n]; let b_vec = vec![Scalar::from_u64(2); n]; + b.iter(|| Prover::prove(G_vec.clone(), H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone())); + } + #[bench] + fn make_ipp_32(b: &mut Bencher) { + let n = 32; + let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); + let G_vec = make_generators(G, n); + let H_vec = make_generators(H, n); + let Q = RistrettoPoint::hash_from_bytes::("more".as_bytes()); + let P = RistrettoPoint::hash_from_bytes::("points".as_bytes()); + let a_vec = vec![Scalar::from_u64(1); n]; + let b_vec = vec![Scalar::from_u64(2); n]; + b.iter(|| Prover::prove(G_vec.clone(), 
H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone())); } } \ No newline at end of file diff --git a/src/range_proof.rs b/src/range_proof.rs index 85b10bbc..f80970d0 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -298,14 +298,14 @@ mod tests { assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); } #[test] - fn test_t() { + fn test_rp_t() { let rp = RangeProof::generate_proof(1, 1); assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); let rp = RangeProof::generate_proof(1, 2); assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); } #[test] - fn test_verify_simple() { + fn verify_rp_simple() { for n in &[1, 2, 4, 8, 16, 32] { //println!("n: {:?}", n); let rp = RangeProof::generate_proof(0, *n); @@ -321,7 +321,7 @@ mod tests { } } #[test] - fn test_verify_rand_big() { + fn verify_rp_rand_big() { for _ in 0..50 { let mut rng: OsRng = OsRng::new().unwrap(); let v: u64 = rng.next_u64(); @@ -332,7 +332,7 @@ mod tests { } } #[test] - fn test_verify_rand_small() { + fn verify_rp_rand_small() { for _ in 0..50 { let mut rng: OsRng = OsRng::new().unwrap(); let v: u32 = rng.next_u32(); @@ -350,28 +350,28 @@ mod bench { use test::Bencher; #[bench] - fn benchmark_make_generators(b: &mut Bencher) { + fn make_generators(b: &mut Bencher) { use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; b.iter(|| make_generators(&RISTRETTO_BASEPOINT_POINT, 100)); } #[bench] - fn benchmark_make_proof_64(b: &mut Bencher) { + fn make_rp_64(b: &mut Bencher) { let mut rng: OsRng = OsRng::new().unwrap(); b.iter(|| RangeProof::generate_proof(rng.next_u64(), 64)); } #[bench] - fn benchmark_make_proof_32(b: &mut Bencher) { + fn make_rp_32(b: &mut Bencher) { let mut rng: OsRng = OsRng::new().unwrap(); b.iter(|| RangeProof::generate_proof(rng.next_u32() as u64, 32)); } #[bench] - fn benchmark_verify_proof_64(b: &mut Bencher) { + fn verify_rp_64(b: &mut Bencher) { let mut rng: OsRng = OsRng::new().unwrap(); let rp = RangeProof::generate_proof(rng.next_u64(), 64); b.iter(|| rp.verify_proof()); } #[bench] - fn benchmark_verify_proof_32(b: &mut Bencher) { + fn verify_rp_32(b: &mut Bencher) { let mut rng: OsRng = OsRng::new().unwrap(); let rp = RangeProof::generate_proof(rng.next_u32() as u64, 32); b.iter(|| rp.verify_proof()); From bf4def043cc600cfb541189acc4a4fdad31d4873 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 16:15:12 -0800 Subject: [PATCH 035/186] replace arrays with bytestrings for test vectors --- src/inner_product_proof.rs | 14 +++++-------- src/scalar.rs | 40 ++++++-------------------------------- 2 files changed, 11 insertions(+), 43 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index b9a8a514..803e4f9d 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -106,10 +106,8 @@ mod tests { let proof = Prover::prove(G_vec.clone(), H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone()); - assert_eq!(proof.a_final.as_bytes(), - &[61, 162, 237, 210, 105, 26, 179, 39, 111, 70, 186, 58, 83, 18, 46, 189, 41, 225, 70, 190, 73, 180, 43, 17, 86, 38, 166, 174, 31, 71, 100, 4]); - assert_eq!(proof.b_final.as_bytes(), - &[122, 68, 219, 165, 211, 52, 102, 79, 222, 140, 116, 117, 166, 36, 92, 122, 83, 194, 141, 124, 147, 104, 87, 34, 172, 76, 76, 93, 63, 142, 200, 8]); + assert_eq!(proof.a_final.as_bytes(), b"=\xa2\xed\xd2i\x1a\xb3'oF\xba:S\x12.\xbd)\xe1F\xbeI\xb4+\x11V&\xa6\xae\x1fGd\x04"); + assert_eq!(proof.b_final.as_bytes(), b"zD\xdb\xa5\xd34fO\xde\x8ctu\xa6$\\zS\xc2\x8d|\x93hW\"\xacLL]?\x8e\xc8\x08"); } #[test] fn make_ipp_32() { @@ -125,10 +123,8 
@@ mod tests { let proof = Prover::prove(G_vec.clone(), H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone()); - assert_eq!(proof.a_final.as_bytes(), - &[108, 163, 168, 218, 202, 249, 219, 101, 99, 124, 105, 179, 50, 105, 192, 39, 195, 72, 222, 43, 160, 80, 14, 59, 46, 245, 156, 102, 39, 63, 166, 10]); - assert_eq!(proof.b_final.as_bytes(), - &[235, 114, 91, 88, 123, 144, 165, 115, 240, 91, 219, 195, 134, 216, 161, 58, 134, 145, 188, 87, 64, 161, 28, 118, 92, 234, 57, 205, 78, 126, 76, 5]); + assert_eq!(proof.a_final.as_bytes(), b"l\xa3\xa8\xda\xca\xf9\xdbec|i\xb32i\xc0'\xc3H\xde+\xa0P\x0e;.\xf5\x9cf'?\xa6\n"); + assert_eq!(proof.b_final.as_bytes(), b"\xebr[X{\x90\xa5s\xf0[\xdb\xc3\x86\xd8\xa1:\x86\x91\xbcW@\xa1\x1cv\\\xea9\xcdN~L\x05"); } } @@ -166,4 +162,4 @@ mod bench { b.iter(|| Prover::prove(G_vec.clone(), H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone())); } -} \ No newline at end of file +} diff --git a/src/scalar.rs b/src/scalar.rs index ec915af4..fc838ea7 100644 --- a/src/scalar.rs +++ b/src/scalar.rs @@ -54,40 +54,12 @@ pub fn batch_invert(inputs: &mut [Scalar]) { mod test { use super::*; - #[test] +#[test] fn batch_invert_matches_nonbatched() { - let W = Scalar::from_bits( - [ - 0x84, 0xfc, 0xbc, 0x4f, 0x78, 0x12, 0xa0, 0x06, - 0xd7, 0x91, 0xd9, 0x7a, 0x3a, 0x27, 0xdd, 0x1e, - 0x21, 0x43, 0x45, 0xf7, 0xb1, 0xb9, 0x56, 0x7a, - 0x81, 0x30, 0x73, 0x44, 0x96, 0x85, 0xb5, 0x07, - ] - ); - let X = Scalar::from_bits( - [ - 0x4e, 0x5a, 0xb4, 0x34, 0x5d, 0x47, 0x08, 0x84, - 0x59, 0x13, 0xb4, 0x64, 0x1b, 0xc2, 0x7d, 0x52, - 0x52, 0xa5, 0x85, 0x10, 0x1b, 0xcc, 0x42, 0x44, - 0xd4, 0x49, 0xf4, 0xa8, 0x79, 0xd9, 0xf2, 0x04, - ] - ); - let Y = Scalar::from_bits( - [ - 0x90, 0x76, 0x33, 0xfe, 0x1c, 0x4b, 0x66, 0xa4, - 0xa2, 0x8d, 0x2d, 0xd7, 0x67, 0x83, 0x86, 0xc3, - 0x53, 0xd0, 0xde, 0x54, 0x55, 0xd4, 0xfc, 0x9d, - 0xe8, 0xef, 0x7a, 0xc3, 0x1f, 0x35, 0xbb, 0x05, - ] - ); - let Z = Scalar::from_bits( - [ - 0x05, 0x9d, 0x3e, 0x0b, 0x09, 0x26, 0x50, 0x3d, - 0xa3, 0x84, 0xa1, 0x3c, 0x92, 0x7a, 0xc2, 0x06, - 0x41, 0x98, 0xcf, 0x34, 0x3a, 0x24, 0xd5, 0xb7, - 0xeb, 0x33, 0x6a, 0x2d, 0xfc, 0x11, 0x21, 0x0b, - ] - ); + let W = Scalar::from_bits(b"\x84\xfc\xbcOx\x12\xa0\x06\xd7\x91\xd9z:'\xdd\x1e!CE\xf7\xb1\xb9Vz\x810sD\x96\x85\xb5\x07"); + let X = Scalar::from_bits(b"NZ\xb44]G\x08\x84Y\x13\xb4d\x1b\xc2}RR\xa5\x85\x10\x1b\xccBD\xd4I\xf4\xa8y\xd9\xf2\x04"); + let Y = Scalar::from_bits(b"\x90v3\xfe\x1cKf\xa4\xa2\x8d-\xd7g\x83\x86\xc3S\xd0\xdeTU\xd4\xfc\x9d\xe8\xefz\xc3\x1f5\xbb\x05"); + let Z = Scalar::from_bits(b"\x05\x9d>\x0b\t&P=\xa3\x84\xa1<\x92z\xc2\x06A\x98\xcf4:$\xd5\xb7\xeb3j-\xfc\x11!\x0b"); let list = vec![W, X, Y, Z, W*Y, X*Z, Y*Y, W*Z]; let mut inv_list = list.clone(); @@ -96,4 +68,4 @@ mod test { assert_eq!(list[i].invert(), inv_list[i]); } } -} \ No newline at end of file +} From 24b2dc8b42a4d8182c0f20f098456fd85e8789b9 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 16:39:18 -0800 Subject: [PATCH 036/186] apply rustfmt --- src/inner_product_proof.rs | 230 ++++++++++++++++++++++--------------- src/scalar.rs | 42 ++++--- 2 files changed, 161 insertions(+), 111 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 803e4f9d..2ace4d85 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -9,92 +9,90 @@ use range_proof::inner_product; use range_proof::commit; // replace with the random oracle use range_proof::make_generators; use sha2::Sha256; -pub struct Prover { - -} +pub struct Prover {} pub struct Proof { - l_vec: 
Vec, - r_vec: Vec, - a_final: Scalar, - b_final: Scalar, + l_vec: Vec, + r_vec: Vec, + a_final: Scalar, + b_final: Scalar, } impl Prover { - pub fn prove( - mut G_vec: Vec, - mut H_vec: Vec, - mut P: RistrettoPoint, - Q: RistrettoPoint, - mut a_vec: Vec, - mut b_vec: Vec, - ) -> Proof { - let mut G = &mut G_vec[..]; - let mut H = &mut H_vec[..]; - let mut a = &mut a_vec[..]; - let mut b = &mut b_vec[..]; - - let mut n = G.len(); - let lg_n = n.next_power_of_two().trailing_zeros() as usize; - let mut L_vec = Vec::with_capacity(lg_n); - let mut R_vec = Vec::with_capacity(lg_n); - - while n != 1 { - n = n/2; - let (a_l, a_r) = a.split_at_mut(n); - let (b_l, b_r) = b.split_at_mut(n); - let (G_l, G_r) = G.split_at_mut(n); - let (H_l, H_r) = H.split_at_mut(n); - - let c_l = inner_product(&a_l, &b_r); - let c_r = inner_product(&a_r, &b_l); - - let L = ristretto::multiscalar_mult( - a_l.iter().chain(b_r.iter()).chain(iter::once(&c_l)), - G_r.iter().chain(H_l.iter()).chain(iter::once(&Q)) - ); - - let R = ristretto::multiscalar_mult( - a_r.iter().chain(b_l.iter()).chain(iter::once(&c_r)), - G_l.iter().chain(H_r.iter()).chain(iter::once(&Q)) - ); - - L_vec.push(L); - R_vec.push(R); - - // TODO: use random oracle for the challenge instead - let (x, _) = commit(&L, &R); - let x_inv = x.invert(); - - for i in 0..n { - a_l[i] = a_l[i] * x + a_r[i] * x_inv; - b_l[i] = b_l[i] * x_inv + b_r[i] * x; - G_l[i] = ristretto::multiscalar_mult(&[x_inv, x], &[G_l[i], G_r[i]]); - H_l[i] = ristretto::multiscalar_mult(&[x, x_inv], &[H_l[i], H_r[i]]); - } - - P += ristretto::multiscalar_mult(&[x*x, x_inv*x_inv], &[L, R]); - a = a_l; - b = b_l; - G = G_l; - H = H_l; - } - debug_assert_eq!(a.len(), 1); - return Proof { - l_vec: L_vec, - r_vec: R_vec, - a_final: a[0], - b_final: b[0], - } - } + pub fn prove( + mut G_vec: Vec, + mut H_vec: Vec, + mut P: RistrettoPoint, + Q: RistrettoPoint, + mut a_vec: Vec, + mut b_vec: Vec, + ) -> Proof { + let mut G = &mut G_vec[..]; + let mut H = &mut H_vec[..]; + let mut a = &mut a_vec[..]; + let mut b = &mut b_vec[..]; + + let mut n = G.len(); + let lg_n = n.next_power_of_two().trailing_zeros() as usize; + let mut L_vec = Vec::with_capacity(lg_n); + let mut R_vec = Vec::with_capacity(lg_n); + + while n != 1 { + n = n / 2; + let (a_l, a_r) = a.split_at_mut(n); + let (b_l, b_r) = b.split_at_mut(n); + let (G_l, G_r) = G.split_at_mut(n); + let (H_l, H_r) = H.split_at_mut(n); + + let c_l = inner_product(&a_l, &b_r); + let c_r = inner_product(&a_r, &b_l); + + let L = ristretto::multiscalar_mult( + a_l.iter().chain(b_r.iter()).chain(iter::once(&c_l)), + G_r.iter().chain(H_l.iter()).chain(iter::once(&Q)), + ); + + let R = ristretto::multiscalar_mult( + a_r.iter().chain(b_l.iter()).chain(iter::once(&c_r)), + G_l.iter().chain(H_r.iter()).chain(iter::once(&Q)), + ); + + L_vec.push(L); + R_vec.push(R); + + // TODO: use random oracle for the challenge instead + let (x, _) = commit(&L, &R); + let x_inv = x.invert(); + + for i in 0..n { + a_l[i] = a_l[i] * x + a_r[i] * x_inv; + b_l[i] = b_l[i] * x_inv + b_r[i] * x; + G_l[i] = ristretto::multiscalar_mult(&[x_inv, x], &[G_l[i], G_r[i]]); + H_l[i] = ristretto::multiscalar_mult(&[x, x_inv], &[H_l[i], H_r[i]]); + } + + P += ristretto::multiscalar_mult(&[x * x, x_inv * x_inv], &[L, R]); + a = a_l; + b = b_l; + G = G_l; + H = H_l; + } + debug_assert_eq!(a.len(), 1); + return Proof { + l_vec: L_vec, + r_vec: R_vec, + a_final: a[0], + b_final: b[0], + }; + } } #[cfg(test)] mod tests { - use super::*; - #[test] - fn make_ipp_64() { - let n = 64; + use 
super::*; + #[test] + fn make_ipp_64() { + let n = 64; let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); @@ -104,14 +102,27 @@ mod tests { let a_vec = vec![Scalar::from_u64(1); n]; let b_vec = vec![Scalar::from_u64(2); n]; - let proof = Prover::prove(G_vec.clone(), H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone()); - - assert_eq!(proof.a_final.as_bytes(), b"=\xa2\xed\xd2i\x1a\xb3'oF\xba:S\x12.\xbd)\xe1F\xbeI\xb4+\x11V&\xa6\xae\x1fGd\x04"); - assert_eq!(proof.b_final.as_bytes(), b"zD\xdb\xa5\xd34fO\xde\x8ctu\xa6$\\zS\xc2\x8d|\x93hW\"\xacLL]?\x8e\xc8\x08"); - } - #[test] - fn make_ipp_32() { - let n = 32; + let proof = Prover::prove( + G_vec.clone(), + H_vec.clone(), + P, + Q, + a_vec.clone(), + b_vec.clone(), + ); + + assert_eq!( + proof.a_final.as_bytes(), + b"=\xa2\xed\xd2i\x1a\xb3'oF\xba:S\x12.\xbd)\xe1F\xbeI\xb4+\x11V&\xa6\xae\x1fGd\x04" + ); + assert_eq!( + proof.b_final.as_bytes(), + b"zD\xdb\xa5\xd34fO\xde\x8ctu\xa6$\\zS\xc2\x8d|\x93hW\"\xacLL]?\x8e\xc8\x08" + ); + } + #[test] + fn make_ipp_32() { + let n = 32; let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); @@ -121,11 +132,24 @@ mod tests { let a_vec = vec![Scalar::from_u64(1); n]; let b_vec = vec![Scalar::from_u64(2); n]; - let proof = Prover::prove(G_vec.clone(), H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone()); - - assert_eq!(proof.a_final.as_bytes(), b"l\xa3\xa8\xda\xca\xf9\xdbec|i\xb32i\xc0'\xc3H\xde+\xa0P\x0e;.\xf5\x9cf'?\xa6\n"); - assert_eq!(proof.b_final.as_bytes(), b"\xebr[X{\x90\xa5s\xf0[\xdb\xc3\x86\xd8\xa1:\x86\x91\xbcW@\xa1\x1cv\\\xea9\xcdN~L\x05"); - } + let proof = Prover::prove( + G_vec.clone(), + H_vec.clone(), + P, + Q, + a_vec.clone(), + b_vec.clone(), + ); + + assert_eq!( + proof.a_final.as_bytes(), + b"l\xa3\xa8\xda\xca\xf9\xdbec|i\xb32i\xc0'\xc3H\xde+\xa0P\x0e;.\xf5\x9cf'?\xa6\n" + ); + assert_eq!( + proof.b_final.as_bytes(), + b"\xebr[X{\x90\xa5s\xf0[\xdb\xc3\x86\xd8\xa1:\x86\x91\xbcW@\xa1\x1cv\\\xea9\xcdN~L\x05" + ); + } } #[cfg(test)] @@ -136,7 +160,7 @@ mod bench { #[bench] fn make_ipp_64(b: &mut Bencher) { - let n = 64; + let n = 64; let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); @@ -146,11 +170,20 @@ mod bench { let a_vec = vec![Scalar::from_u64(1); n]; let b_vec = vec![Scalar::from_u64(2); n]; - b.iter(|| Prover::prove(G_vec.clone(), H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone())); + b.iter(|| { + Prover::prove( + G_vec.clone(), + H_vec.clone(), + P, + Q, + a_vec.clone(), + b_vec.clone(), + ) + }); } #[bench] fn make_ipp_32(b: &mut Bencher) { - let n = 32; + let n = 32; let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); @@ -160,6 +193,15 @@ mod bench { let a_vec = vec![Scalar::from_u64(1); n]; let b_vec = vec![Scalar::from_u64(2); n]; - b.iter(|| Prover::prove(G_vec.clone(), H_vec.clone(), P, Q, a_vec.clone(), b_vec.clone())); + b.iter(|| { + Prover::prove( + G_vec.clone(), + H_vec.clone(), + P, + Q, + a_vec.clone(), + b_vec.clone(), + ) + }); } } diff --git a/src/scalar.rs b/src/scalar.rs index fc838ea7..8af9c906 100644 --- a/src/scalar.rs +++ b/src/scalar.rs @@ -21,10 +21,10 @@ pub fn batch_invert(inputs: &mut [Scalar]) { // the leaf nodes with 
the input variables. Finally, set every // non-leaf node to be the product of its children. let n = inputs.len().next_power_of_two(); - let mut tree = vec![Scalar::one(); 2*n]; - tree[n..n+inputs.len()].copy_from_slice(inputs); + let mut tree = vec![Scalar::one(); 2 * n]; + tree[n..n + inputs.len()].copy_from_slice(inputs); for i in (1..n).rev() { - tree[i] = &tree[2*i] * &tree[2*i+1]; + tree[i] = &tree[2 * i] * &tree[2 * i + 1]; } // The root of the tree is the product of all inputs, and is @@ -52,20 +52,28 @@ pub fn batch_invert(inputs: &mut [Scalar]) { #[cfg(test)] mod test { - use super::*; + use super::*; -#[test] - fn batch_invert_matches_nonbatched() { - let W = Scalar::from_bits(b"\x84\xfc\xbcOx\x12\xa0\x06\xd7\x91\xd9z:'\xdd\x1e!CE\xf7\xb1\xb9Vz\x810sD\x96\x85\xb5\x07"); - let X = Scalar::from_bits(b"NZ\xb44]G\x08\x84Y\x13\xb4d\x1b\xc2}RR\xa5\x85\x10\x1b\xccBD\xd4I\xf4\xa8y\xd9\xf2\x04"); - let Y = Scalar::from_bits(b"\x90v3\xfe\x1cKf\xa4\xa2\x8d-\xd7g\x83\x86\xc3S\xd0\xdeTU\xd4\xfc\x9d\xe8\xefz\xc3\x1f5\xbb\x05"); - let Z = Scalar::from_bits(b"\x05\x9d>\x0b\t&P=\xa3\x84\xa1<\x92z\xc2\x06A\x98\xcf4:$\xd5\xb7\xeb3j-\xfc\x11!\x0b"); + #[test] + fn batch_invert_matches_nonbatched() { + let W = Scalar::from_bits( + b"\x84\xfc\xbcOx\x12\xa0\x06\xd7\x91\xd9z:'\xdd\x1e!CE\xf7\xb1\xb9Vz\x810sD\x96\x85\xb5\x07", + ); + let X = Scalar::from_bits( + b"NZ\xb44]G\x08\x84Y\x13\xb4d\x1b\xc2}RR\xa5\x85\x10\x1b\xccBD\xd4I\xf4\xa8y\xd9\xf2\x04", + ); + let Y = Scalar::from_bits( + b"\x90v3\xfe\x1cKf\xa4\xa2\x8d-\xd7g\x83\x86\xc3S\xd0\xdeTU\xd4\xfc\x9d\xe8\xefz\xc3\x1f5\xbb\x05", + ); + let Z = Scalar::from_bits( + b"\x05\x9d>\x0b\t&P=\xa3\x84\xa1<\x92z\xc2\x06A\x98\xcf4:$\xd5\xb7\xeb3j-\xfc\x11!\x0b", + ); - let list = vec![W, X, Y, Z, W*Y, X*Z, Y*Y, W*Z]; - let mut inv_list = list.clone(); - batch_invert(&mut inv_list[..]); - for i in 0..8 { - assert_eq!(list[i].invert(), inv_list[i]); - } - } + let list = vec![W, X, Y, Z, W * Y, X * Z, Y * Y, W * Z]; + let mut inv_list = list.clone(); + batch_invert(&mut inv_list[..]); + for i in 0..8 { + assert_eq!(list[i].invert(), inv_list[i]); + } + } } From 07b023ecb1dc9ba6903082d67c9a9fe2163a41ac Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 19:15:40 -0800 Subject: [PATCH 037/186] remove feature not in crate root --- src/inner_product_proof.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 2ace4d85..9584acb8 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -1,5 +1,4 @@ #![allow(non_snake_case)] -#![feature(nll)] use std::iter; use curve25519_dalek::ristretto::RistrettoPoint; From 3ee343fe44c84e3cb30d8f4f6d92520ce024786e Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 19:20:20 -0800 Subject: [PATCH 038/186] rearrange terms to get vertical alignment --- src/inner_product_proof.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 9584acb8..09faa9c5 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -64,8 +64,8 @@ impl Prover { let x_inv = x.invert(); for i in 0..n { - a_l[i] = a_l[i] * x + a_r[i] * x_inv; - b_l[i] = b_l[i] * x_inv + b_r[i] * x; + a_l[i] = a_l[i] * x + x_inv * a_r[i]; + b_l[i] = b_l[i] * x_inv + x * b_r[i]; G_l[i] = ristretto::multiscalar_mult(&[x_inv, x], &[G_l[i], G_r[i]]); H_l[i] = ristretto::multiscalar_mult(&[x, x_inv], &[H_l[i], H_r[i]]); } From eda44afe85765bc9256f45fce23bdab7b6ae48ce Mon Sep 17 
00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 19:20:40 -0800 Subject: [PATCH 039/186] remove unnecessary P computation --- src/inner_product_proof.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 09faa9c5..c28a0983 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -70,13 +70,12 @@ impl Prover { H_l[i] = ristretto::multiscalar_mult(&[x, x_inv], &[H_l[i], H_r[i]]); } - P += ristretto::multiscalar_mult(&[x * x, x_inv * x_inv], &[L, R]); a = a_l; b = b_l; G = G_l; H = H_l; } - debug_assert_eq!(a.len(), 1); + return Proof { l_vec: L_vec, r_vec: R_vec, From 1a0a3814ac2ac9a462baeb4588c27d2b87637553 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 19:24:42 -0800 Subject: [PATCH 040/186] Move proof creation into impl Proof This way we can do Proof::create() and then proof.verify() --- src/inner_product_proof.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index c28a0983..feb82328 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -8,7 +8,6 @@ use range_proof::inner_product; use range_proof::commit; // replace with the random oracle use range_proof::make_generators; use sha2::Sha256; -pub struct Prover {} pub struct Proof { l_vec: Vec, @@ -17,8 +16,8 @@ pub struct Proof { b_final: Scalar, } -impl Prover { - pub fn prove( +impl Proof { + pub fn create( mut G_vec: Vec, mut H_vec: Vec, mut P: RistrettoPoint, @@ -100,7 +99,7 @@ mod tests { let a_vec = vec![Scalar::from_u64(1); n]; let b_vec = vec![Scalar::from_u64(2); n]; - let proof = Prover::prove( + let proof = Proof::create( G_vec.clone(), H_vec.clone(), P, @@ -130,7 +129,7 @@ mod tests { let a_vec = vec![Scalar::from_u64(1); n]; let b_vec = vec![Scalar::from_u64(2); n]; - let proof = Prover::prove( + let proof = Proof::create( G_vec.clone(), H_vec.clone(), P, @@ -169,7 +168,7 @@ mod bench { let b_vec = vec![Scalar::from_u64(2); n]; b.iter(|| { - Prover::prove( + Proof::create( G_vec.clone(), H_vec.clone(), P, @@ -192,7 +191,7 @@ mod bench { let b_vec = vec![Scalar::from_u64(2); n]; b.iter(|| { - Prover::prove( + Proof::create( G_vec.clone(), H_vec.clone(), P, From 2fccb805bd8181c923d001be96d8750307bc905e Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 19:46:13 -0800 Subject: [PATCH 041/186] factor out common test code into helper functions --- src/inner_product_proof.rs | 91 ++++++++++++++------------------------ 1 file changed, 32 insertions(+), 59 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index feb82328..c34abedf 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -87,9 +87,8 @@ impl Proof { #[cfg(test)] mod tests { use super::*; - #[test] - fn make_ipp_64() { - let n = 64; + + fn test_helper_create(n: usize, expected_a: &[u8; 32], expected_b: &[u8; 32]) { let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); @@ -108,43 +107,27 @@ mod tests { b_vec.clone(), ); - assert_eq!( - proof.a_final.as_bytes(), - b"=\xa2\xed\xd2i\x1a\xb3'oF\xba:S\x12.\xbd)\xe1F\xbeI\xb4+\x11V&\xa6\xae\x1fGd\x04" - ); - assert_eq!( - proof.b_final.as_bytes(), - b"zD\xdb\xa5\xd34fO\xde\x8ctu\xa6$\\zS\xc2\x8d|\x93hW\"\xacLL]?\x8e\xc8\x08" - ); + assert_eq!(proof.a_final.as_bytes(), expected_a); + 
assert_eq!(proof.b_final.as_bytes(), expected_b); } - #[test] - fn make_ipp_32() { - let n = 32; - let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); - let G_vec = make_generators(G, n); - let H_vec = make_generators(H, n); - let Q = RistrettoPoint::hash_from_bytes::("more".as_bytes()); - let P = RistrettoPoint::hash_from_bytes::("points".as_bytes()); - let a_vec = vec![Scalar::from_u64(1); n]; - let b_vec = vec![Scalar::from_u64(2); n]; - let proof = Proof::create( - G_vec.clone(), - H_vec.clone(), - P, - Q, - a_vec.clone(), - b_vec.clone(), + #[test] + fn make_ipp_64() { + // These test vectors don't have a ground truth, they're just to catch accidental changes to the computation. + test_helper_create( + 64, + b"=\xa2\xed\xd2i\x1a\xb3'oF\xba:S\x12.\xbd)\xe1F\xbeI\xb4+\x11V&\xa6\xae\x1fGd\x04", + b"zD\xdb\xa5\xd34fO\xde\x8ctu\xa6$\\zS\xc2\x8d|\x93hW\"\xacLL]?\x8e\xc8\x08", ); + } - assert_eq!( - proof.a_final.as_bytes(), - b"l\xa3\xa8\xda\xca\xf9\xdbec|i\xb32i\xc0'\xc3H\xde+\xa0P\x0e;.\xf5\x9cf'?\xa6\n" - ); - assert_eq!( - proof.b_final.as_bytes(), - b"\xebr[X{\x90\xa5s\xf0[\xdb\xc3\x86\xd8\xa1:\x86\x91\xbcW@\xa1\x1cv\\\xea9\xcdN~L\x05" + #[test] + fn make_ipp_32() { + // These test vectors don't have a ground truth, they're just to catch accidental changes to the computation. + test_helper_create( + 32, + b"l\xa3\xa8\xda\xca\xf9\xdbec|i\xb32i\xc0'\xc3H\xde+\xa0P\x0e;.\xf5\x9cf'?\xa6\n", + b"\xebr[X{\x90\xa5s\xf0[\xdb\xc3\x86\xd8\xa1:\x86\x91\xbcW@\xa1\x1cv\\\xea9\xcdN~L\x05", ); } } @@ -155,9 +138,7 @@ mod bench { use super::*; use test::Bencher; - #[bench] - fn make_ipp_64(b: &mut Bencher) { - let n = 64; + fn bench_helper_create(n: usize, b: &mut Bencher) { let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); @@ -178,27 +159,19 @@ mod bench { ) }); } + #[bench] - fn make_ipp_32(b: &mut Bencher) { - let n = 32; - let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); - let G_vec = make_generators(G, n); - let H_vec = make_generators(H, n); - let Q = RistrettoPoint::hash_from_bytes::("more".as_bytes()); - let P = RistrettoPoint::hash_from_bytes::("points".as_bytes()); - let a_vec = vec![Scalar::from_u64(1); n]; - let b_vec = vec![Scalar::from_u64(2); n]; + fn create_n_eq_64(b: &mut Bencher) { + bench_helper_create(64, b); + } - b.iter(|| { - Proof::create( - G_vec.clone(), - H_vec.clone(), - P, - Q, - a_vec.clone(), - b_vec.clone(), - ) - }); + #[bench] + fn create_n_eq_32(b: &mut Bencher) { + bench_helper_create(32, b); + } + + #[bench] + fn create_n_eq_16(b: &mut Bencher) { + bench_helper_create(16, b); } } From e8a0895f8920429eca9b45f3a5ae511826c4b4b4 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 19:52:27 -0800 Subject: [PATCH 042/186] use _L, _R to match notation in the paper --- src/inner_product_proof.rs | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index c34abedf..8d540e71 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -37,22 +37,22 @@ impl Proof { while n != 1 { n = n / 2; - let (a_l, a_r) = a.split_at_mut(n); - let (b_l, b_r) = b.split_at_mut(n); - let (G_l, G_r) = G.split_at_mut(n); - let (H_l, H_r) = H.split_at_mut(n); + let (a_L, a_R) = 
a.split_at_mut(n); + let (b_L, b_R) = b.split_at_mut(n); + let (G_L, G_R) = G.split_at_mut(n); + let (H_L, H_R) = H.split_at_mut(n); - let c_l = inner_product(&a_l, &b_r); - let c_r = inner_product(&a_r, &b_l); + let c_L = inner_product(&a_L, &b_R); + let c_R = inner_product(&a_R, &b_L); let L = ristretto::multiscalar_mult( - a_l.iter().chain(b_r.iter()).chain(iter::once(&c_l)), - G_r.iter().chain(H_l.iter()).chain(iter::once(&Q)), + a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), + G_R.iter().chain(H_L.iter()).chain(iter::once(&Q)), ); let R = ristretto::multiscalar_mult( - a_r.iter().chain(b_l.iter()).chain(iter::once(&c_r)), - G_l.iter().chain(H_r.iter()).chain(iter::once(&Q)), + a_R.iter().chain(b_L.iter()).chain(iter::once(&c_R)), + G_L.iter().chain(H_R.iter()).chain(iter::once(&Q)), ); L_vec.push(L); @@ -63,16 +63,16 @@ impl Proof { let x_inv = x.invert(); for i in 0..n { - a_l[i] = a_l[i] * x + x_inv * a_r[i]; - b_l[i] = b_l[i] * x_inv + x * b_r[i]; - G_l[i] = ristretto::multiscalar_mult(&[x_inv, x], &[G_l[i], G_r[i]]); - H_l[i] = ristretto::multiscalar_mult(&[x, x_inv], &[H_l[i], H_r[i]]); + a_L[i] = a_L[i] * x + x_inv * a_R[i]; + b_L[i] = b_L[i] * x_inv + x * b_R[i]; + G_L[i] = ristretto::multiscalar_mult(&[x_inv, x], &[G_L[i], G_R[i]]); + H_L[i] = ristretto::multiscalar_mult(&[x, x_inv], &[H_L[i], H_R[i]]); } - a = a_l; - b = b_l; - G = G_l; - H = H_l; + a = a_L; + b = b_L; + G = G_L; + H = H_L; } return Proof { From 4aaf3755ecf878e31c403aba78e65057cfb17f82 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 19:53:36 -0800 Subject: [PATCH 043/186] Rename a_final, b_final to just a,b --- src/inner_product_proof.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 8d540e71..c6d3f6cf 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -12,8 +12,8 @@ use sha2::Sha256; pub struct Proof { l_vec: Vec, r_vec: Vec, - a_final: Scalar, - b_final: Scalar, + a: Scalar, + b: Scalar, } impl Proof { @@ -78,8 +78,8 @@ impl Proof { return Proof { l_vec: L_vec, r_vec: R_vec, - a_final: a[0], - b_final: b[0], + a: a[0], + b: b[0], }; } } @@ -107,8 +107,8 @@ mod tests { b_vec.clone(), ); - assert_eq!(proof.a_final.as_bytes(), expected_a); - assert_eq!(proof.b_final.as_bytes(), expected_b); + assert_eq!(proof.a.as_bytes(), expected_a); + assert_eq!(proof.b.as_bytes(), expected_b); } #[test] From 13b6dc493b77aa0ec8e0d46634fd74f40846c90a Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 19:55:22 -0800 Subject: [PATCH 044/186] Pass P,Q as borrows --- src/inner_product_proof.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index c6d3f6cf..7c882a55 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -18,10 +18,10 @@ pub struct Proof { impl Proof { pub fn create( + P: &RistrettoPoint, + Q: &RistrettoPoint, mut G_vec: Vec, mut H_vec: Vec, - mut P: RistrettoPoint, - Q: RistrettoPoint, mut a_vec: Vec, mut b_vec: Vec, ) -> Proof { @@ -47,12 +47,12 @@ impl Proof { let L = ristretto::multiscalar_mult( a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), - G_R.iter().chain(H_L.iter()).chain(iter::once(&Q)), + G_R.iter().chain(H_L.iter()).chain(iter::once(Q)), ); let R = ristretto::multiscalar_mult( a_R.iter().chain(b_L.iter()).chain(iter::once(&c_R)), - G_L.iter().chain(H_R.iter()).chain(iter::once(&Q)), + 
G_L.iter().chain(H_R.iter()).chain(iter::once(Q)), ); L_vec.push(L); @@ -99,10 +99,10 @@ mod tests { let b_vec = vec![Scalar::from_u64(2); n]; let proof = Proof::create( + &P, + &Q, G_vec.clone(), H_vec.clone(), - P, - Q, a_vec.clone(), b_vec.clone(), ); @@ -150,10 +150,10 @@ mod bench { b.iter(|| { Proof::create( + &P, + &Q, G_vec.clone(), H_vec.clone(), - P, - Q, a_vec.clone(), b_vec.clone(), ) From 72f097bc64d67a2b5528d276b628b0f623ff149c Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 20:23:05 -0800 Subject: [PATCH 045/186] Use random oracle API for proof creation --- src/inner_product_proof.rs | 25 ++++++++++++++++++++----- src/lib.rs | 2 +- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 7c882a55..2ca1da33 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -4,9 +4,12 @@ use std::iter; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; use curve25519_dalek::scalar::Scalar; + +use random_oracle::RandomOracle; + use range_proof::inner_product; -use range_proof::commit; // replace with the random oracle use range_proof::make_generators; + use sha2::Sha256; pub struct Proof { @@ -17,7 +20,13 @@ pub struct Proof { } impl Proof { + /// Create an inner-product proof. + /// + /// The `verifier` is passed in as a parameter so that the + /// challenges depend on the *entire* transcript (including parent + /// protocols). pub fn create( + verifier: &mut RandomOracle, P: &RistrettoPoint, Q: &RistrettoPoint, mut G_vec: Vec, @@ -58,8 +67,10 @@ impl Proof { L_vec.push(L); R_vec.push(R); - // TODO: use random oracle for the challenge instead - let (x, _) = commit(&L, &R); + verifier.commit(L.compress().as_bytes()); + verifier.commit(R.compress().as_bytes()); + + let x = verifier.challenge_scalar(); let x_inv = x.invert(); for i in 0..n { @@ -89,6 +100,7 @@ mod tests { use super::*; fn test_helper_create(n: usize, expected_a: &[u8; 32], expected_b: &[u8; 32]) { + let mut verifier = RandomOracle::new(b"innerproducttest"); let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); @@ -99,6 +111,7 @@ mod tests { let b_vec = vec![Scalar::from_u64(2); n]; let proof = Proof::create( + &mut verifier, &P, &Q, G_vec.clone(), @@ -107,8 +120,8 @@ mod tests { b_vec.clone(), ); - assert_eq!(proof.a.as_bytes(), expected_a); - assert_eq!(proof.b.as_bytes(), expected_b); + //assert_eq!(proof.a.as_bytes(), expected_a); + //assert_eq!(proof.b.as_bytes(), expected_b); } #[test] @@ -139,6 +152,7 @@ mod bench { use test::Bencher; fn bench_helper_create(n: usize, b: &mut Bencher) { + let mut verifier = RandomOracle::new(b"innerproducttest"); let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); @@ -150,6 +164,7 @@ mod bench { b.iter(|| { Proof::create( + &mut verifier, &P, &Q, G_vec.clone(), diff --git a/src/lib.rs b/src/lib.rs index 3f5dfb6e..40d8a10f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -11,7 +11,7 @@ extern crate tiny_keccak; #[cfg(test)] extern crate test; -mod random_oracle; +pub mod random_oracle; mod range_proof; mod inner_product_proof; From 58b4dfcf847c409707501d8907d91356824c4360 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 20:23:41 -0800 Subject: [PATCH 046/186] Check that all inputs to innerproduct proof have the 
same len --- src/inner_product_proof.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 2ca1da33..a5245b1c 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -34,12 +34,22 @@ impl Proof { mut a_vec: Vec, mut b_vec: Vec, ) -> Proof { + // Create slices G, H, a, b backed by their respective + // vectors. This lets us reslice as we compress the lengths + // of the vectors in the main loop below. let mut G = &mut G_vec[..]; let mut H = &mut H_vec[..]; let mut a = &mut a_vec[..]; let mut b = &mut b_vec[..]; let mut n = G.len(); + + // All of the input vectors must have the same length. + assert_eq!(G.len(), n); + assert_eq!(H.len(), n); + assert_eq!(a.len(), n); + assert_eq!(b.len(), n); + let lg_n = n.next_power_of_two().trailing_zeros() as usize; let mut L_vec = Vec::with_capacity(lg_n); let mut R_vec = Vec::with_capacity(lg_n); From 8a9e1ab4668015ed45e5f042ff4c0cb4acbff394 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 21:31:37 -0800 Subject: [PATCH 047/186] oops, the scalar module wasn't included in lib.rs --- src/lib.rs | 2 ++ src/scalar.rs | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 40d8a10f..b14fe073 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -15,4 +15,6 @@ pub mod random_oracle; mod range_proof; mod inner_product_proof; +pub mod scalar; + pub use range_proof::*; diff --git a/src/scalar.rs b/src/scalar.rs index 8af9c906..f2b14a55 100644 --- a/src/scalar.rs +++ b/src/scalar.rs @@ -57,16 +57,16 @@ mod test { #[test] fn batch_invert_matches_nonbatched() { let W = Scalar::from_bits( - b"\x84\xfc\xbcOx\x12\xa0\x06\xd7\x91\xd9z:'\xdd\x1e!CE\xf7\xb1\xb9Vz\x810sD\x96\x85\xb5\x07", + *b"\x84\xfc\xbcOx\x12\xa0\x06\xd7\x91\xd9z:'\xdd\x1e!CE\xf7\xb1\xb9Vz\x810sD\x96\x85\xb5\x07", ); let X = Scalar::from_bits( - b"NZ\xb44]G\x08\x84Y\x13\xb4d\x1b\xc2}RR\xa5\x85\x10\x1b\xccBD\xd4I\xf4\xa8y\xd9\xf2\x04", + *b"NZ\xb44]G\x08\x84Y\x13\xb4d\x1b\xc2}RR\xa5\x85\x10\x1b\xccBD\xd4I\xf4\xa8y\xd9\xf2\x04", ); let Y = Scalar::from_bits( - b"\x90v3\xfe\x1cKf\xa4\xa2\x8d-\xd7g\x83\x86\xc3S\xd0\xdeTU\xd4\xfc\x9d\xe8\xefz\xc3\x1f5\xbb\x05", + *b"\x90v3\xfe\x1cKf\xa4\xa2\x8d-\xd7g\x83\x86\xc3S\xd0\xdeTU\xd4\xfc\x9d\xe8\xefz\xc3\x1f5\xbb\x05", ); let Z = Scalar::from_bits( - b"\x05\x9d>\x0b\t&P=\xa3\x84\xa1<\x92z\xc2\x06A\x98\xcf4:$\xd5\xb7\xeb3j-\xfc\x11!\x0b", + *b"\x05\x9d>\x0b\t&P=\xa3\x84\xa1<\x92z\xc2\x06A\x98\xcf4:$\xd5\xb7\xeb3j-\xfc\x11!\x0b", ); let list = vec![W, X, Y, Z, W * Y, X * Z, Y * Y, W * Z]; From fe7fe8488ff32b7df1f41518bc9613644fea97e5 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Feb 2018 23:29:19 -0800 Subject: [PATCH 048/186] add (failing) verify method --- src/inner_product_proof.rs | 92 ++++++++++++++++++++++++++++++++++++-- src/scalar.rs | 9 +++- 2 files changed, 96 insertions(+), 5 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index a5245b1c..a0ebc44d 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -5,6 +5,9 @@ use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; use curve25519_dalek::scalar::Scalar; +// XXX upstream into dalek +use scalar; + use random_oracle::RandomOracle; use range_proof::inner_product; @@ -13,8 +16,8 @@ use range_proof::make_generators; use sha2::Sha256; pub struct Proof { - l_vec: Vec, - r_vec: Vec, + L_vec: Vec, + R_vec: Vec, a: Scalar, b: Scalar, } @@ -97,12 +100,90 @@ impl 
Proof { } return Proof { - l_vec: L_vec, - r_vec: R_vec, + L_vec: L_vec, + R_vec: R_vec, a: a[0], b: b[0], }; } + + fn verify( + self, + verifier: &mut RandomOracle, + P: &RistrettoPoint, + Q: &RistrettoPoint, + G_vec: &Vec, + H_vec: &Vec, + ) -> Result<(), ()> { + // XXX prover should commit to n + let lg_n = self.L_vec.len(); + let n = 1 << lg_n; + + // XXX figure out how ser/deser works for Proofs + // maybe avoid this compression + let mut challenges = Vec::with_capacity(lg_n); + for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) { + verifier.commit(L.compress().as_bytes()); + verifier.commit(R.compress().as_bytes()); + + challenges.push(verifier.challenge_scalar()); + } + + let mut inv_challenges = challenges.clone(); + let allinv = scalar::batch_invert(&mut inv_challenges); + + for x in challenges.iter_mut() { + *x = &*x * &*x; // wtf + } + let challenges_sq = challenges; + + // j-th bit of i + let bit = |i, j| 1 & (i >> j); + + let mut s = Vec::with_capacity(n); + for i in 0..n { + let mut s_i = allinv; + for j in 0..lg_n { + if bit(i, j) == 1 { + s_i *= challenges_sq[j]; + } + } + s.push(s_i); + } + let s = s; + + // so many allocs :( + // these were supposed to be iterators but the dalek trait doesn't accept values + + let ab = self.a * self.b; + + let a_times_s: Vec<_> = s.iter().map(|s_i| self.a * s_i).collect(); + + let b_div_s: Vec<_> = s.iter().rev().map(|s_i_inv| self.b * s_i_inv).collect(); + + let neg_x_sq: Vec<_> = challenges_sq.iter().map(|x| -x).collect(); + + let neg_x_inv_sq: Vec<_> = inv_challenges + .iter() + .map(|x_inv| -(x_inv * x_inv)) + .collect(); + + let scalar_iter = iter::once(&ab) + .chain(a_times_s.iter()) + .chain(b_div_s.iter()) + .chain(neg_x_sq.iter()) + .chain(neg_x_inv_sq.iter()); + + let points_iter = iter::once(Q) + .chain(G_vec.iter()) + .chain(H_vec.iter()) + .chain(self.L_vec.iter()) + .chain(self.R_vec.iter()); + + let expect_P = ristretto::multiscalar_mult(scalar_iter, points_iter); + + if expect_P == *P { Ok(()) } else { Err(()) } + } } #[cfg(test)] @@ -130,6 +211,9 @@ mod tests { b_vec.clone(), ); + let mut verifier = RandomOracle::new(b"innerproducttest"); + assert!(proof.verify(&mut verifier, &P, &Q, &G_vec, &H_vec).is_ok()); + //assert_eq!(proof.a.as_bytes(), expected_a); //assert_eq!(proof.b.as_bytes(), expected_b); } diff --git a/src/scalar.rs b/src/scalar.rs index f2b14a55..edcf6360 100644 --- a/src/scalar.rs +++ b/src/scalar.rs @@ -1,6 +1,11 @@ use curve25519_dalek::scalar::Scalar; -pub fn batch_invert(inputs: &mut [Scalar]) { +/// Replace each element of `inputs` with its inverse. +/// +/// All inputs must be nonzero. +/// +/// Returns the inverse of the product of all inputs. 
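+///
+/// # Example
+///
+/// A minimal usage sketch; `Scalar::from_u64` and `Scalar::invert` are the
+/// curve25519-dalek methods already used in this crate's tests, and the
+/// concrete values here are illustrative only:
+///
+/// ```ignore
+/// let mut vals = vec![Scalar::from_u64(2), Scalar::from_u64(3)];
+/// let allinv = batch_invert(&mut vals[..]);
+/// // each entry is replaced by its inverse...
+/// assert_eq!(vals[0], Scalar::from_u64(2).invert());
+/// assert_eq!(vals[1], Scalar::from_u64(3).invert());
+/// // ...and the return value is the inverse of the product of all inputs
+/// assert_eq!(allinv, (Scalar::from_u64(2) * Scalar::from_u64(3)).invert());
+/// ```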
+pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar { // First, compute the product of all inputs using a product // tree: // @@ -48,6 +53,8 @@ pub fn batch_invert(inputs: &mut [Scalar]) { } inputs[i] = inv; } + + allinv } #[cfg(test)] From bf8fa35c413cfb334f24b4d62c9f1734c33cc08a Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 20 Feb 2018 13:47:14 -0800 Subject: [PATCH 049/186] add benchmark stubs for (still broken) verification --- src/inner_product_proof.rs | 42 +++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index a0ebc44d..04354706 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -108,7 +108,7 @@ impl Proof { } fn verify( - self, + &self, verifier: &mut RandomOracle, P: &RistrettoPoint, Q: &RistrettoPoint, @@ -269,6 +269,31 @@ mod bench { }); } + fn bench_helper_verify(n: usize, b: &mut Bencher) { + let mut verifier = RandomOracle::new(b"innerproducttest"); + let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); + let G_vec = make_generators(G, n); + let H_vec = make_generators(H, n); + let Q = RistrettoPoint::hash_from_bytes::("more".as_bytes()); + let P = RistrettoPoint::hash_from_bytes::("points".as_bytes()); + let a_vec = vec![Scalar::from_u64(1); n]; + let b_vec = vec![Scalar::from_u64(2); n]; + + let proof = Proof::create( + &mut verifier, + &P, + &Q, + G_vec.clone(), + H_vec.clone(), + a_vec.clone(), + b_vec.clone(), + ); + + let mut verifier = RandomOracle::new(b"innerproducttest"); + b.iter(|| proof.verify(&mut verifier, &P, &Q, &G_vec, &H_vec)); + } + #[bench] fn create_n_eq_64(b: &mut Bencher) { bench_helper_create(64, b); @@ -283,4 +308,19 @@ mod bench { fn create_n_eq_16(b: &mut Bencher) { bench_helper_create(16, b); } + + #[bench] + fn verify_n_eq_64(b: &mut Bencher) { + bench_helper_verify(64, b); + } + + #[bench] + fn verify_n_eq_32(b: &mut Bencher) { + bench_helper_verify(32, b); + } + + #[bench] + fn verify_n_eq_16(b: &mut Bencher) { + bench_helper_verify(16, b); + } } From 9cb78fc6adb37c8834d9b793bbdee9bde4d682a2 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 20 Feb 2018 16:16:15 -0800 Subject: [PATCH 050/186] Test proof creation with valid proofs --- src/inner_product_proof.rs | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 04354706..8a98aeac 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -196,11 +196,18 @@ mod tests { let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); let H_vec = make_generators(H, n); - let Q = RistrettoPoint::hash_from_bytes::("more".as_bytes()); - let P = RistrettoPoint::hash_from_bytes::("points".as_bytes()); + let a_vec = vec![Scalar::from_u64(1); n]; let b_vec = vec![Scalar::from_u64(2); n]; + let Q = RistrettoPoint::hash_from_bytes::(b"test point"); + let c = inner_product(&a_vec, &b_vec); + + let P = ristretto::multiscalar_mult( + a_vec.iter().chain(b_vec.iter()).chain(iter::once(&c)), + G_vec.iter().chain(H_vec.iter()).chain(iter::once(&Q)), + ); + let proof = Proof::create( &mut verifier, &P, @@ -275,11 +282,18 @@ mod bench { let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); let H_vec = make_generators(H, n); - let Q = RistrettoPoint::hash_from_bytes::("more".as_bytes()); - let P 
= RistrettoPoint::hash_from_bytes::("points".as_bytes()); + let a_vec = vec![Scalar::from_u64(1); n]; let b_vec = vec![Scalar::from_u64(2); n]; + let Q = RistrettoPoint::hash_from_bytes::(b"test point"); + let c = inner_product(&a_vec, &b_vec); + + let P = ristretto::multiscalar_mult( + a_vec.iter().chain(b_vec.iter()).chain(iter::once(&c)), + G_vec.iter().chain(H_vec.iter()).chain(iter::once(&Q)), + ); + let proof = Proof::create( &mut verifier, &P, From 4e78d4f6248160e3c4ebc1f5895e6f8fd46c78b5 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 20 Feb 2018 16:18:48 -0800 Subject: [PATCH 051/186] Add minimal test cases for proofs --- src/inner_product_proof.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 8a98aeac..5569468c 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -225,6 +225,21 @@ mod tests { //assert_eq!(proof.b.as_bytes(), expected_b); } + #[test] + fn make_ipp_1() { + test_helper_create(1, &[0; 32], &[0; 32]); + } + + #[test] + fn make_ipp_2() { + test_helper_create(2, &[0; 32], &[0; 32]); + } + + #[test] + fn make_ipp_4() { + test_helper_create(4, &[0; 32], &[0; 32]); + } + #[test] fn make_ipp_64() { // These test vectors don't have a ground truth, they're just to catch accidental changes to the computation. From 962b354633cb53014930c3abe1e6eb9729d7094f Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 20 Feb 2018 16:23:23 -0800 Subject: [PATCH 052/186] fix challenge ordering --- src/inner_product_proof.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 5569468c..af05e3e9 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -145,7 +145,8 @@ impl Proof { let mut s_i = allinv; for j in 0..lg_n { if bit(i, j) == 1 { - s_i *= challenges_sq[j]; + // The challenges are stored in "creation order" as [x_k,...,x_1] + s_i *= challenges_sq[(lg_n - 1) - j]; } } s.push(s_i); From 34ecfb035fa6eeed2ea611652fa6eedc418c4240 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 20 Feb 2018 16:57:10 -0800 Subject: [PATCH 053/186] Use vartime multiscalar mult since the inner-product proof works on public data --- src/inner_product_proof.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index af05e3e9..636aa5bc 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -67,12 +67,12 @@ impl Proof { let c_L = inner_product(&a_L, &b_R); let c_R = inner_product(&a_R, &b_L); - let L = ristretto::multiscalar_mult( + let L = ristretto::vartime::multiscalar_mult( a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), G_R.iter().chain(H_L.iter()).chain(iter::once(Q)), ); - let R = ristretto::multiscalar_mult( + let R = ristretto::vartime::multiscalar_mult( a_R.iter().chain(b_L.iter()).chain(iter::once(&c_R)), G_L.iter().chain(H_R.iter()).chain(iter::once(Q)), ); @@ -89,8 +89,8 @@ impl Proof { for i in 0..n { a_L[i] = a_L[i] * x + x_inv * a_R[i]; b_L[i] = b_L[i] * x_inv + x * b_R[i]; - G_L[i] = ristretto::multiscalar_mult(&[x_inv, x], &[G_L[i], G_R[i]]); - H_L[i] = ristretto::multiscalar_mult(&[x, x_inv], &[H_L[i], H_R[i]]); + G_L[i] = ristretto::vartime::multiscalar_mult(&[x_inv, x], &[G_L[i], G_R[i]]); + H_L[i] = ristretto::vartime::multiscalar_mult(&[x, x_inv], &[H_L[i], H_R[i]]); } a = a_L; @@ -181,7 +181,7 @@ impl Proof { .chain(self.L_vec.iter()) 
.chain(self.R_vec.iter()); - let expect_P = ristretto::multiscalar_mult(scalar_iter, points_iter); + let expect_P = ristretto::vartime::multiscalar_mult(scalar_iter, points_iter); if expect_P == *P { Ok(()) } else { Err(()) } } @@ -204,7 +204,7 @@ mod tests { let Q = RistrettoPoint::hash_from_bytes::(b"test point"); let c = inner_product(&a_vec, &b_vec); - let P = ristretto::multiscalar_mult( + let P = ristretto::vartime::multiscalar_mult( a_vec.iter().chain(b_vec.iter()).chain(iter::once(&c)), G_vec.iter().chain(H_vec.iter()).chain(iter::once(&Q)), ); @@ -305,7 +305,7 @@ mod bench { let Q = RistrettoPoint::hash_from_bytes::(b"test point"); let c = inner_product(&a_vec, &b_vec); - let P = ristretto::multiscalar_mult( + let P = ristretto::vartime::multiscalar_mult( a_vec.iter().chain(b_vec.iter()).chain(iter::once(&c)), G_vec.iter().chain(H_vec.iter()).chain(iter::once(&Q)), ); From aab7bd0de0c46b6f2594037371963ff0bd85ea56 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 12 Mar 2018 12:26:10 -0700 Subject: [PATCH 054/186] Rename to `ProofTranscript` (#13) * Move random_oracle.rs to proof_transcript.rs * Rename RandomOracle to ProofTranscript, and tweak implementation * Messages now have a 2-byte length prefix, allowing 64KB messages. * Add ProofTranscript::commit_u64 convenience method --- Cargo.toml | 1 + src/inner_product_proof.rs | 16 +-- src/lib.rs | 3 +- src/proof_transcript.rs | 222 +++++++++++++++++++++++++++++++++++++ src/random_oracle.rs | 167 ---------------------------- 5 files changed, 233 insertions(+), 176 deletions(-) create mode 100644 src/proof_transcript.rs delete mode 100644 src/random_oracle.rs diff --git a/Cargo.toml b/Cargo.toml index 30ef520f..629799cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ curve25519-dalek = { version = "^0.15", features = ["nightly"] } sha2 = "^0.7" rand = "^0.4" tiny-keccak = "^1.4" +byteorder = "1.2.1" [dev-dependencies] hex = "^0.3" diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 636aa5bc..c80d0d1d 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -8,7 +8,7 @@ use curve25519_dalek::scalar::Scalar; // XXX upstream into dalek use scalar; -use random_oracle::RandomOracle; +use proof_transcript::ProofTranscript; use range_proof::inner_product; use range_proof::make_generators; @@ -29,7 +29,7 @@ impl Proof { /// challenges depend on the *entire* transcript (including parent /// protocols). 
pub fn create( - verifier: &mut RandomOracle, + verifier: &mut ProofTranscript, P: &RistrettoPoint, Q: &RistrettoPoint, mut G_vec: Vec, @@ -109,7 +109,7 @@ impl Proof { fn verify( &self, - verifier: &mut RandomOracle, + verifier: &mut ProofTranscript, P: &RistrettoPoint, Q: &RistrettoPoint, G_vec: &Vec, @@ -192,7 +192,7 @@ mod tests { use super::*; fn test_helper_create(n: usize, expected_a: &[u8; 32], expected_b: &[u8; 32]) { - let mut verifier = RandomOracle::new(b"innerproducttest"); + let mut verifier = ProofTranscript::new(b"innerproducttest"); let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); @@ -219,7 +219,7 @@ mod tests { b_vec.clone(), ); - let mut verifier = RandomOracle::new(b"innerproducttest"); + let mut verifier = ProofTranscript::new(b"innerproducttest"); assert!(proof.verify(&mut verifier, &P, &Q, &G_vec, &H_vec).is_ok()); //assert_eq!(proof.a.as_bytes(), expected_a); @@ -269,7 +269,7 @@ mod bench { use test::Bencher; fn bench_helper_create(n: usize, b: &mut Bencher) { - let mut verifier = RandomOracle::new(b"innerproducttest"); + let mut verifier = ProofTranscript::new(b"innerproducttest"); let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); @@ -293,7 +293,7 @@ mod bench { } fn bench_helper_verify(n: usize, b: &mut Bencher) { - let mut verifier = RandomOracle::new(b"innerproducttest"); + let mut verifier = ProofTranscript::new(b"innerproducttest"); let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); @@ -320,7 +320,7 @@ mod bench { b_vec.clone(), ); - let mut verifier = RandomOracle::new(b"innerproducttest"); + let mut verifier = ProofTranscript::new(b"innerproducttest"); b.iter(|| proof.verify(&mut verifier, &P, &Q, &G_vec, &H_vec)); } diff --git a/src/lib.rs b/src/lib.rs index b14fe073..9cfcc8cd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,12 +6,13 @@ extern crate curve25519_dalek; extern crate sha2; extern crate rand; +extern crate byteorder; extern crate tiny_keccak; #[cfg(test)] extern crate test; -pub mod random_oracle; +pub mod proof_transcript; mod range_proof; mod inner_product_proof; diff --git a/src/proof_transcript.rs b/src/proof_transcript.rs new file mode 100644 index 00000000..e4d0762e --- /dev/null +++ b/src/proof_transcript.rs @@ -0,0 +1,222 @@ +#![deny(missing_docs)] + +//! The `proof_transcript` module contains API designed to allow +//! implementation of non-interactive proofs as if they were +//! interactive, using the Fiat-Shamir transform. + +use curve25519_dalek::scalar::Scalar; + +// XXX fix up deps (see comment below re: "api somewhat unclear") +use tiny_keccak::Keccak; + +use byteorder::{ByteOrder, LittleEndian}; + +/// The `SpongeState` enum describes the state of the internal sponge +/// used by a `ProofTranscript`. +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +enum SpongeState { + Absorbing, + Squeezing, +} + +/// The `ProofTranscript` struct represents a transcript of messages +/// between a prover and verifier engaged in a public-coin argument. +/// +/// The prover can send messages to the `ProofTranscript` object, +/// which absorbs them into a sponge, and can then request challenges, +/// which are derived from all previous messages. 
+/// +/// The verifier can then construct its own `ProofTranscript` +/// object, send it (what should be) the same messages, and request +/// (what should be) the same challenge values. +/// +/// To create a `ProofTranscript` object, use `ProofTranscript::new()` +/// at the outermost protocol layer. A `&mut` reference to this +/// object can then be passed to any sub-protocols, making it easy to +/// ensure that their challenge values are bound to the *entire* proof +/// transcript, not just the sub-protocol. +/// +/// Internally, the `ProofTranscript` uses the Keccak sponge to +/// absorb messages and squeeze challenges. +/// +/// # Example +/// +/// ``` +/// # extern crate curve25519_dalek; +/// # extern crate ristretto_bulletproofs; +/// # use ristretto_bulletproofs::proof_transcript::ProofTranscript; +/// # fn main() { +/// +/// use curve25519_dalek::constants; +/// let B = &constants::RISTRETTO_BASEPOINT_TABLE; +/// +/// let mut transcript = ProofTranscript::new(b"MyProofName: Don't copypaste this"); +/// +/// // Send "some message" to the verifier +/// transcript.commit(b"some message"); +/// +/// // Extract a challenge scalar +/// let x = transcript.challenge_scalar(); +/// +/// // Send x * B to the verifier +/// let P = B * &x; +/// transcript.commit(P.compress().as_bytes()); +/// # } +/// ``` +#[derive(Clone)] +pub struct ProofTranscript { + hash: Keccak, + state: SpongeState, +} + +impl ProofTranscript { + /// Begin an new, empty proof transcript, using the given `label` + /// for domain separation. + pub fn new(label: &[u8]) -> Self { + let mut ro = ProofTranscript { + hash: Keccak::new_shake128(), + state: SpongeState::Absorbing, + }; + ro.commit(label); + // makes sure the label is disambiguated from the rest of the messages. + ro.pad(); + ro + } + + /// Commit a `message` to the proof transcript. + /// + /// # Note + /// + /// Each message must be shorter than 64Kb (65536 bytes). + pub fn commit(&mut self, message: &[u8]) { + self.set_state(SpongeState::Absorbing); + + let len = message.len(); + if len > (u16::max_value() as usize) { + panic!("Committed message must be less than 64Kb!"); + } + + let mut len_prefix = [0u8; 2]; + LittleEndian::write_u16(&mut len_prefix, len as u16); + + self.hash.absorb(&len_prefix); + self.hash.absorb(message); + } + + /// Commit a `u64` to the proof transcript. + /// + /// This is a convenience method that commits the little-endian + /// bytes of `value`. + pub fn commit_u64(&mut self, value: u64) { + let mut value_bytes = [0u8; 8]; + LittleEndian::write_u64(&mut value_bytes, value); + + self.commit(&value_bytes); + } + + /// Extracts an arbitrary-sized challenge byte slice. + pub fn challenge_bytes(&mut self, mut output: &mut [u8]) { + self.set_state(SpongeState::Squeezing); + self.hash.squeeze(&mut output); + } + + /// Extracts a challenge scalar. + /// + /// This is a convenience method that extracts 64 bytes and + /// reduces modulo the group order. + pub fn challenge_scalar(&mut self) -> Scalar { + let mut buf = [0u8; 64]; + self.challenge_bytes(&mut buf); + Scalar::from_bytes_mod_order_wide(&buf) + } + + /// Set the sponge state to `new_state`. + /// + /// Does necessary padding+permutation if needed to transition + /// from one state to another. + fn set_state(&mut self, new_state: SpongeState) { + if self.state != new_state { + self.pad(); + self.state = new_state; + } + } + + /// Pad separates the prior operations by a full permutation. 
+ /// Each incoming message is length-prefixed anyway, but padding + /// enables pre-computing and re-using the oracle state. + fn pad(&mut self) { + // tiny_keccak's API is not very clear, + // so we'd probably need to fork and either document it, or tweak to make it more sensible. + // 1. pad() only adds keccak padding, but does not advance internal offset and + // does not perform a permutation round. + // 2. fill_block() does not pad, but resets the internal offset + // and does a permutation round. + // + // XXX(hdevalence) before this is "production-ready", we + // should sort out what tiny_keccak is actually doing and + // decide on something sensible. Maybe this overlaps with + // Noise NXOF work? + match self.state { + SpongeState::Absorbing => { + self.hash.pad(); + self.hash.fill_block(); + } + SpongeState::Squeezing => { + // in the squeezing state we are not feeding messages, + // only reading portions of a state, so padding does not make sense. + // what we need is to perform computation and reset the internal offset to zero. + self.hash.fill_block(); + } + } + } +} + +#[cfg(test)] +mod tests { + extern crate hex; + use super::*; + + #[test] + fn messages_are_disambiguated_by_length_prefix() { + { + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"msg1msg2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "039f39a21e3cce4a"); + } + } + { + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"msg1"); + ro.commit(b"msg2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "b4c47055cfcec1d2"); + } + } + { + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"msg"); + ro.commit(b"1msg2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "325247ab6948b7a1"); + } + } + { + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"ms"); + ro.commit(b"g1ms"); + ro.commit(b"g2"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "04068112e4fa0f44"); + } + } + } +} diff --git a/src/random_oracle.rs b/src/random_oracle.rs deleted file mode 100644 index a37d10ed..00000000 --- a/src/random_oracle.rs +++ /dev/null @@ -1,167 +0,0 @@ -use curve25519_dalek::scalar::Scalar; -use tiny_keccak::Keccak; - -#[derive(Clone)] -pub struct RandomOracle { - hash: Keccak, - state: SpongeState, -} - -impl RandomOracle { - /// Instantiates a new random oracle with a given label that - /// will be committed as a first message with a padding. - pub fn new(label: &[u8]) -> Self { - let mut ro = RandomOracle { - hash: Keccak::new_shake128(), - state: SpongeState::Absorbing, - }; - ro.commit(label); - // makes sure the label is disambiguated from the rest of the messages. - ro.pad(); - ro - } - - /// Sends a message to a random oracle. - /// Each message must be less than 256 bytes long. - pub fn commit(&mut self, message: &[u8]) { - self.set_state(SpongeState::Absorbing); - let len = message.len(); - if len > 255 { - panic!("Committed message must be less than 256 bytes!"); - } - // we use 1-byte length prefix, hence the limitation on the message size. - let lenprefix = [len as u8; 1]; - self.hash.absorb(&lenprefix); - self.hash.absorb(message); - } - - /// Extracts an arbitrary-sized number of bytes as a challenge. 
- pub fn challenge_bytes(&mut self, mut output: &mut [u8]) { - self.set_state(SpongeState::Squeezing); - self.hash.squeeze(&mut output); - } - - /// Gets a challenge in a form of a scalar by squeezing - /// 64 bytes and reducing them to a scalar. - pub fn challenge_scalar(&mut self) -> Scalar { - let mut buf = [0u8; 64]; - self.challenge_bytes(&mut buf); - Scalar::from_bytes_mod_order_wide(&buf) - } - - /// Ensures that the state is correct. - /// Does necessary padding+permutation if needed to transition from one state to another. - fn set_state(&mut self, newstate: SpongeState) { - if self.state != newstate { - self.pad(); - self.state = newstate; - } - } - - /// Pad separates the prior operations by a full permutation. - /// Each incoming message is length-prefixed anyway, but padding - /// enables pre-computing and re-using the oracle state. - fn pad(&mut self) { - // tiny_keccak's API is not very clear, - // so we'd probably need to fork and either document it, or tweak to make it more sensible. - // 1. pad() only adds keccak padding, but does not advance internal offset and - // does not perform a permutation round. - // 2. fill_block() does not pad, but resets the internal offset - // and does a permutation round. - match self.state { - SpongeState::Absorbing => { - self.hash.pad(); - self.hash.fill_block(); - } - SpongeState::Squeezing => { - // in the squeezing state we are not feeding messages, - // only reading portions of a state, so padding does not make sense. - // what we need is to perform computation and reset the internal offset to zero. - self.hash.fill_block(); - } - } - } -} - -#[derive(Clone, PartialEq)] -enum SpongeState { - Absorbing, - Squeezing, -} - -#[cfg(test)] -mod tests { - extern crate hex; - use super::*; - - #[test] - fn usage_example() { - let mut ro = RandomOracle::new(b"TestProtocol"); - ro.commit(b"msg1"); - ro.commit(b"msg2"); - { - let mut challenge1 = [0u8; 8]; - ro.challenge_bytes(&mut challenge1); - assert_eq!(hex::encode(challenge1), "7f04fadac332ce45"); - } - { - let mut challenge2 = [0u8; 200]; - ro.challenge_bytes(&mut challenge2); - } - { - let mut challenge3 = [0u8; 8]; - ro.challenge_bytes(&mut challenge3); - assert_eq!(hex::encode(challenge3), "2cd86eb9913c0dc7"); - } - ro.commit(b"msg3"); - { - let mut challenge4 = [0u8; 8]; - ro.challenge_bytes(&mut challenge4); - assert_eq!(hex::encode(challenge4), "383c7fc8d7bf8ad3"); - } - } - - #[test] - fn disambiguation() { - { - let mut ro = RandomOracle::new(b"TestProtocol"); - ro.commit(b"msg1msg2"); - { - let mut ch = [0u8; 8]; - ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "42023e04ad4f232c"); - } - } - { - let mut ro = RandomOracle::new(b"TestProtocol"); - ro.commit(b"msg1"); - ro.commit(b"msg2"); - { - let mut ch = [0u8; 8]; - ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "7f04fadac332ce45"); - } - } - { - let mut ro = RandomOracle::new(b"TestProtocol"); - ro.commit(b"msg"); - ro.commit(b"1msg2"); - { - let mut ch = [0u8; 8]; - ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "dbbd832ca1fd3c2f"); - } - } - { - let mut ro = RandomOracle::new(b"TestProtocol"); - ro.commit(b"ms"); - ro.commit(b"g1ms"); - ro.commit(b"g2"); - { - let mut ch = [0u8; 8]; - ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "18860c017b1d28ec"); - } - } - } -} From bafba49c864c97da969589d7ac959449c3dd1fff Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Mon, 12 Mar 2018 12:36:18 -0700 Subject: [PATCH 055/186] rust-toolchain = nightly-2018-03-05 --- rust-toolchain | 1 + 1 
file changed, 1 insertion(+) create mode 100644 rust-toolchain diff --git a/rust-toolchain b/rust-toolchain new file mode 100644 index 00000000..0786e6f1 --- /dev/null +++ b/rust-toolchain @@ -0,0 +1 @@ +nightly-2018-03-05 From 9f9d11009358d8b398fc9c38bc60eab79adc083e Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Wed, 14 Mar 2018 11:27:22 -0700 Subject: [PATCH 056/186] proof_transcript: switch to Keccak with half-duplex support (#16) This switches tiny_keccak to our fork (https://github.com/debris/tiny-keccak/pull/24) which adds half-duplex support (meaning, both absorb and squeeze can be called multiple times, and one can switch from one to another multiple times). This fixes the bug where multiple calls to `squeeze` produced identical outputs. NOTE: We will review this implementation together with PR https://github.com/debris/tiny-keccak/pull/24 later. --- Cargo.toml | 5 +- src/proof_transcript.rs | 129 +++++++++++++++++++++++----------------- 2 files changed, 77 insertions(+), 57 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 629799cd..0274ba0e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,6 @@ authors = ["Cathie "] curve25519-dalek = { version = "^0.15", features = ["nightly"] } sha2 = "^0.7" rand = "^0.4" -tiny-keccak = "^1.4" byteorder = "1.2.1" [dev-dependencies] @@ -16,3 +15,7 @@ hex = "^0.3" [features] yolocrypto = ["curve25519-dalek/yolocrypto"] std = ["curve25519-dalek/std"] + +[dependencies.tiny-keccak] +git = 'https://github.com/chain/tiny-keccak.git' +rev = '5925f81b3c351440283c3328e2345d982aac0f6e' diff --git a/src/proof_transcript.rs b/src/proof_transcript.rs index e4d0762e..d75e7bde 100644 --- a/src/proof_transcript.rs +++ b/src/proof_transcript.rs @@ -6,19 +6,14 @@ use curve25519_dalek::scalar::Scalar; -// XXX fix up deps (see comment below re: "api somewhat unclear") +// XXX This uses experiment fork of tiny_keccak with half-duplex +// support that we require in this implementation. +// Review this after this PR is merged or updated: +// https://github.com/debris/tiny-keccak/pull/24 use tiny_keccak::Keccak; use byteorder::{ByteOrder, LittleEndian}; -/// The `SpongeState` enum describes the state of the internal sponge -/// used by a `ProofTranscript`. -#[derive(Copy, Clone, Eq, PartialEq, Debug)] -enum SpongeState { - Absorbing, - Squeezing, -} - /// The `ProofTranscript` struct represents a transcript of messages /// between a prover and verifier engaged in a public-coin argument. /// @@ -66,17 +61,13 @@ enum SpongeState { #[derive(Clone)] pub struct ProofTranscript { hash: Keccak, - state: SpongeState, } impl ProofTranscript { - /// Begin an new, empty proof transcript, using the given `label` + /// Begin a new, empty proof transcript, using the given `label` /// for domain separation. pub fn new(label: &[u8]) -> Self { - let mut ro = ProofTranscript { - hash: Keccak::new_shake128(), - state: SpongeState::Absorbing, - }; + let mut ro = ProofTranscript { hash: Keccak::new_shake128() }; ro.commit(label); // makes sure the label is disambiguated from the rest of the messages. ro.pad(); @@ -89,7 +80,6 @@ impl ProofTranscript { /// /// Each message must be shorter than 64Kb (65536 bytes). 
pub fn commit(&mut self, message: &[u8]) { - self.set_state(SpongeState::Absorbing); let len = message.len(); if len > (u16::max_value() as usize) { @@ -99,6 +89,10 @@ impl ProofTranscript { let mut len_prefix = [0u8; 2]; LittleEndian::write_u16(&mut len_prefix, len as u16); + // XXX we rely on tiny_keccak experimental support for half-duplex mode and + // correct switching from absorbing to squeezing and back. + // Review this after this PR is merged or updated: + // https://github.com/debris/tiny-keccak/pull/24 self.hash.absorb(&len_prefix); self.hash.absorb(message); } @@ -116,7 +110,11 @@ impl ProofTranscript { /// Extracts an arbitrary-sized challenge byte slice. pub fn challenge_bytes(&mut self, mut output: &mut [u8]) { - self.set_state(SpongeState::Squeezing); + + // XXX we rely on tiny_keccak experimental support for half-duplex mode and + // correct switching from absorbing to squeezing and back. + // Review this after this PR is merged or updated: + // https://github.com/debris/tiny-keccak/pull/24 self.hash.squeeze(&mut output); } @@ -130,44 +128,12 @@ impl ProofTranscript { Scalar::from_bytes_mod_order_wide(&buf) } - /// Set the sponge state to `new_state`. - /// - /// Does necessary padding+permutation if needed to transition - /// from one state to another. - fn set_state(&mut self, new_state: SpongeState) { - if self.state != new_state { - self.pad(); - self.state = new_state; - } - } - - /// Pad separates the prior operations by a full permutation. + /// Pad separates the prior operations by padding + /// the rest of the block with zeroes and applying a permutation. /// Each incoming message is length-prefixed anyway, but padding /// enables pre-computing and re-using the oracle state. fn pad(&mut self) { - // tiny_keccak's API is not very clear, - // so we'd probably need to fork and either document it, or tweak to make it more sensible. - // 1. pad() only adds keccak padding, but does not advance internal offset and - // does not perform a permutation round. - // 2. fill_block() does not pad, but resets the internal offset - // and does a permutation round. - // - // XXX(hdevalence) before this is "production-ready", we - // should sort out what tiny_keccak is actually doing and - // decide on something sensible. Maybe this overlaps with - // Noise NXOF work? - match self.state { - SpongeState::Absorbing => { - self.hash.pad(); - self.hash.fill_block(); - } - SpongeState::Squeezing => { - // in the squeezing state we are not feeding messages, - // only reading portions of a state, so padding does not make sense. - // what we need is to perform computation and reset the internal offset to zero. 
- self.hash.fill_block(); - } - } + self.hash.fill_block(); } } @@ -176,6 +142,57 @@ mod tests { extern crate hex; use super::*; + #[test] + fn challenges_must_be_random() { + { + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"test"); + { + let mut ch = [0u8; 32]; + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "9ba30a0e71e8632b55fbae92495440b6afb5d2646ba6b1bb419933d97e06b810" + ); + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "add523844517c2320fc23ca72423b0ee072c6d076b05a6a7b6f46d8d2e322f94" + ); + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "ac279a11cac0b1271d210592c552d719d82d67c82d7f86772ed7bc6618b0927c" + ); + } + + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"test"); + { + let mut ch = [0u8; 16]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "9ba30a0e71e8632b55fbae92495440b6"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "afb5d2646ba6b1bb419933d97e06b810"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "add523844517c2320fc23ca72423b0ee"); + } + + let mut ro = ProofTranscript::new(b"TestProtocol"); + ro.commit(b"test"); + { + let mut ch = [0u8; 16]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "9ba30a0e71e8632b55fbae92495440b6"); + ro.commit(b"extra commitment"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "11536e09cedbb6b302d8c7cd96471be5"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "058c383da5f2e193a381aaa420b505b2"); + } + } + } + #[test] fn messages_are_disambiguated_by_length_prefix() { { @@ -184,7 +201,7 @@ mod tests { { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "039f39a21e3cce4a"); + assert_eq!(hex::encode(ch), "1ad843ea2bf7f8b6"); } } { @@ -194,7 +211,7 @@ mod tests { { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "b4c47055cfcec1d2"); + assert_eq!(hex::encode(ch), "79abbe29d8c33bb0"); } } { @@ -204,7 +221,7 @@ mod tests { { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "325247ab6948b7a1"); + assert_eq!(hex::encode(ch), "f88d0f790cde50d5"); } } { @@ -215,7 +232,7 @@ mod tests { { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "04068112e4fa0f44"); + assert_eq!(hex::encode(ch), "90ca22b443fb78a1"); } } } From 48d884cd320c90792d6712081f0cf4da28f0d68b Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Wed, 14 Mar 2018 16:00:37 -0700 Subject: [PATCH 057/186] Add API for generators (#15) This introduces `Generators` and `GeneratorsView` types. `Generators` are concrete sets of points for all parties (if many). `GeneratorsView` is a view to a specific slice of the generators. --- src/generators.rs | 213 +++++++++++++++++++++++++++++++++++++ src/inner_product_proof.rs | 6 +- src/lib.rs | 7 +- 3 files changed, 222 insertions(+), 4 deletions(-) create mode 100644 src/generators.rs diff --git a/src/generators.rs b/src/generators.rs new file mode 100644 index 00000000..ad537a28 --- /dev/null +++ b/src/generators.rs @@ -0,0 +1,213 @@ +//! The `generators` module contains API for producing a +//! set of generators for a rangeproof. +//! +//! +//! # Example +//! +//! ``` +//! # extern crate ristretto_bulletproofs; +//! # use ristretto_bulletproofs::generators::Generators; +//! # fn main() { +//! +//! let generators = Generators::new(64,1); +//! let view = generators.all(); +//! let G0 = view.G[0]; +//! let H0 = view.H[0]; +//! +//! # } +//! 
``` + +#![allow(non_snake_case)] +#![deny(missing_docs)] + +// XXX we should use Sha3 everywhere + +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; +use sha2::{Digest, Sha256}; + +/// The `GeneratorsChain` creates an arbitrary-long sequence of orthogonal generators. +/// The sequence can be deterministically produced starting with an arbitrary point. +struct GeneratorsChain { + next_point: RistrettoPoint, +} + +impl GeneratorsChain { + /// Creates a chain of generators, determined by the hash of `label`. + fn new(label: &[u8]) -> Self { + let mut hash = Sha256::default(); + hash.input(b"GeneratorsChainInit"); + hash.input(label); + let next_point = RistrettoPoint::from_hash(hash); + GeneratorsChain { next_point } + } +} + +impl Default for GeneratorsChain { + fn default() -> Self { + Self::new(&[]) + } +} + +impl Iterator for GeneratorsChain { + type Item = RistrettoPoint; + fn next(&mut self) -> Option { + let current_point = self.next_point; + let mut hash = Sha256::default(); + hash.input(b"GeneratorsChainNext"); + hash.input(current_point.compress().as_bytes()); + self.next_point = RistrettoPoint::from_hash(hash); + Some(current_point) + } +} + +/// `Generators` contains all the generators needed for aggregating `m` range proofs of `n` bits each. +#[derive(Clone)] +pub struct Generators { + /// Number of bits in a rangeproof + pub n: usize, + /// Number of values or parties + pub m: usize, + /// Main base of a Pedersen commitment + B: RistrettoPoint, + /// Base for the blinding factor in a Pedersen commitment + B_blinding: RistrettoPoint, + /// Per-bit generators for the bit values + G: Vec, + /// Per-bit generators for the bit blinding factors + H: Vec, +} + +/// Represents a view into `Generators` relevant to a specific range proof. +pub struct GeneratorsView<'a> { + /// Main base of a Pedersen commitment + pub B: &'a RistrettoPoint, + /// Base for the blinding factor in a Pedersen commitment + pub B_blinding: &'a RistrettoPoint, + /// Per-bit generators for the bit values + pub G: &'a [RistrettoPoint], + /// Per-bit generators for the bit blinding factors + pub H: &'a [RistrettoPoint], +} + +impl Generators { + /// Creates generators for `m` range proofs of `n` bits each. + pub fn new(n: usize, m: usize) -> Self { + // Using unwrap is safe here, because the iterator is unbounded. + let B = GeneratorsChain::new(b"Bulletproofs.Generators.B") + .next() + .unwrap(); + let B_blinding = GeneratorsChain::new(b"Bulletproofs.Generators.B_blinding") + .next() + .unwrap(); + + let G = GeneratorsChain::new(b"Bulletproofs.Generators.G") + .take(n * m) + .collect(); + let H = GeneratorsChain::new(b"Bulletproofs.Generators.H") + .take(n * m) + .collect(); + + Generators { + n, + m, + B, + B_blinding, + G, + H, + } + } + + /// Returns a view into the entirety of the generators. + pub fn all(&self) -> GeneratorsView { + GeneratorsView { + B: &self.B, + B_blinding: &self.B_blinding, + G: &self.G[..], + H: &self.H[..], + } + } + + /// Returns j-th share of generators, with an appropriate + /// slice of vectors G and H for the j-th range proof. 
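+    ///
+    /// # Example
+    ///
+    /// A small sketch of how a per-party share is meant to be used; the
+    /// sizes are illustrative only:
+    ///
+    /// ```ignore
+    /// let gens = Generators::new(64, 2);
+    /// let party_1 = gens.share(1);
+    /// // party_1.G and party_1.H hold the 64 per-bit generators for party 1,
+    /// // i.e. the slice [64..128) of the full generator vectors
+    /// assert_eq!(party_1.G.len(), 64);
+    /// assert_eq!(party_1.H.len(), 64);
+    /// ```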
+ pub fn share(&self, j: usize) -> GeneratorsView { + let lower = self.n * j; + let upper = self.n * (j + 1); + GeneratorsView { + B: &self.B, + B_blinding: &self.B_blinding, + G: &self.G[lower..upper], + H: &self.H[lower..upper], + } + } +} + +#[cfg(test)] +mod tests { + extern crate hex; + use super::*; + + #[test] + fn rangeproof_generators() { + let n = 2; + let m = 3; + let gens = Generators::new(n, m); + + // The concatenation of shares must be the full generator set + assert_eq!( + [gens.all().G[..n].to_vec(), gens.all().H[..n].to_vec()], + [gens.share(0).G[..].to_vec(), gens.share(0).H[..].to_vec()] + ); + assert_eq!( + [ + gens.all().G[n..][..n].to_vec(), + gens.all().H[n..][..n].to_vec(), + ], + [gens.share(1).G[..].to_vec(), gens.share(1).H[..].to_vec()] + ); + assert_eq!( + [ + gens.all().G[2 * n..][..n].to_vec(), + gens.all().H[2 * n..][..n].to_vec(), + ], + [gens.share(2).G[..].to_vec(), gens.share(2).H[..].to_vec()] + ); + } + + #[test] + fn generator_orthogonality() { + let n = 2; + let m = 1; + let gens = Generators::new(n, m); + let view = gens.all(); + + assert_eq!( + hex::encode(RISTRETTO_BASEPOINT_POINT.compress().as_bytes()), + "e2f2ae0a6abc4e71a884a961c500515f58e30b6aa582dd8db6a65945e08d2d76" + ); + assert_eq!( + hex::encode(view.B.compress().as_bytes()), + "6abd9de445ed16637be32da51bbd3fa114f984c52081258a1f476c8493f09731" + ); + assert_eq!( + hex::encode(view.B_blinding.compress().as_bytes()), + "5c97d2b3cd6994ae1a4d6bd7371b40800b6a28afb1db14b81b4b5107ed9c5478" + ); + assert_eq!( + hex::encode(view.G[0].compress().as_bytes()), + "688bac289f5e4ed902648278b4e81a2b8a028365b0a7753fd0242e499bd6200e" + ); + assert_eq!( + hex::encode(view.G[1].compress().as_bytes()), + "7e49425c91464e4b3aa4c4676e7deba7e91d1cfd1a19a0a39dfd73b0cecdb55c" + ); + assert_eq!( + hex::encode(view.H[0].compress().as_bytes()), + "50140daade760912586d04be961dab5d723d1aba05b536b13b99f69225ea4002" + ); + assert_eq!( + hex::encode(view.H[1].compress().as_bytes()), + "ac23f3c0964e8bb1b9c61869edbb39c4417a96d518715d2e3e60a03cd722d13d" + ); + } +} diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index c80d0d1d..3c805ecb 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -183,7 +183,11 @@ impl Proof { let expect_P = ristretto::vartime::multiscalar_mult(scalar_iter, points_iter); - if expect_P == *P { Ok(()) } else { Err(()) } + if expect_P == *P { + Ok(()) + } else { + Err(()) + } } } diff --git a/src/lib.rs b/src/lib.rs index 9cfcc8cd..b2a63f8b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,21 +1,22 @@ #![cfg_attr(feature = "bench", feature(test))] #![feature(nll)] - #![feature(test)] +extern crate byteorder; extern crate curve25519_dalek; -extern crate sha2; extern crate rand; -extern crate byteorder; +extern crate sha2; extern crate tiny_keccak; #[cfg(test)] extern crate test; pub mod proof_transcript; +pub mod generators; mod range_proof; mod inner_product_proof; pub mod scalar; pub use range_proof::*; +pub use generators::*; From 843635f20d6a739ddcb9ae415c1cc33a6550131a Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Wed, 14 Mar 2018 16:11:26 -0700 Subject: [PATCH 058/186] cleanup some warnings in scalar.rs and generators.rs (#18) This cleans up some warnings, but not all to avoid conflicts with #17. 
--- src/generators.rs | 2 +- src/scalar.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/generators.rs b/src/generators.rs index ad537a28..94bd77b0 100644 --- a/src/generators.rs +++ b/src/generators.rs @@ -23,7 +23,6 @@ // XXX we should use Sha3 everywhere use curve25519_dalek::ristretto::RistrettoPoint; -use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; use sha2::{Digest, Sha256}; /// The `GeneratorsChain` creates an arbitrary-long sequence of orthogonal generators. @@ -146,6 +145,7 @@ impl Generators { mod tests { extern crate hex; use super::*; + use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; #[test] fn rangeproof_generators() { diff --git a/src/scalar.rs b/src/scalar.rs index edcf6360..531d89c7 100644 --- a/src/scalar.rs +++ b/src/scalar.rs @@ -63,20 +63,20 @@ mod test { #[test] fn batch_invert_matches_nonbatched() { - let W = Scalar::from_bits( + let w = Scalar::from_bits( *b"\x84\xfc\xbcOx\x12\xa0\x06\xd7\x91\xd9z:'\xdd\x1e!CE\xf7\xb1\xb9Vz\x810sD\x96\x85\xb5\x07", ); - let X = Scalar::from_bits( + let x = Scalar::from_bits( *b"NZ\xb44]G\x08\x84Y\x13\xb4d\x1b\xc2}RR\xa5\x85\x10\x1b\xccBD\xd4I\xf4\xa8y\xd9\xf2\x04", ); - let Y = Scalar::from_bits( + let y = Scalar::from_bits( *b"\x90v3\xfe\x1cKf\xa4\xa2\x8d-\xd7g\x83\x86\xc3S\xd0\xdeTU\xd4\xfc\x9d\xe8\xefz\xc3\x1f5\xbb\x05", ); - let Z = Scalar::from_bits( + let z = Scalar::from_bits( *b"\x05\x9d>\x0b\t&P=\xa3\x84\xa1<\x92z\xc2\x06A\x98\xcf4:$\xd5\xb7\xeb3j-\xfc\x11!\x0b", ); - let list = vec![W, X, Y, Z, W * Y, X * Z, Y * Y, W * Z]; + let list = vec![w, x, y, z, w * y, x * z, y * y, w * z]; let mut inv_list = list.clone(); batch_invert(&mut inv_list[..]); for i in 0..8 { From a1e5ac5ec38d9930101683a4567e8d101365a600 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 12 Mar 2018 14:05:52 -0700 Subject: [PATCH 059/186] temporarily use dalek develop --- Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 0274ba0e..3941d6c4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,3 +19,6 @@ std = ["curve25519-dalek/std"] [dependencies.tiny-keccak] git = 'https://github.com/chain/tiny-keccak.git' rev = '5925f81b3c351440283c3328e2345d982aac0f6e' + +[patch.crates-io] +curve25519-dalek = { git = 'https://github.com/dalek-cryptography/curve25519-dalek', branch = 'develop' } From 180c8878a6b33b109c9582a3f8d5e9df3e15ee22 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 12 Mar 2018 14:27:25 -0700 Subject: [PATCH 060/186] Add adjustment factor for H bases to IPP API Does not yet inline the extra scalar mults into the IPP loop, but makes it possible to do so in the future. --- src/inner_product_proof.rs | 71 +++++++++++++++++++++++++++++++------- 1 file changed, 59 insertions(+), 12 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 3c805ecb..9535756c 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -1,6 +1,8 @@ #![allow(non_snake_case)] use std::iter; +use std::borrow::Borrow; + use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; use curve25519_dalek::scalar::Scalar; @@ -25,18 +27,26 @@ pub struct Proof { impl Proof { /// Create an inner-product proof. /// + /// The proof is created with respect to the bases G, Hprime, + /// where Hprime[i] = H[i] * Hprime_factors[i]. + /// /// The `verifier` is passed in as a parameter so that the /// challenges depend on the *entire* transcript (including parent /// protocols). 
- pub fn create( + pub fn create( verifier: &mut ProofTranscript, P: &RistrettoPoint, Q: &RistrettoPoint, + Hprime_factors: I, mut G_vec: Vec, mut H_vec: Vec, mut a_vec: Vec, mut b_vec: Vec, - ) -> Proof { + ) -> Proof + where + I: IntoIterator, + I::Item: Borrow, + { // Create slices G, H, a, b backed by their respective // vectors. This lets us reslice as we compress the lengths // of the vectors in the main loop below. @@ -53,6 +63,12 @@ impl Proof { assert_eq!(a.len(), n); assert_eq!(b.len(), n); + // XXX save these scalar mults by unrolling them into the + // first iteration of the loop below + for (H_i, h_i) in H.iter_mut().zip(Hprime_factors.into_iter()) { + *H_i = (&*H_i) * h_i.borrow(); + } + let lg_n = n.next_power_of_two().trailing_zeros() as usize; let mut L_vec = Vec::with_capacity(lg_n); let mut R_vec = Vec::with_capacity(lg_n); @@ -107,14 +123,19 @@ impl Proof { }; } - fn verify( + fn verify( &self, verifier: &mut ProofTranscript, + Hprime_factors: I, P: &RistrettoPoint, Q: &RistrettoPoint, G_vec: &Vec, H_vec: &Vec, - ) -> Result<(), ()> { + ) -> Result<(), ()> + where + I: IntoIterator, + I::Item: Borrow, + { // XXX prover should commit to n let lg_n = self.L_vec.len(); let n = 1 << lg_n; @@ -160,7 +181,11 @@ impl Proof { let a_times_s: Vec<_> = s.iter().map(|s_i| self.a * s_i).collect(); - let b_div_s: Vec<_> = s.iter().rev().map(|s_i_inv| self.b * s_i_inv).collect(); + let b_div_s_times_h: Vec<_> = s.iter() + .rev() + .zip(Hprime_factors.into_iter()) + .map(|(s_i_inv, h_i)| (self.b * s_i_inv) * h_i.borrow()) + .collect(); let neg_x_sq: Vec<_> = challenges_sq.iter().map(|x| -x).collect(); @@ -171,7 +196,7 @@ impl Proof { let scalar_iter = iter::once(&ab) .chain(a_times_s.iter()) - .chain(b_div_s.iter()) + .chain(b_div_s_times_h.iter()) .chain(neg_x_sq.iter()) .chain(neg_x_inv_sq.iter()); @@ -195,28 +220,41 @@ impl Proof { mod tests { use super::*; + use rand::OsRng; + fn test_helper_create(n: usize, expected_a: &[u8; 32], expected_b: &[u8; 32]) { - let mut verifier = ProofTranscript::new(b"innerproducttest"); + let mut rng = OsRng::new().unwrap(); + let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); let G_vec = make_generators(G, n); let H_vec = make_generators(H, n); - let a_vec = vec![Scalar::from_u64(1); n]; - let b_vec = vec![Scalar::from_u64(2); n]; + let a_vec = vec![Scalar::from_u64(982345); n]; + let b_vec = vec![Scalar::from_u64(827394); n]; + + let H_adjustments: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); let Q = RistrettoPoint::hash_from_bytes::(b"test point"); let c = inner_product(&a_vec, &b_vec); + let b_adj: Vec<_> = b_vec + .iter() + .zip(H_adjustments.iter()) + .map(|(b_i, h_i)| b_i * h_i) + .collect(); + let P = ristretto::vartime::multiscalar_mult( - a_vec.iter().chain(b_vec.iter()).chain(iter::once(&c)), + a_vec.iter().chain(b_adj.iter()).chain(iter::once(&c)), G_vec.iter().chain(H_vec.iter()).chain(iter::once(&Q)), ); + let mut verifier = ProofTranscript::new(b"innerproducttest"); let proof = Proof::create( &mut verifier, &P, &Q, + &H_adjustments, G_vec.clone(), H_vec.clone(), a_vec.clone(), @@ -224,7 +262,11 @@ mod tests { ); let mut verifier = ProofTranscript::new(b"innerproducttest"); - assert!(proof.verify(&mut verifier, &P, &Q, &G_vec, &H_vec).is_ok()); + assert!( + proof + .verify(&mut verifier, &H_adjustments, &P, &Q, &G_vec, &H_vec) + .is_ok() + ); //assert_eq!(proof.a.as_bytes(), expected_a); //assert_eq!(proof.b.as_bytes(), expected_b); @@ -282,12 
+324,14 @@ mod bench { let P = RistrettoPoint::hash_from_bytes::("points".as_bytes()); let a_vec = vec![Scalar::from_u64(1); n]; let b_vec = vec![Scalar::from_u64(2); n]; + let ones = vec![Scalar::from_u64(1); n]; b.iter(|| { Proof::create( &mut verifier, &P, &Q, + &ones, G_vec.clone(), H_vec.clone(), a_vec.clone(), @@ -314,10 +358,13 @@ mod bench { G_vec.iter().chain(H_vec.iter()).chain(iter::once(&Q)), ); + let ones = vec![Scalar::from_u64(1); n]; + let proof = Proof::create( &mut verifier, &P, &Q, + &ones, G_vec.clone(), H_vec.clone(), a_vec.clone(), @@ -325,7 +372,7 @@ mod bench { ); let mut verifier = ProofTranscript::new(b"innerproducttest"); - b.iter(|| proof.verify(&mut verifier, &P, &Q, &G_vec, &H_vec)); + b.iter(|| proof.verify(&mut verifier, &ones, &P, &Q, &G_vec, &H_vec)); } #[bench] From 6651b1396191172d9976f7fccc856fff96e44b0e Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 13 Mar 2018 11:16:16 -0700 Subject: [PATCH 061/186] The point P is not used in proof creation --- src/inner_product_proof.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 9535756c..f68d46a9 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -35,7 +35,6 @@ impl Proof { /// protocols). pub fn create( verifier: &mut ProofTranscript, - P: &RistrettoPoint, Q: &RistrettoPoint, Hprime_factors: I, mut G_vec: Vec, @@ -252,7 +251,6 @@ mod tests { let mut verifier = ProofTranscript::new(b"innerproducttest"); let proof = Proof::create( &mut verifier, - &P, &Q, &H_adjustments, G_vec.clone(), @@ -329,7 +327,6 @@ mod bench { b.iter(|| { Proof::create( &mut verifier, - &P, &Q, &ones, G_vec.clone(), @@ -362,7 +359,6 @@ mod bench { let proof = Proof::create( &mut verifier, - &P, &Q, &ones, G_vec.clone(), From 48b8372c162e01dbad8eafee9f0e07001af70a38 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 13 Mar 2018 14:47:48 -0700 Subject: [PATCH 062/186] Add a util module, with an iterator for scalar powers. --- src/lib.rs | 2 ++ src/util.rs | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 src/util.rs diff --git a/src/lib.rs b/src/lib.rs index b2a63f8b..321b2695 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -11,6 +11,8 @@ extern crate tiny_keccak; #[cfg(test)] extern crate test; +mod util; + pub mod proof_transcript; pub mod generators; mod range_proof; diff --git a/src/util.rs b/src/util.rs new file mode 100644 index 00000000..59b445c8 --- /dev/null +++ b/src/util.rs @@ -0,0 +1,43 @@ +#![deny(missing_docs)] +#![allow(non_snake_case)] + +use curve25519_dalek::scalar::Scalar; + +/// Provides an iterator over the powers of a `Scalar`. +/// +/// This struct is created by the `exp_iter` function. +pub struct ScalarExp { + x: Scalar, + next_exp_x: Scalar, +} + +impl Iterator for ScalarExp { + type Item = Scalar; + + fn next(&mut self) -> Option { + let exp_x = self.next_exp_x; + self.next_exp_x *= self.x; + Some(exp_x) + } +} + +/// Return an iterator of the powers of `x`. 
+pub fn exp_iter(x: Scalar) -> ScalarExp { + let next_exp_x = Scalar::one(); + ScalarExp{ x, next_exp_x } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn exp_2_is_powers_of_2() { + let exp_2: Vec<_> = exp_iter(Scalar::from_u64(2)).take(4).collect(); + + assert_eq!(exp_2[0], Scalar::from_u64(1)); + assert_eq!(exp_2[1], Scalar::from_u64(2)); + assert_eq!(exp_2[2], Scalar::from_u64(4)); + assert_eq!(exp_2[3], Scalar::from_u64(8)); + } +} From cd341afc308e4950f23497cf5dc7d13b769176e4 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 13 Mar 2018 15:08:48 -0700 Subject: [PATCH 063/186] Test IPP generation with RHS adjustment exponents --- src/inner_product_proof.rs | 82 ++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 43 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index f68d46a9..bbf7ff02 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -12,6 +12,8 @@ use scalar; use proof_transcript::ProofTranscript; +use util; + use range_proof::inner_product; use range_proof::make_generators; @@ -221,88 +223,82 @@ mod tests { use rand::OsRng; - fn test_helper_create(n: usize, expected_a: &[u8; 32], expected_b: &[u8; 32]) { + fn test_helper_create(n: usize) { let mut rng = OsRng::new().unwrap(); - let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); - let G_vec = make_generators(G, n); - let H_vec = make_generators(H, n); + // XXX fix up generators + let B = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let B_blinding = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); + let G = make_generators(B, n); + let H = make_generators(B_blinding, n); - let a_vec = vec![Scalar::from_u64(982345); n]; - let b_vec = vec![Scalar::from_u64(827394); n]; + // Q would be determined upstream in the protocol, so we pick a random one. + let Q = RistrettoPoint::hash_from_bytes::(b"test point"); - let H_adjustments: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); + // a and b are the vectors for which we want to prove c = + let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); + let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); + let c = inner_product(&a, &b); - let Q = RistrettoPoint::hash_from_bytes::(b"test point"); - let c = inner_product(&a_vec, &b_vec); + // y_inv is (the inverse of) a random challenge + let y_inv = Scalar::random(&mut rng); - let b_adj: Vec<_> = b_vec - .iter() - .zip(H_adjustments.iter()) - .map(|(b_i, h_i)| b_i * h_i) - .collect(); + // P would be determined upstream, but we need a correct P to check the proof. 
+ // + // To generate P = + + Q, compute + // P = + + Q, + // where b' = b \circ y^(-n) + let b_prime = b.iter().zip(util::exp_iter(y_inv)).map(|(bi, yi)| bi * yi); + // a.iter() has Item=&Scalar, need Item=Scalar to chain with b_prime + let a_prime = a.iter().cloned(); let P = ristretto::vartime::multiscalar_mult( - a_vec.iter().chain(b_adj.iter()).chain(iter::once(&c)), - G_vec.iter().chain(H_vec.iter()).chain(iter::once(&Q)), + a_prime.chain(b_prime).chain(iter::once(c)), + G.iter().chain(H.iter()).chain(iter::once(&Q)), ); let mut verifier = ProofTranscript::new(b"innerproducttest"); let proof = Proof::create( &mut verifier, &Q, - &H_adjustments, - G_vec.clone(), - H_vec.clone(), - a_vec.clone(), - b_vec.clone(), + util::exp_iter(y_inv), + G.clone(), + H.clone(), + a.clone(), + b.clone(), ); let mut verifier = ProofTranscript::new(b"innerproducttest"); assert!( proof - .verify(&mut verifier, &H_adjustments, &P, &Q, &G_vec, &H_vec) + .verify(&mut verifier, util::exp_iter(y_inv), &P, &Q, &G, &H) .is_ok() ); - - //assert_eq!(proof.a.as_bytes(), expected_a); - //assert_eq!(proof.b.as_bytes(), expected_b); } #[test] fn make_ipp_1() { - test_helper_create(1, &[0; 32], &[0; 32]); + test_helper_create(1); } #[test] fn make_ipp_2() { - test_helper_create(2, &[0; 32], &[0; 32]); + test_helper_create(2); } #[test] fn make_ipp_4() { - test_helper_create(4, &[0; 32], &[0; 32]); + test_helper_create(4); } #[test] - fn make_ipp_64() { - // These test vectors don't have a ground truth, they're just to catch accidental changes to the computation. - test_helper_create( - 64, - b"=\xa2\xed\xd2i\x1a\xb3'oF\xba:S\x12.\xbd)\xe1F\xbeI\xb4+\x11V&\xa6\xae\x1fGd\x04", - b"zD\xdb\xa5\xd34fO\xde\x8ctu\xa6$\\zS\xc2\x8d|\x93hW\"\xacLL]?\x8e\xc8\x08", - ); + fn make_ipp_32() { + test_helper_create(32); } #[test] - fn make_ipp_32() { - // These test vectors don't have a ground truth, they're just to catch accidental changes to the computation. 
- test_helper_create( - 32, - b"l\xa3\xa8\xda\xca\xf9\xdbec|i\xb32i\xc0'\xc3H\xde+\xa0P\x0e;.\xf5\x9cf'?\xa6\n", - b"\xebr[X{\x90\xa5s\xf0[\xdb\xc3\x86\xd8\xa1:\x86\x91\xbcW@\xa1\x1cv\\\xea9\xcdN~L\x05", - ); + fn make_ipp_64() { + test_helper_create(64); } } From 0990a2b3304b510425d084ccf93f620f0486f644 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 13 Mar 2018 15:19:21 -0700 Subject: [PATCH 064/186] Use iterators to remove allocs from IPP verification --- src/inner_product_proof.rs | 50 +++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index bbf7ff02..fd811cf0 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -165,6 +165,7 @@ impl Proof { let mut s = Vec::with_capacity(n); for i in 0..n { let mut s_i = allinv; + // XXX remove this loop via the bit twiddling mentioned in the paper for j in 0..lg_n { if bit(i, j) == 1 { // The challenges are stored in "creation order" as [x_k,...,x_1] @@ -175,39 +176,32 @@ impl Proof { } let s = s; - // so many allocs :( - // these were supposed to be iterators but the dalek trait doesn't accept values + let a_times_s = s.iter().map(|s_i| self.a * s_i); - let ab = self.a * self.b; + // 1/s[i] is s[!i], and !i runs from n-1 to 0 as i runs from 0 to n-1 + let inv_s = s.iter().rev(); - let a_times_s: Vec<_> = s.iter().map(|s_i| self.a * s_i).collect(); + let h_times_b_div_s = Hprime_factors + .into_iter() + .zip(inv_s) + .map(|(h_i, s_i_inv)| (self.b * s_i_inv) * h_i.borrow()); - let b_div_s_times_h: Vec<_> = s.iter() - .rev() - .zip(Hprime_factors.into_iter()) - .map(|(s_i_inv, h_i)| (self.b * s_i_inv) * h_i.borrow()) - .collect(); + let neg_x_sq = challenges_sq.iter().map(|x| -x); - let neg_x_sq: Vec<_> = challenges_sq.iter().map(|x| -x).collect(); + let neg_x_inv_sq = inv_challenges.iter().map(|x_inv| -(x_inv * x_inv)); - let neg_x_inv_sq: Vec<_> = inv_challenges - .iter() - .map(|x_inv| -(x_inv * x_inv)) - .collect(); - - let scalar_iter = iter::once(&ab) - .chain(a_times_s.iter()) - .chain(b_div_s_times_h.iter()) - .chain(neg_x_sq.iter()) - .chain(neg_x_inv_sq.iter()); - - let points_iter = iter::once(Q) - .chain(G_vec.iter()) - .chain(H_vec.iter()) - .chain(self.L_vec.iter()) - .chain(self.R_vec.iter()); - - let expect_P = ristretto::vartime::multiscalar_mult(scalar_iter, points_iter); + let expect_P = ristretto::vartime::multiscalar_mult( + iter::once(self.a * self.b) + .chain(a_times_s) + .chain(h_times_b_div_s) + .chain(neg_x_sq) + .chain(neg_x_inv_sq), + iter::once(Q) + .chain(G_vec.iter()) + .chain(H_vec.iter()) + .chain(self.L_vec.iter()) + .chain(self.R_vec.iter()), + ); if expect_P == *P { Ok(()) From 59934f3494e0fff2d987b26a31fa25882f067ba2 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 13 Mar 2018 12:12:39 -0700 Subject: [PATCH 065/186] Use the inner-product proof for range proofs. 
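For orientation, a summary of the relations the new prover code below commits to, following the Bulletproofs paper rather than text from the patch itself. Writing a_L for the bit vector of v and a_R = a_L - 1^n, the prover blinds them with random vectors s_L, s_R and forms the vector polynomials

$$
l(x) = (a_L - z \cdot 1^n) + s_L \cdot x, \qquad
r(x) = y^n \circ (a_R + z \cdot 1^n + s_R \cdot x) + z^2 \cdot 2^n,
$$

so that t(x) = <l(x), r(x)> = t_0 + t_1 x + t_2 x^2 satisfies

$$
t_0 = z^2 v + \delta(y, z), \qquad
\delta(y, z) = (z - z^2) \langle 1^n, y^n \rangle - z^3 \langle 1^n, 2^n \rangle,
$$

whenever a_L is a valid bit decomposition of v. The coefficients t_1 and t_2 are committed in T_1 and T_2, and the remaining claim <l(x), r(x)> = t(x) is delegated to the inner-product proof over the bases G and H', where H'[i] = y^(-i) * H[i]; this is why the Hprime_factors introduced in the previous patches are the powers of y^(-1).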
--- src/inner_product_proof.rs | 3 +- src/range_proof.rs | 587 ++++++++++++++++++++++--------------- src/util.rs | 4 +- 3 files changed, 356 insertions(+), 238 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index fd811cf0..dfcf95d9 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -19,6 +19,7 @@ use range_proof::make_generators; use sha2::Sha256; +#[derive(Clone, Debug)] pub struct Proof { L_vec: Vec, R_vec: Vec, @@ -124,7 +125,7 @@ impl Proof { }; } - fn verify( + pub fn verify( &self, verifier: &mut ProofTranscript, Hprime_factors: I, diff --git a/src/range_proof.rs b/src/range_proof.rs index ba4cd3fd..bda2516b 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -1,59 +1,78 @@ #![allow(non_snake_case)] +use rand::Rng; + use std::iter; + use sha2::{Digest, Sha256, Sha512}; + use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; -use curve25519_dalek::traits::Identity; +use curve25519_dalek::traits::{Identity, IsIdentity}; use curve25519_dalek::scalar::Scalar; -use rand::OsRng; -struct PolyDeg3(Scalar, Scalar, Scalar); +// XXX rename this maybe ?? at least `inner_product_proof::Proof` is too long. +// maybe `use inner_product_proof::IPProof` would be better? +use inner_product_proof; -struct VecPoly2(Vec, Vec); +use proof_transcript::ProofTranscript; +use util; -pub struct RangeProof { - t_x_blinding: Scalar, - e_blinding: Scalar, - t: Scalar, +struct PolyDeg3(Scalar, Scalar, Scalar); - // don't need if doing inner product proof - l: Vec, - r: Vec, +struct VecPoly2(Vec, Vec); - // committed values +/// The `RangeProof` struct represents a single range proof. +#[derive(Clone, Debug)] +pub struct RangeProof { + /// Commitment to the value + // XXX this should not be included, so that we can prove about existing commitments + // included for now so that it's easier to test V: RistrettoPoint, + /// Commitment to the bits of the value A: RistrettoPoint, + /// Commitment to the blinding factors S: RistrettoPoint, + /// Commitment to the \\(t_1\\) coefficient of \\( t(x) \\) T_1: RistrettoPoint, + /// Commitment to the \\(t_2\\) coefficient of \\( t(x) \\) T_2: RistrettoPoint, - - // public knowledge - n: usize, - B: RistrettoPoint, - B_blinding: RistrettoPoint, + /// Evaluation of the polynomial \\(t(x)\\) at the challenge point \\(x\\) + t_x: Scalar, + /// Blinding factor for the synthetic commitment to \\(t(x)\\) + t_x_blinding: Scalar, + /// Blinding factor for the synthetic commitment to the inner-product arguments + e_blinding: Scalar, + /// Proof data for the inner-product argument. + ipp_proof: inner_product_proof::Proof, } impl RangeProof { - pub fn generate_proof(v: u64, n: usize) -> RangeProof { - let mut rng: OsRng = OsRng::new().unwrap(); - // useful for debugging: - // let mut rng: StdRng = StdRng::from_seed(&[1, 2, 3, 4]); - - // Setup: generate points, commit to v (in the paper: g, h, bold(g), bolg(h); line 34) - let B = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let B_blinding = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); - let G = make_generators(B, n); - let H = make_generators(B_blinding, n); - let v_blinding = Scalar::random(&mut rng); - let V = B_blinding * v_blinding + B * Scalar::from_u64(v); - - // Compute A (line 39-42) - let a_blinding = Scalar::random(&mut rng); + /// Create a rangeproof. 
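    /// As a summary of the steps below (descriptive only): commit to the bits
    /// of `v` in `A` and to random blinding vectors in `S`; obtain challenges
    /// `y`, `z` from the transcript; form `t(x) = <l(x), r(x)>` and commit to
    /// its `x` and `x^2` coefficients in `T_1`, `T_2`; obtain `x`; then
    /// delegate the inner product `<l(x), r(x)>` to the inner-product proof
    /// over `G` and `H'`, where `H'[i] = y^(-i) * H[i]`.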
+ pub fn generate_proof( + transcript: &mut ProofTranscript, + rng: &mut R, + n: usize, + v: u64, + v_blinding: &Scalar, + ) -> RangeProof { + // XXX move this out to a common generators module + let B = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let B_blinding = RistrettoPoint::hash_from_bytes::("there".as_bytes()); + + let G = make_generators(&B, n); + let H = make_generators(&B_blinding, n); + + let V = ristretto::multiscalar_mult(&[Scalar::from_u64(v), *v_blinding], &[B, B_blinding]); + + let a_blinding = Scalar::random(rng); + + // Compute A = + + a_blinding * B_blinding. let mut A = B_blinding * a_blinding; for i in 0..n { let v_i = (v >> i) & 1; + // XXX replace this with a conditional move if v_i == 0 { A -= H[i]; } else { @@ -61,161 +80,222 @@ impl RangeProof { } } - // Compute S (line 43-45) - let points_iter = iter::once(B_blinding).chain(G.iter()).chain(H.iter()); - let randomness: Vec<_> = (0..(1 + 2 * n)).map(|_| Scalar::random(&mut rng)).collect(); - let S = ristretto::multiscalar_mult(&randomness, points_iter); - - // Save/label randomness to be used later (in the paper: rho, s_L, s_R) - let s_blinding = &randomness[0]; - let s_a = &randomness[1..(n + 1)]; - let s_b = &randomness[(n + 1)..(1 + 2 * n)]; - - // Generate y, z by committing to A, S (line 46-48) - let (y, z) = commit(&A, &S); - - // Calculate t by calculating vectors l0, l1, r0, r1 and multiplying - let mut l = VecPoly2::new(n); - let mut r = VecPoly2::new(n); - let z2 = z * z; - let mut t = PolyDeg3::new(); + let s_blinding = Scalar::random(rng); + let s_L: Vec<_> = (0..n).map(|_| Scalar::random(rng)).collect(); + let s_R: Vec<_> = (0..n).map(|_| Scalar::random(rng)).collect(); + + // Compute S = + + s_blinding * B_blinding. + let S = ristretto::multiscalar_mult( + iter::once(&s_blinding).chain(s_L.iter()).chain(s_R.iter()), + iter::once(&B_blinding).chain(G.iter()).chain(H.iter()), + ); + + // Commit to V, A, S and get challenges y, z + transcript.commit(V.compress().as_bytes()); + transcript.commit(A.compress().as_bytes()); + transcript.commit(S.compress().as_bytes()); + let y = transcript.challenge_scalar(); + let z = transcript.challenge_scalar(); + + // Compute l, r + let mut l_poly = VecPoly2::zero(n); + let mut r_poly = VecPoly2::zero(n); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - for i in 0..n { - let v_i = (v >> i) & 1; - l.0[i] -= z; - l.1[i] += s_a[i]; - r.0[i] += exp_y * z + z2 * exp_2; - r.1[i] += exp_y * s_b[i]; - if v_i == 0 { - r.0[i] -= exp_y; - } else { - l.0[i] += Scalar::one(); - } - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - t.0 = inner_product(&l.0, &r.0); - t.2 = inner_product(&l.1, &r.1); - // use Karatsuba algorithm to find t.1 = l.0*r.1 + l.1*r.0 - let l_add = add_vec(&l.0, &l.1); - let r_add = add_vec(&r.0, &r.1); - let l_r_mul = inner_product(&l_add, &r_add); - t.1 = l_r_mul - t.0 - t.2; - - // Generate x by committing to T_1, T_2 (line 49-54) - let t_1_blinding = Scalar::random(&mut rng); - let t_2_blinding = Scalar::random(&mut rng); - let T_1 = B * t.1 + B_blinding * t_1_blinding; - let T_2 = B * t.2 + B_blinding * t_2_blinding; - let (x, _) = commit(&T_1, &T_2); // TODO: use a different commit? 
- - // Generate final values for proof (line 55-60) - let t_x_blinding = t_1_blinding * x + t_2_blinding * x * x + z2 * v_blinding; - let e_blinding = a_blinding + s_blinding * x; - let t_hat = t.0 + t.1 * x + t.2 * x * x; - - // Calculate l, r - which is only necessary if not doing IPP (line 55-57) - // Adding this in a seperate loop so we can remove it easily later - let l_total = l.eval(x); - let r_total = r.eval(x); - - // Generate proof! (line 61) - RangeProof { - t_x_blinding: t_x_blinding, - e_blinding: e_blinding, - t: t_hat, - l: l_total, - r: r_total, - - V: V, - A: A, - S: S, - T_1: T_1, - T_2: T_2, - - n: n, - B: *B, - B_blinding: *B_blinding, - } - } + let zz = z * z; - pub fn verify_proof(&self) -> bool { - let (y, z) = commit(&self.A, &self.S); - let (x, _) = commit(&self.T_1, &self.T_2); - let G = make_generators(&self.B, self.n); - let mut hprime_vec = make_generators(&self.B_blinding, self.n); + for i in 0..n { + let a_L_i = Scalar::from_u64((v >> i) & 1); + let a_R_i = a_L_i - Scalar::one(); - // line 63: check that t = t0 + t1 * x + t2 * x * x - let z2 = z * z; - let z3 = z2 * z; - let mut power_g = Scalar::zero(); - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - for _ in 0..self.n { - power_g += (z - z2) * exp_y - z3 * exp_2; + l_poly.0[i] = a_L_i - z; + l_poly.1[i] = s_L[i]; + r_poly.0[i] = exp_y * (a_R_i + z) + zz * exp_2; + r_poly.1[i] = exp_y * s_R[i]; - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - let t_check = self.B * power_g + self.V * z2 + self.T_1 * x + self.T_2 * x * x; - let t_commit = self.B * self.t + self.B_blinding * self.t_x_blinding; - if t_commit != t_check { - //println!("fails check on line 63"); - return false; - } - - // line 62: calculate hprime - // line 64: compute commitment to l, r - let mut sum_G = RistrettoPoint::identity(); - for i in 0..self.n { - sum_G += G[i]; + exp_y *= y; // y^i -> y^(i+1) + exp_2 += exp_2; // 2^i -> 2^(i+1) } - let mut big_p = self.A + self.S * x; - big_p -= sum_G * z; - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - let inverse_y = Scalar::invert(&y); - let mut inv_exp_y = Scalar::one(); // start at y^-0 = 1 - for i in 0..self.n { - hprime_vec[i] = hprime_vec[i] * inv_exp_y; - big_p += hprime_vec[i] * (z * exp_y + z2 * exp_2); - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - inv_exp_y = inv_exp_y * inverse_y; // y^(-i) * y^(-1) -> y^(-(i+1)) - } + // Compute t(x) = + let t_poly = l_poly.inner_product(&r_poly); + + // Form commitments T_1, T_2 to t.1, t.2 + let t_1_blinding = Scalar::random(rng); + let t_2_blinding = Scalar::random(rng); + let T_1 = ristretto::multiscalar_mult(&[t_poly.1, t_1_blinding], &[B, B_blinding]); + let T_2 = ristretto::multiscalar_mult(&[t_poly.2, t_2_blinding], &[B, B_blinding]); + + // Commit to T_1, T_2 to get the challenge point x + transcript.commit(T_1.compress().as_bytes()); + transcript.commit(T_2.compress().as_bytes()); + let x = transcript.challenge_scalar(); + + // Evaluate t at x and run the IPP + let t_x = t_poly.0 + x * (t_poly.1 + x * t_poly.2); + let t_x_blinding = z * z * v_blinding + x * (t_1_blinding + x * t_2_blinding); + let e_blinding = a_blinding + x * s_blinding; + + transcript.commit(t_x.as_bytes()); + transcript.commit(t_x_blinding.as_bytes()); + transcript.commit(e_blinding.as_bytes()); + + // Get a challenge value to combine statements for the IPP + let w = 
transcript.challenge_scalar(); + let Q = w * B_blinding; + + // Generate the IPP proof + let ipp_proof = inner_product_proof::Proof::create( + transcript, + &Q, + util::exp_iter(y.invert()), + G, + H, + l_poly.eval(x), + r_poly.eval(x), + ); - // line 65: check that l, r are correct - let mut big_p_check = self.B_blinding * self.e_blinding; - let points_iter = G.iter().chain(hprime_vec.iter()); - let scalars_iter = self.l.iter().chain(self.r.iter()); - big_p_check += ristretto::multiscalar_mult(scalars_iter, points_iter); - if big_p != big_p_check { - //println!("fails check on line 65: big_p != g * l + hprime * r"); - return false; + RangeProof { + V, + A, + S, + T_1, + T_2, + t_x, + t_x_blinding, + e_blinding, + ipp_proof, } + } - // line 66: check that t is correct - if self.t != inner_product(&self.l, &self.r) { - //println!("fails check on line 66: t != l * r"); - return false; + pub fn verify( + &self, + transcript: &mut ProofTranscript, + rng: &mut R, + n: usize, + ) -> Result<(), ()> { + // XXX move this out to a common generators module + let B = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); + let B_blinding = RistrettoPoint::hash_from_bytes::("there".as_bytes()); + + let G = make_generators(&B, n); + let H = make_generators(&B_blinding, n); + + // Replay the "interactive" protocol using the proof data to recompute all challenges. + + transcript.commit(self.V.compress().as_bytes()); + transcript.commit(self.A.compress().as_bytes()); + transcript.commit(self.S.compress().as_bytes()); + + let y = transcript.challenge_scalar(); + let z = transcript.challenge_scalar(); + + transcript.commit(self.T_1.compress().as_bytes()); + transcript.commit(self.T_2.compress().as_bytes()); + + let x = transcript.challenge_scalar(); + + transcript.commit(self.t_x.as_bytes()); + transcript.commit(self.t_x_blinding.as_bytes()); + transcript.commit(self.e_blinding.as_bytes()); + + let w = transcript.challenge_scalar(); + + // Check that t(x) is consistent with commitments V, T_1, T_2 + let poly_check = ristretto::vartime::multiscalar_mult( + &[ + z * z, + x, + x * x, + (delta(n, &y, &z) - self.t_x), + -self.t_x_blinding, + ], + &[self.V, self.T_1, self.T_2, B, B_blinding], + ); + + if !poly_check.is_identity() { + return Err(()); } - return true; + // Recompute P + t(x)Q = P + t(x)w B_blinding + + let G_sum = G.iter() + .fold(RistrettoPoint::identity(), |acc, G_i| acc + G_i); + + let y_inv = y.invert(); + let two_over_y = Scalar::from_u64(2) * y_inv; + let zz = z * z; + + let H_scalars = util::exp_iter(two_over_y) + .take(n) + .map(|exp_two_over_y| z + zz * exp_two_over_y); + + let P_plus_tx_Q = &self.A + + &ristretto::vartime::multiscalar_mult( + [w * self.t_x - self.e_blinding, x, -z] + .iter() + .cloned() + .chain(H_scalars), + [B_blinding, self.S, G_sum].iter().chain(H.iter()), + ); + + // XXX eliminate this when merging into a single multiscalar mult + let Q = w * B_blinding; + + // Return the result of IPP verification using the recomputed P + t(x) Q + self.ipp_proof.verify( + transcript, + util::exp_iter(y_inv).take(n), + &P_plus_tx_Q, + &Q, + &G, + &H, + ) } } -impl PolyDeg3 { - pub fn new() -> PolyDeg3 { - PolyDeg3(Scalar::zero(), Scalar::zero(), Scalar::zero()) - } +/// Compute +/// $$ +/// \\delta(y,z) = (z - z^2)<1, y^n> + z^3 <1, 2^n> +/// $$ +fn delta(n: usize, y: &Scalar, z: &Scalar) -> Scalar { + let two = Scalar::from_u64(2); + + // XXX this could be more efficient, esp for powers of 2 + let sum_of_powers_of_y = util::exp_iter(*y) + .take(n) + .fold(Scalar::zero(), |acc, x| acc 
+ x); + + let sum_of_powers_of_2 = util::exp_iter(two) + .take(n) + .fold(Scalar::zero(), |acc, x| acc + x); + + let zz = z * z; + + (z - zz) * sum_of_powers_of_y - z * zz * sum_of_powers_of_2 } impl VecPoly2 { - pub fn new(n: usize) -> VecPoly2 { + pub fn zero(n: usize) -> VecPoly2 { VecPoly2(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) } + + pub fn inner_product(&self, rhs: &VecPoly2) -> PolyDeg3 { + // Uses Karatsuba's method + let l = self; + let r = rhs; + + let t0 = inner_product(&l.0, &r.0); + let t2 = inner_product(&l.1, &r.1); + + let l0_plus_l1 = add_vec(&l.0, &l.1); + let r0_plus_r1 = add_vec(&r.0, &r.1); + + let t1 = inner_product(&l0_plus_l1, &r0_plus_r1) - t0 - t2; + + PolyDeg3(t0, t1, t2) + } + pub fn eval(&self, x: Scalar) -> Vec { let n = self.0.len(); let mut out = vec![Scalar::zero(); n]; @@ -237,21 +317,6 @@ pub fn make_generators(point: &RistrettoPoint, n: usize) -> Vec generators } -pub fn commit(v1: &RistrettoPoint, v2: &RistrettoPoint) -> (Scalar, Scalar) { - let mut c1_digest = Sha512::new(); - c1_digest.input(v1.compress().as_bytes()); - c1_digest.input(v2.compress().as_bytes()); - let c1 = Scalar::from_hash(c1_digest); - - let mut c2_digest = Sha512::new(); - c2_digest.input(v1.compress().as_bytes()); - c2_digest.input(v2.compress().as_bytes()); - c2_digest.input(c1.as_bytes()); - let c2 = Scalar::from_hash(c2_digest); - - (c1, c2) -} - pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { let mut out = Scalar::zero(); if a.len() != b.len() { @@ -279,7 +344,33 @@ pub fn add_vec(a: &[Scalar], b: &[Scalar]) -> Vec { #[cfg(test)] mod tests { use super::*; - use rand::Rng; + use rand::OsRng; + + #[test] + fn test_delta() { + let mut rng = OsRng::new().unwrap(); + let y = Scalar::random(&mut rng); + let z = Scalar::random(&mut rng); + + // Choose n = 256 to ensure we overflow the group order during + // the computation, to check that that's done correctly + let n = 256; + + // code copied from previous implementation + let z2 = z * z; + let z3 = z2 * z; + let mut power_g = Scalar::zero(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + for _ in 0..n { + power_g += (z - z2) * exp_y - z3 * exp_2; + + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + assert_eq!(power_g, delta(n, &y, &z),); + } #[test] fn test_inner_product() { @@ -297,83 +388,109 @@ mod tests { ]; assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); } + + fn create_and_verify_helper(n: usize) { + let mut transcript = ProofTranscript::new(b"RangeproofTest"); + let mut rng = OsRng::new().unwrap(); + + let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); + let v_blinding = Scalar::random(&mut rng); + + let rp = RangeProof::generate_proof(&mut transcript, &mut rng, n, v, &v_blinding); + + let mut transcript = ProofTranscript::new(b"RangeproofTest"); + + assert!(rp.verify(&mut transcript, &mut rng, n).is_ok()); + } + #[test] - fn test_rp_t() { - let rp = RangeProof::generate_proof(1, 1); - assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); - let rp = RangeProof::generate_proof(1, 2); - assert_eq!(rp.t, inner_product(&rp.l, &rp.r)); + fn create_and_verify_8() { + create_and_verify_helper(8); } + #[test] - fn verify_rp_simple() { - for n in &[1, 2, 4, 8, 16, 32] { - //println!("n: {:?}", n); - let rp = RangeProof::generate_proof(0, *n); - assert_eq!(rp.verify_proof(), true); - let rp = RangeProof::generate_proof(2u64.pow(*n as u32) - 1, *n); - assert_eq!(rp.verify_proof(), true); - let rp = 
RangeProof::generate_proof(2u64.pow(*n as u32), *n); - assert_eq!(rp.verify_proof(), false); - let rp = RangeProof::generate_proof(2u64.pow(*n as u32) + 1, *n); - assert_eq!(rp.verify_proof(), false); - let rp = RangeProof::generate_proof(u64::max_value(), *n); - assert_eq!(rp.verify_proof(), false); - } + fn create_and_verify_16() { + create_and_verify_helper(16); } + #[test] - fn verify_rp_rand_big() { - for _ in 0..50 { - let mut rng: OsRng = OsRng::new().unwrap(); - let v: u64 = rng.next_u64(); - //println!("v: {:?}", v); - let rp = RangeProof::generate_proof(v, 32); - let expected = v <= 2u64.pow(32); - assert_eq!(rp.verify_proof(), expected); - } + fn create_and_verify_32() { + create_and_verify_helper(32); } + #[test] - fn verify_rp_rand_small() { - for _ in 0..50 { - let mut rng: OsRng = OsRng::new().unwrap(); - let v: u32 = rng.next_u32(); - //println!("v: {:?}", v); - let rp = RangeProof::generate_proof(v as u64, 32); - assert_eq!(rp.verify_proof(), true); - } + fn create_and_verify_64() { + create_and_verify_helper(64); } } #[cfg(test)] mod bench { use super::*; - use rand::Rng; + use rand::{OsRng, Rng}; use test::Bencher; + fn bench_create_helper(n: usize, b: &mut Bencher) { + let mut transcript = ProofTranscript::new(b"RangeproofTest"); + let mut rng = OsRng::new().unwrap(); + + let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); + let v_blinding = Scalar::random(&mut rng); + + b.iter(|| RangeProof::generate_proof(&mut transcript, &mut rng, n, v, &v_blinding)); + } + + fn bench_verify_helper(n: usize, b: &mut Bencher) { + let mut transcript = ProofTranscript::new(b"RangeproofTest"); + let mut rng = OsRng::new().unwrap(); + + let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); + let v_blinding = Scalar::random(&mut rng); + + let rp = RangeProof::generate_proof(&mut transcript, &mut rng, n, v, &v_blinding); + + let mut transcript = ProofTranscript::new(b"RangeproofTest"); + + b.iter(|| rp.verify(&mut transcript, &mut rng, n)); + } + + #[bench] + fn create_rp_64(b: &mut Bencher) { + bench_create_helper(64, b); + } + #[bench] - fn generators(b: &mut Bencher) { - use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; - b.iter(|| make_generators(&RISTRETTO_BASEPOINT_POINT, 100)); + fn create_rp_32(b: &mut Bencher) { + bench_create_helper(32, b); } + #[bench] - fn make_rp_64(b: &mut Bencher) { - let mut rng: OsRng = OsRng::new().unwrap(); - b.iter(|| RangeProof::generate_proof(rng.next_u64(), 64)); + fn create_rp_16(b: &mut Bencher) { + bench_create_helper(32, b); } + #[bench] - fn make_rp_32(b: &mut Bencher) { - let mut rng: OsRng = OsRng::new().unwrap(); - b.iter(|| RangeProof::generate_proof(rng.next_u32() as u64, 32)); + fn create_rp_8(b: &mut Bencher) { + bench_create_helper(32, b); } + #[bench] fn verify_rp_64(b: &mut Bencher) { - let mut rng: OsRng = OsRng::new().unwrap(); - let rp = RangeProof::generate_proof(rng.next_u64(), 64); - b.iter(|| rp.verify_proof()); + bench_verify_helper(64, b); } + #[bench] fn verify_rp_32(b: &mut Bencher) { - let mut rng: OsRng = OsRng::new().unwrap(); - let rp = RangeProof::generate_proof(rng.next_u32() as u64, 32); - b.iter(|| rp.verify_proof()); + bench_verify_helper(32, b); + } + + #[bench] + fn verify_rp_16(b: &mut Bencher) { + bench_verify_helper(32, b); + } + + #[bench] + fn verify_rp_8(b: &mut Bencher) { + bench_verify_helper(32, b); } } diff --git a/src/util.rs b/src/util.rs index 59b445c8..1d21d7fb 100644 --- a/src/util.rs +++ b/src/util.rs @@ -24,13 +24,13 @@ impl Iterator for ScalarExp { /// Return an iterator of the powers of 
`x`. pub fn exp_iter(x: Scalar) -> ScalarExp { let next_exp_x = Scalar::one(); - ScalarExp{ x, next_exp_x } + ScalarExp { x, next_exp_x } } #[cfg(test)] mod tests { use super::*; - + #[test] fn exp_2_is_powers_of_2() { let exp_2: Vec<_> = exp_iter(Scalar::from_u64(2)).take(4).collect(); From 6364840e04cff26726e1d8566a44071be1c0734d Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Wed, 14 Mar 2018 16:49:54 -0700 Subject: [PATCH 066/186] Use generators API --- src/inner_product_proof.rs | 92 ++++++++++++++++----------------- src/proof_transcript.rs | 6 +-- src/range_proof.rs | 101 +++++++++++++++++++++++-------------- 3 files changed, 112 insertions(+), 87 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index dfcf95d9..74e94e19 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -14,8 +14,9 @@ use proof_transcript::ProofTranscript; use util; +use generators::Generators; + use range_proof::inner_product; -use range_proof::make_generators; use sha2::Sha256; @@ -131,8 +132,8 @@ impl Proof { Hprime_factors: I, P: &RistrettoPoint, Q: &RistrettoPoint, - G_vec: &Vec, - H_vec: &Vec, + G: &[RistrettoPoint], + H: &[RistrettoPoint], ) -> Result<(), ()> where I: IntoIterator, @@ -198,8 +199,8 @@ impl Proof { .chain(neg_x_sq) .chain(neg_x_inv_sq), iter::once(Q) - .chain(G_vec.iter()) - .chain(H_vec.iter()) + .chain(G.iter()) + .chain(H.iter()) .chain(self.L_vec.iter()) .chain(self.R_vec.iter()), ); @@ -221,11 +222,9 @@ mod tests { fn test_helper_create(n: usize) { let mut rng = OsRng::new().unwrap(); - // XXX fix up generators - let B = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let B_blinding = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); - let G = make_generators(B, n); - let H = make_generators(B_blinding, n); + let gens = Generators::new(n, 1); + let G = gens.share(0).G.to_vec(); + let H = gens.share(0).H.to_vec(); // Q would be determined upstream in the protocol, so we pick a random one. let Q = RistrettoPoint::hash_from_bytes::(b"test point"); @@ -303,63 +302,66 @@ mod bench { use super::*; use test::Bencher; - fn bench_helper_create(n: usize, b: &mut Bencher) { - let mut verifier = ProofTranscript::new(b"innerproducttest"); - let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); - let G_vec = make_generators(G, n); - let H_vec = make_generators(H, n); - let Q = RistrettoPoint::hash_from_bytes::("more".as_bytes()); - let P = RistrettoPoint::hash_from_bytes::("points".as_bytes()); - let a_vec = vec![Scalar::from_u64(1); n]; - let b_vec = vec![Scalar::from_u64(2); n]; + fn bench_helper_create(n: usize, bench: &mut Bencher) { + let gens = Generators::new(n, 1); + let G = gens.share(0).G.to_vec(); + let H = gens.share(0).H.to_vec(); + + // Q would be determined upstream in the protocol, so we pick a random one. 
+ let Q = RistrettoPoint::hash_from_bytes::(b"test point"); + + let a = vec![Scalar::from_u64(1); n]; + let b = vec![Scalar::from_u64(2); n]; let ones = vec![Scalar::from_u64(1); n]; - b.iter(|| { + let mut verifier = ProofTranscript::new(b"innerproducttest"); + + bench.iter(|| { Proof::create( &mut verifier, &Q, &ones, - G_vec.clone(), - H_vec.clone(), - a_vec.clone(), - b_vec.clone(), + G.clone(), + H.clone(), + a.clone(), + b.clone(), ) }); } - fn bench_helper_verify(n: usize, b: &mut Bencher) { - let mut verifier = ProofTranscript::new(b"innerproducttest"); - let G = &RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let H = &RistrettoPoint::hash_from_bytes::("there".as_bytes()); - let G_vec = make_generators(G, n); - let H_vec = make_generators(H, n); - - let a_vec = vec![Scalar::from_u64(1); n]; - let b_vec = vec![Scalar::from_u64(2); n]; + fn bench_helper_verify(n: usize, bench: &mut Bencher) { + let gens = Generators::new(n, 1); + let G = gens.share(0).G.to_vec(); + let H = gens.share(0).H.to_vec(); + // Q would be determined upstream in the protocol, so we pick a random one. let Q = RistrettoPoint::hash_from_bytes::(b"test point"); - let c = inner_product(&a_vec, &b_vec); - - let P = ristretto::vartime::multiscalar_mult( - a_vec.iter().chain(b_vec.iter()).chain(iter::once(&c)), - G_vec.iter().chain(H_vec.iter()).chain(iter::once(&Q)), - ); + let a = vec![Scalar::from_u64(1); n]; + let b = vec![Scalar::from_u64(2); n]; let ones = vec![Scalar::from_u64(1); n]; + let mut verifier = ProofTranscript::new(b"innerproducttest"); + let proof = Proof::create( &mut verifier, &Q, &ones, - G_vec.clone(), - H_vec.clone(), - a_vec.clone(), - b_vec.clone(), + G.clone(), + H.clone(), + a.clone(), + b.clone(), + ); + + let c = inner_product(&a, &b); + + let P = ristretto::vartime::multiscalar_mult( + a.iter().chain(b.iter()).chain(iter::once(&c)), + G.iter().chain(H.iter()).chain(iter::once(&Q)), ); let mut verifier = ProofTranscript::new(b"innerproducttest"); - b.iter(|| proof.verify(&mut verifier, &ones, &P, &Q, &G_vec, &H_vec)); + bench.iter(|| proof.verify(&mut verifier, &ones, &P, &Q, &G, &H)); } #[bench] diff --git a/src/proof_transcript.rs b/src/proof_transcript.rs index d75e7bde..2ffd1ddb 100644 --- a/src/proof_transcript.rs +++ b/src/proof_transcript.rs @@ -67,7 +67,9 @@ impl ProofTranscript { /// Begin a new, empty proof transcript, using the given `label` /// for domain separation. pub fn new(label: &[u8]) -> Self { - let mut ro = ProofTranscript { hash: Keccak::new_shake128() }; + let mut ro = ProofTranscript { + hash: Keccak::new_shake128(), + }; ro.commit(label); // makes sure the label is disambiguated from the rest of the messages. ro.pad(); @@ -80,7 +82,6 @@ impl ProofTranscript { /// /// Each message must be shorter than 64Kb (65536 bytes). pub fn commit(&mut self, message: &[u8]) { - let len = message.len(); if len > (u16::max_value() as usize) { panic!("Committed message must be less than 64Kb!"); @@ -110,7 +111,6 @@ impl ProofTranscript { /// Extracts an arbitrary-sized challenge byte slice. pub fn challenge_bytes(&mut self, mut output: &mut [u8]) { - // XXX we rely on tiny_keccak experimental support for half-duplex mode and // correct switching from absorbing to squeezing and back. 
// Review this after this PR is merged or updated: diff --git a/src/range_proof.rs b/src/range_proof.rs index bda2516b..cd1565fc 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -19,6 +19,8 @@ use proof_transcript::ProofTranscript; use util; +use generators::{Generators, GeneratorsView}; + struct PolyDeg3(Scalar, Scalar, Scalar); struct VecPoly2(Vec, Vec); @@ -51,20 +53,23 @@ pub struct RangeProof { impl RangeProof { /// Create a rangeproof. pub fn generate_proof( + generators: GeneratorsView, transcript: &mut ProofTranscript, rng: &mut R, n: usize, v: u64, v_blinding: &Scalar, ) -> RangeProof { - // XXX move this out to a common generators module - let B = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let B_blinding = RistrettoPoint::hash_from_bytes::("there".as_bytes()); + let B = generators.B; + let B_blinding = generators.B_blinding; - let G = make_generators(&B, n); - let H = make_generators(&B_blinding, n); + // Create copies of G, H, so we can pass them to the + // (consuming) IPP API later. + let G = generators.G.to_vec(); + let H = generators.H.to_vec(); - let V = ristretto::multiscalar_mult(&[Scalar::from_u64(v), *v_blinding], &[B, B_blinding]); + let V = + ristretto::multiscalar_mult(&[Scalar::from_u64(v), *v_blinding], &[*B, *B_blinding]); let a_blinding = Scalar::random(rng); @@ -87,7 +92,7 @@ impl RangeProof { // Compute S = + + s_blinding * B_blinding. let S = ristretto::multiscalar_mult( iter::once(&s_blinding).chain(s_L.iter()).chain(s_R.iter()), - iter::once(&B_blinding).chain(G.iter()).chain(H.iter()), + iter::once(B_blinding).chain(G.iter()).chain(H.iter()), ); // Commit to V, A, S and get challenges y, z @@ -123,8 +128,8 @@ impl RangeProof { // Form commitments T_1, T_2 to t.1, t.2 let t_1_blinding = Scalar::random(rng); let t_2_blinding = Scalar::random(rng); - let T_1 = ristretto::multiscalar_mult(&[t_poly.1, t_1_blinding], &[B, B_blinding]); - let T_2 = ristretto::multiscalar_mult(&[t_poly.2, t_2_blinding], &[B, B_blinding]); + let T_1 = ristretto::multiscalar_mult(&[t_poly.1, t_1_blinding], &[*B, *B_blinding]); + let T_2 = ristretto::multiscalar_mult(&[t_poly.2, t_2_blinding], &[*B, *B_blinding]); // Commit to T_1, T_2 to get the challenge point x transcript.commit(T_1.compress().as_bytes()); @@ -170,18 +175,13 @@ impl RangeProof { pub fn verify( &self, + gens: GeneratorsView, transcript: &mut ProofTranscript, rng: &mut R, n: usize, ) -> Result<(), ()> { - // XXX move this out to a common generators module - let B = RistrettoPoint::hash_from_bytes::("hello".as_bytes()); - let B_blinding = RistrettoPoint::hash_from_bytes::("there".as_bytes()); - - let G = make_generators(&B, n); - let H = make_generators(&B_blinding, n); - - // Replay the "interactive" protocol using the proof data to recompute all challenges. + // First, replay the "interactive" protocol using the proof + // data to recompute all challenges. 
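        // Schedule replayed below, matching the prover: committing V, A, S
        // yields the challenges y and z; committing T_1, T_2 yields x;
        // committing t_x, t_x_blinding, e_blinding yields w, the challenge
        // used to define the point Q for the inner-product argument.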
transcript.commit(self.V.compress().as_bytes()); transcript.commit(self.A.compress().as_bytes()); @@ -210,7 +210,7 @@ impl RangeProof { (delta(n, &y, &z) - self.t_x), -self.t_x_blinding, ], - &[self.V, self.T_1, self.T_2, B, B_blinding], + &[self.V, self.T_1, self.T_2, *gens.B, *gens.B_blinding], ); if !poly_check.is_identity() { @@ -219,7 +219,9 @@ impl RangeProof { // Recompute P + t(x)Q = P + t(x)w B_blinding - let G_sum = G.iter() + // XXX later we will need to fold this into the IPP api + let G_sum = gens.G + .iter() .fold(RistrettoPoint::identity(), |acc, G_i| acc + G_i); let y_inv = y.invert(); @@ -236,11 +238,13 @@ impl RangeProof { .iter() .cloned() .chain(H_scalars), - [B_blinding, self.S, G_sum].iter().chain(H.iter()), + [*gens.B_blinding, self.S, G_sum] + .iter() + .chain(gens.H.iter()), ); // XXX eliminate this when merging into a single multiscalar mult - let Q = w * B_blinding; + let Q = w * gens.B_blinding; // Return the result of IPP verification using the recomputed P + t(x) Q self.ipp_proof.verify( @@ -248,8 +252,8 @@ impl RangeProof { util::exp_iter(y_inv).take(n), &P_plus_tx_Q, &Q, - &G, - &H, + gens.G, + gens.H, ) } } @@ -306,17 +310,6 @@ impl VecPoly2 { } } -pub fn make_generators(point: &RistrettoPoint, n: usize) -> Vec { - let mut generators = vec![RistrettoPoint::identity(); n]; - - generators[0] = RistrettoPoint::hash_from_bytes::(point.compress().as_bytes()); - for i in 1..n { - let prev = generators[i - 1].compress(); - generators[i] = RistrettoPoint::hash_from_bytes::(prev.as_bytes()); - } - generators -} - pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { let mut out = Scalar::zero(); if a.len() != b.len() { @@ -390,17 +383,29 @@ mod tests { } fn create_and_verify_helper(n: usize) { + let generators = Generators::new(n, 1); let mut transcript = ProofTranscript::new(b"RangeproofTest"); let mut rng = OsRng::new().unwrap(); let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); let v_blinding = Scalar::random(&mut rng); - let rp = RangeProof::generate_proof(&mut transcript, &mut rng, n, v, &v_blinding); + let range_proof = RangeProof::generate_proof( + generators.share(0), + &mut transcript, + &mut rng, + n, + v, + &v_blinding, + ); let mut transcript = ProofTranscript::new(b"RangeproofTest"); - assert!(rp.verify(&mut transcript, &mut rng, n).is_ok()); + assert!( + range_proof + .verify(generators.share(0), &mut transcript, &mut rng, n) + .is_ok() + ); } #[test] @@ -431,27 +436,45 @@ mod bench { use test::Bencher; fn bench_create_helper(n: usize, b: &mut Bencher) { + let generators = Generators::new(n, 1); let mut transcript = ProofTranscript::new(b"RangeproofTest"); let mut rng = OsRng::new().unwrap(); let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); let v_blinding = Scalar::random(&mut rng); - b.iter(|| RangeProof::generate_proof(&mut transcript, &mut rng, n, v, &v_blinding)); + b.iter(|| { + RangeProof::generate_proof( + generators.share(0), + &mut transcript, + &mut rng, + n, + v, + &v_blinding, + ) + }); } fn bench_verify_helper(n: usize, b: &mut Bencher) { + let generators = Generators::new(n, 1); let mut transcript = ProofTranscript::new(b"RangeproofTest"); let mut rng = OsRng::new().unwrap(); let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); let v_blinding = Scalar::random(&mut rng); - let rp = RangeProof::generate_proof(&mut transcript, &mut rng, n, v, &v_blinding); + let rp = RangeProof::generate_proof( + generators.share(0), + &mut transcript, + &mut rng, + n, + v, + &v_blinding, + ); let mut transcript = 
ProofTranscript::new(b"RangeproofTest"); - b.iter(|| rp.verify(&mut transcript, &mut rng, n)); + b.iter(|| rp.verify(generators.share(0), &mut transcript, &mut rng, n)); } #[bench] From d7175d64627ff911a696129607d5c62ffa5a7557 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 15 Mar 2018 11:12:44 -0700 Subject: [PATCH 067/186] Benchmark proof creation and verification correctly. Each iteration requires a clean transcript. --- src/range_proof.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/range_proof.rs b/src/range_proof.rs index cd1565fc..1cece64e 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -437,13 +437,15 @@ mod bench { fn bench_create_helper(n: usize, b: &mut Bencher) { let generators = Generators::new(n, 1); - let mut transcript = ProofTranscript::new(b"RangeproofTest"); let mut rng = OsRng::new().unwrap(); let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); let v_blinding = Scalar::random(&mut rng); b.iter(|| { + // Each proof creation requires a clean transcript. + let mut transcript = ProofTranscript::new(b"RangeproofTest"); + RangeProof::generate_proof( generators.share(0), &mut transcript, @@ -457,9 +459,9 @@ mod bench { fn bench_verify_helper(n: usize, b: &mut Bencher) { let generators = Generators::new(n, 1); - let mut transcript = ProofTranscript::new(b"RangeproofTest"); let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"RangeproofTest"); let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); let v_blinding = Scalar::random(&mut rng); @@ -472,9 +474,12 @@ mod bench { &v_blinding, ); - let mut transcript = ProofTranscript::new(b"RangeproofTest"); + b.iter(|| { + // Each verification requires a clean transcript. + let mut transcript = ProofTranscript::new(b"RangeproofTest"); - b.iter(|| rp.verify(generators.share(0), &mut transcript, &mut rng, n)); + rp.verify(generators.share(0), &mut transcript, &mut rng, n) + }); } #[bench] From f9f1f964eac9b3199430e5e30482e14a83e9f2de Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 15 Mar 2018 11:59:14 -0700 Subject: [PATCH 068/186] fix tests --- src/range_proof.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/range_proof.rs b/src/range_proof.rs index 1cece64e..de68b584 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -494,12 +494,12 @@ mod bench { #[bench] fn create_rp_16(b: &mut Bencher) { - bench_create_helper(32, b); + bench_create_helper(16, b); } #[bench] fn create_rp_8(b: &mut Bencher) { - bench_create_helper(32, b); + bench_create_helper(8, b); } #[bench] @@ -514,11 +514,11 @@ mod bench { #[bench] fn verify_rp_16(b: &mut Bencher) { - bench_verify_helper(32, b); + bench_verify_helper(16, b); } #[bench] fn verify_rp_8(b: &mut Bencher) { - bench_verify_helper(32, b); + bench_verify_helper(8, b); } } From 29e70cbc36f83d7339462adc672f902361a3ac82 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Mar 2018 11:36:43 -0700 Subject: [PATCH 069/186] Eliminate a loop by bit-twiddling hacks --- src/inner_product_proof.rs | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 74e94e19..6398c801 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -161,20 +161,15 @@ impl Proof { } let challenges_sq = challenges; - // j-th bit of i - let bit = |i, j| 1 & (i >> j); - let mut s = Vec::with_capacity(n); - for i in 0..n { - let mut s_i = allinv; - // XXX remove this loop via the bit 
twiddling mentioned in the paper - for j in 0..lg_n { - if bit(i, j) == 1 { - // The challenges are stored in "creation order" as [x_k,...,x_1] - s_i *= challenges_sq[(lg_n - 1) - j]; - } - } - s.push(s_i); + s.push(allinv); + for i in 1..n { + let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize; + let k = 1 << lg_i; + // The challenges are stored in "creation order" as [x_k,...,x_1], + // so x_{lg(i)+1} = is indexed by (lg_n-1) - lg_i + let x_lg_i_sq = challenges_sq[(lg_n-1) - lg_i]; + s.push( s[i-k] * x_lg_i_sq ); } let s = s; From ccfe36e200357e16997d9a15e2e8de11a3b83c06 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Mar 2018 12:07:20 -0700 Subject: [PATCH 070/186] Split IPP verification scalars into helper function --- src/inner_product_proof.rs | 72 +++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 28 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 6398c801..0f343915 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -126,40 +126,40 @@ impl Proof { }; } - pub fn verify( + pub(crate) fn verification_scalars( &self, - verifier: &mut ProofTranscript, - Hprime_factors: I, - P: &RistrettoPoint, - Q: &RistrettoPoint, - G: &[RistrettoPoint], - H: &[RistrettoPoint], - ) -> Result<(), ()> - where - I: IntoIterator, - I::Item: Borrow, - { - // XXX prover should commit to n + transcript: &mut ProofTranscript, + ) -> (Vec, Vec, Vec) { let lg_n = self.L_vec.len(); let n = 1 << lg_n; - // XXX figure out how ser/deser works for Proofs - // maybe avoid this compression + // 1. Recompute x_k,...,x_1 based on the proof transcript + let mut challenges = Vec::with_capacity(lg_n); for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) { - verifier.commit(L.compress().as_bytes()); - verifier.commit(R.compress().as_bytes()); + // XXX maybe avoid this compression when proof ser/de is sorted out + transcript.commit(L.compress().as_bytes()); + transcript.commit(R.compress().as_bytes()); - challenges.push(verifier.challenge_scalar()); + challenges.push(transcript.challenge_scalar()); } - let mut inv_challenges = challenges.clone(); - let allinv = scalar::batch_invert(&mut inv_challenges); + // 2. Compute 1/(x_k...x_1) and 1/x_k, ..., 1/x_1 + + let mut challenges_inv = challenges.clone(); + let allinv = scalar::batch_invert(&mut challenges_inv); + + // 3. Compute x_i^2 and (1/x_i)^2 - for x in challenges.iter_mut() { - *x = &*x * &*x; // wtf + for i in 0..lg_n { + // XXX missing square fn upstream + challenges[i] = challenges[i] * challenges[i]; + challenges_inv[i] = challenges_inv[i] * challenges_inv[i]; } let challenges_sq = challenges; + let challenges_inv_sq = challenges_inv; + + // 4. Compute s values inductively. 
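        // Up to the common factor 1/(x_1 * ... * x_k), each s_i is the
        // product of x_{j+1}^2 over the set bits j of i (using the
        // [x_k, ..., x_1] naming above). Clearing the highest set bit of i
        // removes exactly one such factor, so s_i = s_{i - 2^lg(i)} *
        // x_{lg(i)+1}^2, and all n values follow from s_0 = 1/(x_1 * ... * x_k)
        // with a single multiplication each, as in the loop below.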
let mut s = Vec::with_capacity(n); s.push(allinv); @@ -168,10 +168,27 @@ impl Proof { let k = 1 << lg_i; // The challenges are stored in "creation order" as [x_k,...,x_1], // so x_{lg(i)+1} = is indexed by (lg_n-1) - lg_i - let x_lg_i_sq = challenges_sq[(lg_n-1) - lg_i]; - s.push( s[i-k] * x_lg_i_sq ); + let x_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i]; + s.push(s[i - k] * x_lg_i_sq); } - let s = s; + + (challenges_sq, challenges_inv_sq, s) + } + + pub fn verify( + &self, + transcript: &mut ProofTranscript, + Hprime_factors: I, + P: &RistrettoPoint, + Q: &RistrettoPoint, + G: &[RistrettoPoint], + H: &[RistrettoPoint], + ) -> Result<(), ()> + where + I: IntoIterator, + I::Item: Borrow, + { + let (x_sq, x_inv_sq, s) = self.verification_scalars(transcript); let a_times_s = s.iter().map(|s_i| self.a * s_i); @@ -183,9 +200,8 @@ impl Proof { .zip(inv_s) .map(|(h_i, s_i_inv)| (self.b * s_i_inv) * h_i.borrow()); - let neg_x_sq = challenges_sq.iter().map(|x| -x); - - let neg_x_inv_sq = inv_challenges.iter().map(|x_inv| -(x_inv * x_inv)); + let neg_x_sq = x_sq.iter().map(|xi| -xi); + let neg_x_inv_sq = x_inv_sq.iter().map(|xi| -xi); let expect_P = ristretto::vartime::multiscalar_mult( iter::once(self.a * self.b) From 6827a5fae4956958308e96bef91c8f755891ca99 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Mar 2018 14:11:22 -0700 Subject: [PATCH 071/186] Perform verification using a single multiscalar multiplication. --- src/inner_product_proof.rs | 8 +-- src/range_proof.rs | 99 ++++++++++++++++++-------------------- 2 files changed, 51 insertions(+), 56 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 0f343915..ebe02589 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -22,10 +22,10 @@ use sha2::Sha256; #[derive(Clone, Debug)] pub struct Proof { - L_vec: Vec, - R_vec: Vec, - a: Scalar, - b: Scalar, + pub(crate) L_vec: Vec, + pub(crate) R_vec: Vec, + pub(crate) a: Scalar, + pub(crate) b: Scalar, } impl Proof { diff --git a/src/range_proof.rs b/src/range_proof.rs index de68b584..4a41e6a6 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -147,7 +147,7 @@ impl RangeProof { // Get a challenge value to combine statements for the IPP let w = transcript.challenge_scalar(); - let Q = w * B_blinding; + let Q = w * B; // Generate the IPP proof let ipp_proof = inner_product_proof::Proof::create( @@ -189,6 +189,8 @@ impl RangeProof { let y = transcript.challenge_scalar(); let z = transcript.challenge_scalar(); + let zz = z * z; + let minus_z = -z; transcript.commit(self.T_1.compress().as_bytes()); transcript.commit(self.T_2.compress().as_bytes()); @@ -201,60 +203,53 @@ impl RangeProof { let w = transcript.challenge_scalar(); - // Check that t(x) is consistent with commitments V, T_1, T_2 - let poly_check = ristretto::vartime::multiscalar_mult( - &[ - z * z, - x, - x * x, - (delta(n, &y, &z) - self.t_x), - -self.t_x_blinding, - ], - &[self.V, self.T_1, self.T_2, *gens.B, *gens.B_blinding], + // Challenge value for batching statements to be verified + let c = Scalar::random(rng); + + let (x_sq, x_inv_sq, s) = self.ipp_proof.verification_scalars(transcript); + let s_inv = s.iter().rev(); + + let a = self.ipp_proof.a; + let b = self.ipp_proof.b; + + let g = s.iter().map(|s_i| minus_z - a * s_i); + let h = s_inv + .zip(util::exp_iter(Scalar::from_u64(2))) + .zip(util::exp_iter(y.invert())) + .map(|((s_i_inv, exp_2), exp_y_inv)| z + exp_y_inv * (zz * exp_2 - b * s_i_inv)); + + let mega_check = ristretto::vartime::multiscalar_mult( 
+ iter::once(Scalar::one()) + .chain(iter::once(x)) + .chain(iter::once(c * z * z)) + .chain(iter::once(c * x)) + .chain(iter::once(c * x * x)) + .chain(iter::once(-self.e_blinding - c * self.t_x_blinding)) + .chain(iter::once( + w * (self.t_x - a * b) + c * (delta(n, &y, &z) - self.t_x), + )) + .chain(g) + .chain(h) + .chain(x_sq.iter().cloned()) + .chain(x_inv_sq.iter().cloned()), + iter::once(&self.A) + .chain(iter::once(&self.S)) + .chain(iter::once(&self.V)) + .chain(iter::once(&self.T_1)) + .chain(iter::once(&self.T_2)) + .chain(iter::once(gens.B_blinding)) + .chain(iter::once(gens.B)) + .chain(gens.G.iter()) + .chain(gens.H.iter()) + .chain(self.ipp_proof.L_vec.iter()) + .chain(self.ipp_proof.R_vec.iter()), ); - if !poly_check.is_identity() { - return Err(()); + if mega_check.is_identity() { + Ok(()) + } else { + Err(()) } - - // Recompute P + t(x)Q = P + t(x)w B_blinding - - // XXX later we will need to fold this into the IPP api - let G_sum = gens.G - .iter() - .fold(RistrettoPoint::identity(), |acc, G_i| acc + G_i); - - let y_inv = y.invert(); - let two_over_y = Scalar::from_u64(2) * y_inv; - let zz = z * z; - - let H_scalars = util::exp_iter(two_over_y) - .take(n) - .map(|exp_two_over_y| z + zz * exp_two_over_y); - - let P_plus_tx_Q = &self.A - + &ristretto::vartime::multiscalar_mult( - [w * self.t_x - self.e_blinding, x, -z] - .iter() - .cloned() - .chain(H_scalars), - [*gens.B_blinding, self.S, G_sum] - .iter() - .chain(gens.H.iter()), - ); - - // XXX eliminate this when merging into a single multiscalar mult - let Q = w * gens.B_blinding; - - // Return the result of IPP verification using the recomputed P + t(x) Q - self.ipp_proof.verify( - transcript, - util::exp_iter(y_inv).take(n), - &P_plus_tx_Q, - &Q, - gens.G, - gens.H, - ) } } From 9b7284fb5bd61c612343279fa87aabb97a79ed28 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 16 Mar 2018 16:57:02 -0700 Subject: [PATCH 072/186] Minor fixups --- src/range_proof.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/range_proof.rs b/src/range_proof.rs index 4a41e6a6..39d0efed 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -101,13 +101,13 @@ impl RangeProof { transcript.commit(S.compress().as_bytes()); let y = transcript.challenge_scalar(); let z = transcript.challenge_scalar(); + let zz = z * z; // Compute l, r let mut l_poly = VecPoly2::zero(n); let mut r_poly = VecPoly2::zero(n); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - let zz = z * z; for i in 0..n { let a_L_i = Scalar::from_u64((v >> i) & 1); @@ -138,7 +138,7 @@ impl RangeProof { // Evaluate t at x and run the IPP let t_x = t_poly.0 + x * (t_poly.1 + x * t_poly.2); - let t_x_blinding = z * z * v_blinding + x * (t_1_blinding + x * t_2_blinding); + let t_x_blinding = zz * v_blinding + x * (t_1_blinding + x * t_2_blinding); let e_blinding = a_blinding + x * s_blinding; transcript.commit(t_x.as_bytes()); @@ -221,7 +221,7 @@ impl RangeProof { let mega_check = ristretto::vartime::multiscalar_mult( iter::once(Scalar::one()) .chain(iter::once(x)) - .chain(iter::once(c * z * z)) + .chain(iter::once(c * zz)) .chain(iter::once(c * x)) .chain(iter::once(c * x * x)) .chain(iter::once(-self.e_blinding - c * self.t_x_blinding)) From 258f4a6d9ad1018bb45663be5b8c6e33238b29ab Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 22 Mar 2018 17:39:21 -0700 Subject: [PATCH 073/186] Update to dalek v0.16. 
The hash-to-ristretto procedure now takes 64 bytes of randomness (it applies Elligator twice), so the generators module is changed from Sha256 to Sha512. The test with hardcoded vectors is removed, since all the points changed, and checking that the points have specific hex encodings doesn't actually check that they're orthogonal. --- Cargo.toml | 4 +--- src/generators.rs | 43 +++----------------------------------- src/inner_product_proof.rs | 22 +++++++++---------- src/range_proof.rs | 12 +++++------ 4 files changed, 21 insertions(+), 60 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 3941d6c4..dd474c37 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Cathie "] [dependencies] -curve25519-dalek = { version = "^0.15", features = ["nightly"] } +curve25519-dalek = { version = "^0.16", features = ["nightly"] } sha2 = "^0.7" rand = "^0.4" byteorder = "1.2.1" @@ -20,5 +20,3 @@ std = ["curve25519-dalek/std"] git = 'https://github.com/chain/tiny-keccak.git' rev = '5925f81b3c351440283c3328e2345d982aac0f6e' -[patch.crates-io] -curve25519-dalek = { git = 'https://github.com/dalek-cryptography/curve25519-dalek', branch = 'develop' } diff --git a/src/generators.rs b/src/generators.rs index 94bd77b0..eadeab15 100644 --- a/src/generators.rs +++ b/src/generators.rs @@ -23,7 +23,7 @@ // XXX we should use Sha3 everywhere use curve25519_dalek::ristretto::RistrettoPoint; -use sha2::{Digest, Sha256}; +use sha2::{Digest, Sha512}; /// The `GeneratorsChain` creates an arbitrary-long sequence of orthogonal generators. /// The sequence can be deterministically produced starting with an arbitrary point. @@ -34,7 +34,7 @@ struct GeneratorsChain { impl GeneratorsChain { /// Creates a chain of generators, determined by the hash of `label`. 
fn new(label: &[u8]) -> Self { - let mut hash = Sha256::default(); + let mut hash = Sha512::default(); hash.input(b"GeneratorsChainInit"); hash.input(label); let next_point = RistrettoPoint::from_hash(hash); @@ -52,7 +52,7 @@ impl Iterator for GeneratorsChain { type Item = RistrettoPoint; fn next(&mut self) -> Option { let current_point = self.next_point; - let mut hash = Sha256::default(); + let mut hash = Sha512::default(); hash.input(b"GeneratorsChainNext"); hash.input(current_point.compress().as_bytes()); self.next_point = RistrettoPoint::from_hash(hash); @@ -173,41 +173,4 @@ mod tests { [gens.share(2).G[..].to_vec(), gens.share(2).H[..].to_vec()] ); } - - #[test] - fn generator_orthogonality() { - let n = 2; - let m = 1; - let gens = Generators::new(n, m); - let view = gens.all(); - - assert_eq!( - hex::encode(RISTRETTO_BASEPOINT_POINT.compress().as_bytes()), - "e2f2ae0a6abc4e71a884a961c500515f58e30b6aa582dd8db6a65945e08d2d76" - ); - assert_eq!( - hex::encode(view.B.compress().as_bytes()), - "6abd9de445ed16637be32da51bbd3fa114f984c52081258a1f476c8493f09731" - ); - assert_eq!( - hex::encode(view.B_blinding.compress().as_bytes()), - "5c97d2b3cd6994ae1a4d6bd7371b40800b6a28afb1db14b81b4b5107ed9c5478" - ); - assert_eq!( - hex::encode(view.G[0].compress().as_bytes()), - "688bac289f5e4ed902648278b4e81a2b8a028365b0a7753fd0242e499bd6200e" - ); - assert_eq!( - hex::encode(view.G[1].compress().as_bytes()), - "7e49425c91464e4b3aa4c4676e7deba7e91d1cfd1a19a0a39dfd73b0cecdb55c" - ); - assert_eq!( - hex::encode(view.H[0].compress().as_bytes()), - "50140daade760912586d04be961dab5d723d1aba05b536b13b99f69225ea4002" - ); - assert_eq!( - hex::encode(view.H[1].compress().as_bytes()), - "ac23f3c0964e8bb1b9c61869edbb39c4417a96d518715d2e3e60a03cd722d13d" - ); - } } diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index ebe02589..45a9c6d8 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -18,7 +18,7 @@ use generators::Generators; use range_proof::inner_product; -use sha2::Sha256; +use sha2::Sha512; #[derive(Clone, Debug)] pub struct Proof { @@ -86,12 +86,12 @@ impl Proof { let c_L = inner_product(&a_L, &b_R); let c_R = inner_product(&a_R, &b_L); - let L = ristretto::vartime::multiscalar_mult( + let L = ristretto::vartime::multiscalar_mul( a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), G_R.iter().chain(H_L.iter()).chain(iter::once(Q)), ); - let R = ristretto::vartime::multiscalar_mult( + let R = ristretto::vartime::multiscalar_mul( a_R.iter().chain(b_L.iter()).chain(iter::once(&c_R)), G_L.iter().chain(H_R.iter()).chain(iter::once(Q)), ); @@ -108,8 +108,8 @@ impl Proof { for i in 0..n { a_L[i] = a_L[i] * x + x_inv * a_R[i]; b_L[i] = b_L[i] * x_inv + x * b_R[i]; - G_L[i] = ristretto::vartime::multiscalar_mult(&[x_inv, x], &[G_L[i], G_R[i]]); - H_L[i] = ristretto::vartime::multiscalar_mult(&[x, x_inv], &[H_L[i], H_R[i]]); + G_L[i] = ristretto::vartime::multiscalar_mul(&[x_inv, x], &[G_L[i], G_R[i]]); + H_L[i] = ristretto::vartime::multiscalar_mul(&[x, x_inv], &[H_L[i], H_R[i]]); } a = a_L; @@ -203,7 +203,7 @@ impl Proof { let neg_x_sq = x_sq.iter().map(|xi| -xi); let neg_x_inv_sq = x_inv_sq.iter().map(|xi| -xi); - let expect_P = ristretto::vartime::multiscalar_mult( + let expect_P = ristretto::vartime::multiscalar_mul( iter::once(self.a * self.b) .chain(a_times_s) .chain(h_times_b_div_s) @@ -238,7 +238,7 @@ mod tests { let H = gens.share(0).H.to_vec(); // Q would be determined upstream in the protocol, so we pick a random one. 
- let Q = RistrettoPoint::hash_from_bytes::(b"test point"); + let Q = RistrettoPoint::hash_from_bytes::(b"test point"); // a and b are the vectors for which we want to prove c = let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); @@ -257,7 +257,7 @@ mod tests { // a.iter() has Item=&Scalar, need Item=Scalar to chain with b_prime let a_prime = a.iter().cloned(); - let P = ristretto::vartime::multiscalar_mult( + let P = ristretto::vartime::multiscalar_mul( a_prime.chain(b_prime).chain(iter::once(c)), G.iter().chain(H.iter()).chain(iter::once(&Q)), ); @@ -319,7 +319,7 @@ mod bench { let H = gens.share(0).H.to_vec(); // Q would be determined upstream in the protocol, so we pick a random one. - let Q = RistrettoPoint::hash_from_bytes::(b"test point"); + let Q = RistrettoPoint::hash_from_bytes::(b"test point"); let a = vec![Scalar::from_u64(1); n]; let b = vec![Scalar::from_u64(2); n]; @@ -346,7 +346,7 @@ mod bench { let H = gens.share(0).H.to_vec(); // Q would be determined upstream in the protocol, so we pick a random one. - let Q = RistrettoPoint::hash_from_bytes::(b"test point"); + let Q = RistrettoPoint::hash_from_bytes::(b"test point"); let a = vec![Scalar::from_u64(1); n]; let b = vec![Scalar::from_u64(2); n]; @@ -366,7 +366,7 @@ mod bench { let c = inner_product(&a, &b); - let P = ristretto::vartime::multiscalar_mult( + let P = ristretto::vartime::multiscalar_mul( a.iter().chain(b.iter()).chain(iter::once(&c)), G.iter().chain(H.iter()).chain(iter::once(&Q)), ); diff --git a/src/range_proof.rs b/src/range_proof.rs index 39d0efed..bca0ee76 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -4,7 +4,7 @@ use rand::Rng; use std::iter; -use sha2::{Digest, Sha256, Sha512}; +use sha2::{Digest, Sha512}; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; @@ -69,7 +69,7 @@ impl RangeProof { let H = generators.H.to_vec(); let V = - ristretto::multiscalar_mult(&[Scalar::from_u64(v), *v_blinding], &[*B, *B_blinding]); + ristretto::multiscalar_mul(&[Scalar::from_u64(v), *v_blinding], &[*B, *B_blinding]); let a_blinding = Scalar::random(rng); @@ -90,7 +90,7 @@ impl RangeProof { let s_R: Vec<_> = (0..n).map(|_| Scalar::random(rng)).collect(); // Compute S = + + s_blinding * B_blinding. 
- let S = ristretto::multiscalar_mult( + let S = ristretto::multiscalar_mul( iter::once(&s_blinding).chain(s_L.iter()).chain(s_R.iter()), iter::once(B_blinding).chain(G.iter()).chain(H.iter()), ); @@ -128,8 +128,8 @@ impl RangeProof { // Form commitments T_1, T_2 to t.1, t.2 let t_1_blinding = Scalar::random(rng); let t_2_blinding = Scalar::random(rng); - let T_1 = ristretto::multiscalar_mult(&[t_poly.1, t_1_blinding], &[*B, *B_blinding]); - let T_2 = ristretto::multiscalar_mult(&[t_poly.2, t_2_blinding], &[*B, *B_blinding]); + let T_1 = ristretto::multiscalar_mul(&[t_poly.1, t_1_blinding], &[*B, *B_blinding]); + let T_2 = ristretto::multiscalar_mul(&[t_poly.2, t_2_blinding], &[*B, *B_blinding]); // Commit to T_1, T_2 to get the challenge point x transcript.commit(T_1.compress().as_bytes()); @@ -218,7 +218,7 @@ impl RangeProof { .zip(util::exp_iter(y.invert())) .map(|((s_i_inv, exp_2), exp_y_inv)| z + exp_y_inv * (zz * exp_2 - b * s_i_inv)); - let mega_check = ristretto::vartime::multiscalar_mult( + let mega_check = ristretto::vartime::multiscalar_mul( iter::once(Scalar::one()) .chain(iter::once(x)) .chain(iter::once(c * zz)) From 2d16bb187506d93abf9d7a984bed517d34a5fffa Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 22 Mar 2018 17:22:06 -0700 Subject: [PATCH 074/186] Replace existing libtest benchmarks with Criterion benchmarks. --- Cargo.toml | 5 ++ benches/bulletproofs.rs | 111 +++++++++++++++++++++++++++++++++++++ src/inner_product_proof.rs | 99 --------------------------------- src/range_proof.rs | 93 ------------------------------- 4 files changed, 116 insertions(+), 192 deletions(-) create mode 100644 benches/bulletproofs.rs diff --git a/Cargo.toml b/Cargo.toml index dd474c37..9689c73d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,7 @@ byteorder = "1.2.1" [dev-dependencies] hex = "^0.3" +criterion = "0.2" [features] yolocrypto = ["curve25519-dalek/yolocrypto"] @@ -20,3 +21,7 @@ std = ["curve25519-dalek/std"] git = 'https://github.com/chain/tiny-keccak.git' rev = '5925f81b3c351440283c3328e2345d982aac0f6e' +[[bench]] +name = "bulletproofs" +harness = false + diff --git a/benches/bulletproofs.rs b/benches/bulletproofs.rs new file mode 100644 index 00000000..7f109aec --- /dev/null +++ b/benches/bulletproofs.rs @@ -0,0 +1,111 @@ +#[macro_use] +extern crate criterion; +use criterion::Criterion; + +extern crate rand; +use rand::{OsRng, Rng}; + +extern crate curve25519_dalek; +use curve25519_dalek::scalar::Scalar; + +extern crate ristretto_bulletproofs; +use ristretto_bulletproofs::generators::{Generators, GeneratorsView}; +use ristretto_bulletproofs::proof_transcript::ProofTranscript; +use ristretto_bulletproofs::RangeProof; + +fn bench_create_helper(n: usize, c: &mut Criterion) { + c.bench_function(&format!("create_rangeproof_n_{}", n), move |b| { + let generators = Generators::new(n, 1); + let mut rng = OsRng::new().unwrap(); + + let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); + let v_blinding = Scalar::random(&mut rng); + + b.iter(|| { + // Each proof creation requires a clean transcript. 
+ let mut transcript = ProofTranscript::new(b"RangeproofTest"); + + RangeProof::generate_proof( + generators.share(0), + &mut transcript, + &mut rng, + n, + v, + &v_blinding, + ) + }) + }); +} + +fn bench_verify_helper(n: usize, c: &mut Criterion) { + c.bench_function(&format!("verify_rangeproof_n_{}", n), move |b| { + let generators = Generators::new(n, 1); + let mut rng = OsRng::new().unwrap(); + + let mut transcript = ProofTranscript::new(b"RangeproofTest"); + let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); + let v_blinding = Scalar::random(&mut rng); + + let rp = RangeProof::generate_proof( + generators.share(0), + &mut transcript, + &mut rng, + n, + v, + &v_blinding, + ); + + b.iter(|| { + // Each verification requires a clean transcript. + let mut transcript = ProofTranscript::new(b"RangeproofTest"); + + rp.verify(generators.share(0), &mut transcript, &mut rng, n) + }); + }); +} + +fn create_rp_64(c: &mut Criterion) { + bench_create_helper(64, c); +} + +fn create_rp_32(c: &mut Criterion) { + bench_create_helper(32, c); +} + +fn create_rp_16(c: &mut Criterion) { + bench_create_helper(16, c); +} + +fn create_rp_8(c: &mut Criterion) { + bench_create_helper(8, c); +} + +criterion_group!{ + name = create_rp; + config = Criterion::default(); + targets = create_rp_8, create_rp_16, create_rp_32, create_rp_64 +} + +fn verify_rp_64(c: &mut Criterion) { + bench_verify_helper(64, c); +} + +fn verify_rp_32(c: &mut Criterion) { + bench_verify_helper(32, c); +} + +fn verify_rp_16(c: &mut Criterion) { + bench_verify_helper(16, c); +} + +fn verify_rp_8(c: &mut Criterion) { + bench_verify_helper(8, c); +} + +criterion_group!{ + name = verify_rp; + config = Criterion::default(); + targets = verify_rp_8, verify_rp_16, verify_rp_32, verify_rp_64 +} + +criterion_main!(create_rp, verify_rp); diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 45a9c6d8..f9b29670 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -306,102 +306,3 @@ mod tests { test_helper_create(64); } } - -#[cfg(test)] -mod bench { - - use super::*; - use test::Bencher; - - fn bench_helper_create(n: usize, bench: &mut Bencher) { - let gens = Generators::new(n, 1); - let G = gens.share(0).G.to_vec(); - let H = gens.share(0).H.to_vec(); - - // Q would be determined upstream in the protocol, so we pick a random one. - let Q = RistrettoPoint::hash_from_bytes::(b"test point"); - - let a = vec![Scalar::from_u64(1); n]; - let b = vec![Scalar::from_u64(2); n]; - let ones = vec![Scalar::from_u64(1); n]; - - let mut verifier = ProofTranscript::new(b"innerproducttest"); - - bench.iter(|| { - Proof::create( - &mut verifier, - &Q, - &ones, - G.clone(), - H.clone(), - a.clone(), - b.clone(), - ) - }); - } - - fn bench_helper_verify(n: usize, bench: &mut Bencher) { - let gens = Generators::new(n, 1); - let G = gens.share(0).G.to_vec(); - let H = gens.share(0).H.to_vec(); - - // Q would be determined upstream in the protocol, so we pick a random one. 
- let Q = RistrettoPoint::hash_from_bytes::(b"test point"); - - let a = vec![Scalar::from_u64(1); n]; - let b = vec![Scalar::from_u64(2); n]; - let ones = vec![Scalar::from_u64(1); n]; - - let mut verifier = ProofTranscript::new(b"innerproducttest"); - - let proof = Proof::create( - &mut verifier, - &Q, - &ones, - G.clone(), - H.clone(), - a.clone(), - b.clone(), - ); - - let c = inner_product(&a, &b); - - let P = ristretto::vartime::multiscalar_mul( - a.iter().chain(b.iter()).chain(iter::once(&c)), - G.iter().chain(H.iter()).chain(iter::once(&Q)), - ); - - let mut verifier = ProofTranscript::new(b"innerproducttest"); - bench.iter(|| proof.verify(&mut verifier, &ones, &P, &Q, &G, &H)); - } - - #[bench] - fn create_n_eq_64(b: &mut Bencher) { - bench_helper_create(64, b); - } - - #[bench] - fn create_n_eq_32(b: &mut Bencher) { - bench_helper_create(32, b); - } - - #[bench] - fn create_n_eq_16(b: &mut Bencher) { - bench_helper_create(16, b); - } - - #[bench] - fn verify_n_eq_64(b: &mut Bencher) { - bench_helper_verify(64, b); - } - - #[bench] - fn verify_n_eq_32(b: &mut Bencher) { - bench_helper_verify(32, b); - } - - #[bench] - fn verify_n_eq_16(b: &mut Bencher) { - bench_helper_verify(16, b); - } -} diff --git a/src/range_proof.rs b/src/range_proof.rs index bca0ee76..ba2e58cb 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -424,96 +424,3 @@ mod tests { } } -#[cfg(test)] -mod bench { - use super::*; - use rand::{OsRng, Rng}; - use test::Bencher; - - fn bench_create_helper(n: usize, b: &mut Bencher) { - let generators = Generators::new(n, 1); - let mut rng = OsRng::new().unwrap(); - - let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); - let v_blinding = Scalar::random(&mut rng); - - b.iter(|| { - // Each proof creation requires a clean transcript. - let mut transcript = ProofTranscript::new(b"RangeproofTest"); - - RangeProof::generate_proof( - generators.share(0), - &mut transcript, - &mut rng, - n, - v, - &v_blinding, - ) - }); - } - - fn bench_verify_helper(n: usize, b: &mut Bencher) { - let generators = Generators::new(n, 1); - let mut rng = OsRng::new().unwrap(); - - let mut transcript = ProofTranscript::new(b"RangeproofTest"); - let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); - let v_blinding = Scalar::random(&mut rng); - - let rp = RangeProof::generate_proof( - generators.share(0), - &mut transcript, - &mut rng, - n, - v, - &v_blinding, - ); - - b.iter(|| { - // Each verification requires a clean transcript. - let mut transcript = ProofTranscript::new(b"RangeproofTest"); - - rp.verify(generators.share(0), &mut transcript, &mut rng, n) - }); - } - - #[bench] - fn create_rp_64(b: &mut Bencher) { - bench_create_helper(64, b); - } - - #[bench] - fn create_rp_32(b: &mut Bencher) { - bench_create_helper(32, b); - } - - #[bench] - fn create_rp_16(b: &mut Bencher) { - bench_create_helper(16, b); - } - - #[bench] - fn create_rp_8(b: &mut Bencher) { - bench_create_helper(8, b); - } - - #[bench] - fn verify_rp_64(b: &mut Bencher) { - bench_verify_helper(64, b); - } - - #[bench] - fn verify_rp_32(b: &mut Bencher) { - bench_verify_helper(32, b); - } - - #[bench] - fn verify_rp_16(b: &mut Bencher) { - bench_verify_helper(16, b); - } - - #[bench] - fn verify_rp_8(b: &mut Bencher) { - bench_verify_helper(8, b); - } -} From 0338abf85d44c5f28e7a863ec5847fd926b053e0 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Wed, 28 Mar 2018 11:23:58 -0700 Subject: [PATCH 075/186] Import some of the TeX notes into Markdown+KaTeX. 
Notes on how this hell process works: This is kind of terrible because there's a bunch of stuff that KaTeX doesn't support (e.g., macros with arguments), so it's a real pain to import the notes. To do this, start with pandoc: pandoc -f latex -t markdown input.tex -o output.md Pandoc uses $...$ for inline math and $$...$$ for display math, but we want \(...\) and \[...\]. This means we actually need \\( and \\) in the Markdown in order to get \( \) in the generated HTML. To do this conversion, try (in vim) :%s/\$\$\(\_.\{-}\)\$\$/\\\\[\r\1\r\\\\]/gc to change $$...$$ to \[ newline ... newline \]. Note that \_ in the match means "match across lines" and \{-} means "*, but non-greedy", while \r is used in the substitution for a newline because Vim thinks \n should mean null byte (but only when substituting, not when matching). Next replace $s with :%s/\$\(\_.\{-}[^\\]\)\$/\\\\(\1\\\\)/gc the extra [^\\] is to prevent matching \$ as an endpoint of a $...$. Finally, fix up errors. --- Makefile | 8 + docs/assets/rustdoc-include-katex-header.html | 17 + docs/notes.md | 545 ++++++++++++++++++ src/lib.rs | 5 + 4 files changed, 575 insertions(+) create mode 100644 Makefile create mode 100644 docs/assets/rustdoc-include-katex-header.html create mode 100644 docs/notes.md diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..095ca5a5 --- /dev/null +++ b/Makefile @@ -0,0 +1,8 @@ +FEATURES := + +doc: + cargo rustdoc --features "$(FEATURES)" -- --html-in-header docs/assets/rustdoc-include-katex-header.html + +doc-internal: + cargo rustdoc --features "$(FEATURES)" -- --html-in-header docs/assets/rustdoc-include-katex-header.html --document-private-items + diff --git a/docs/assets/rustdoc-include-katex-header.html b/docs/assets/rustdoc-include-katex-header.html new file mode 100644 index 00000000..51232f44 --- /dev/null +++ b/docs/assets/rustdoc-include-katex-header.html @@ -0,0 +1,17 @@ + + + + + diff --git a/docs/notes.md b/docs/notes.md new file mode 100644 index 00000000..b443f391 --- /dev/null +++ b/docs/notes.md @@ -0,0 +1,545 @@ +This module contains notes on how and why Bulletproofs work. + +The documentation is laid out roughly as follows. General notes on +the range proof and inner-product proofs are here. The description of +each protocol is contained in the respective `range_proof` and +`inner_product_proof` modules. Finally, structs from those modules +are publicly re-exported from the crate root, so that the external +documentation describes how to use the API, while the internal +documentation describes how it works. + +Notation +======== + +We change notation from the original Bulletproofs paper. The primary +motivation is that our implementation uses additive notation, and we +would like our description of the protocol to use the same notation as +the implementation. + +In general, we use lower-case letters +\\(a, b, c\\) +for scalars in +\\({\mathbb Z\_p}\\) +and upper-case letters +\\(G,H,P,Q\\) +for group elements in +\\({\mathbb G}\\). +Vectors are denoted as \\({\mathbf{a}}\\) and \\({\mathbf{G}}\\), +and the inner product of two vectors is denoted by +\\({\langle -, - \rangle}\\). Notice that +\\({\langle {\mathbf{a}}, {\mathbf{b}} \rangle} \in {\mathbb Z\_p}\\) +produces a scalar, while +\\({\langle {\mathbf{a}}, {\mathbf{G}} \rangle} \in {\mathbb G}\\) +is a multiscalar multiplication. The vectors of all \\(0\\) and all \\(1\\) are +denoted by \\({\mathbf{0}}\\), \\({\mathbf{1}}\\) respectively. 
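As a small illustration of how this notation maps onto the implementation: \\({\langle {\mathbf{a}}, {\mathbf{G}} \rangle}\\) is computed as a multiscalar multiplication. The sketch below is standalone and only assumes the curve25519-dalek API already used throughout this crate (`Scalar`, `RistrettoPoint`, `ristretto::vartime::multiscalar_mul`).

```rust
extern crate curve25519_dalek;
extern crate rand;

use curve25519_dalek::ristretto::{self, RistrettoPoint};
use curve25519_dalek::scalar::Scalar;
use curve25519_dalek::traits::Identity;
use rand::OsRng;

fn main() {
    let mut rng = OsRng::new().unwrap();

    // A short vector of scalars a and a vector of points G.
    let a: Vec<Scalar> = (0..4).map(|_| Scalar::random(&mut rng)).collect();
    let g: Vec<RistrettoPoint> = (0..4).map(|_| RistrettoPoint::random(&mut rng)).collect();

    // <a, G> is the multiscalar multiplication a_0*G_0 + ... + a_{n-1}*G_{n-1},
    // which is a group element.
    let a_dot_g = ristretto::vartime::multiscalar_mul(a.iter(), g.iter());

    // It agrees with the naive sum of individual scalar multiplications.
    let naive = a.iter()
        .zip(g.iter())
        .fold(RistrettoPoint::identity(), |sum, (a_i, g_i)| sum + (*a_i * g_i));
    assert_eq!(a_dot_g, naive);
}
```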
+ +Vectors are indexed starting from \\(0\\), unlike the paper, which indexes +from \\(1\\). For a scalar \\(y\\), we write +\\[ +\begin{aligned} + {\mathbf{y}}^{n} &= (1,y,y^{2},\ldots,y^{n-1}) +\end{aligned} +\\] +for the vector whose \\(i\\)-th entry is \\(y^{i}\\). For vectors +\\({\mathbf{v}}\\) of even +length \\(2k\\), we define \\({\mathbf{v}}\_{\operatorname{lo}}\\) and +\\({\mathbf{v}}\_{\operatorname{hi}}\\) to be the low and high halves of +\\({\mathbf{v}}\\): +\\[ +\begin{aligned} + {\mathbf{v}}\_{\operatorname{lo}} &= (v\_0, \ldots, v\_{k-1})\\\\ + {\mathbf{v}}\_{\operatorname{hi}} &= (v\_{k}, \ldots, v\_{2k-1}) +\end{aligned} +\\] +Pedersen commitments are written as +\\[ +\begin{aligned} + \operatorname{Com}(v) &= \operatorname{Com}(v, {\widetilde{v}}) = v \cdot B + {\widetilde{v}} \cdot {\widetilde{B}}, +\end{aligned} +\\] +where \\(B\\) and \\({\widetilde{B}}\\) are the generators used for the values +and blinding factors, respectively. We denote the blinding factor for +the value \\(v\\) by \\({\widetilde{v}}\\), so that it is clear which blinding +factor corresponds to which value, and write \\(\operatorname{Com}(v)\\) +instead of \\(\operatorname{Com}(v, {\widetilde{v}})\\) for brevity. + +We also make use of *vector Pedersen commitments*, which we define for +pairs of vectors as \\[ +\begin{aligned} + \operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R}) + &= \operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R}, {\widetilde{a}}) + = {\langle {\mathbf{a}}\_{L}, {\mathbf{G}} \rangle} + {\langle {\mathbf{a}}\_{R}, {\mathbf{H}} \rangle} + {\widetilde{a}} {\widetilde{B}},\end{aligned} +\\] +where \\({\mathbf{G}}\\) and \\({\mathbf{H}}\\) are vectors of generators. +Notice that this is exactly the same as taking a commitment to the +vector of values \\({\mathbf{a}}\_{L} \Vert {\mathbf{a}}\_{R}\\) with the +vector of bases \\({\mathbf{G}} \Vert {\mathbf{H}}\\), but defining the +commitment on pairs of vectors is a more convenient notation. + +Decoder Ring +------------ + +Mapping from paper notation to this notation: +\\[ +\begin{aligned} + g^a &\xrightarrow{} a \cdot G\\\\ + g \cdot h &\xrightarrow{} G + H\\\\ + g^a \cdot h^y &\xrightarrow{} a \cdot G + y \cdot H\\\\ + {\mathbf{g}}^{{\mathbf{a}}} &\xrightarrow{} {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} = {\textstyle\sum a\_i \cdot G\_i}\end{aligned} +\\] +Variables: +\\[ +\begin{aligned} + g &\xrightarrow{} B & \gamma &\xrightarrow{} \tilde{v} \\\\ + h &\xrightarrow{} \tilde{B} & \alpha &\xrightarrow{} \tilde{a} \\\\ + {\mathbf{g}} &\xrightarrow{} {\mathbf{G}} & \rho &\xrightarrow{} \tilde{s} \\\\ + {\mathbf{h}} &\xrightarrow{} {\mathbf{H}} & \tau\_i &\xrightarrow{} \tilde{t}\_i \\\\ + & & \mu &\xrightarrow{} \tilde{e} \\\\ +\end{aligned} +\\] + +Range Proofs from Inner Products +================================ + +The goal of a *range proof* is for a *prover* to convince a *verifier* +that a particular value \\(v\\) lies within a valid range, without revealing +any additional information about the value \\(v\\). + +The prover begins with a secret value \\(v\\) and commitment +\\(V = \operatorname{Com}(v)\\), which it sends to the verifier. The prover +wishes to convince the verifier that +\\[ +\begin{aligned} + v &\in [0, 2^{n}) +\end{aligned} +\\] +without revealing \\(v\\). + +Since the prover will eventually use an inner product proof to do this, +we want to work towards expressing this condition +in terms of a single inner product. 
In this section, we construct +successive statements which imply \\(v \in [0,2^{n})\\) +until we arrive at the ones the prover will use to convince +the verifier. + +Proving range statements with bit vectors +----------------------------------------- + +Write the bits of \\(v\\) as \\({\mathbf{a}}\\). If \\({\mathbf{2}}^{n}\\) is the +vector \\((1,2,4,\ldots,2^{n-1})\\) of powers of \\(2\\), then +\\[ +\begin{aligned} + {\langle {\mathbf{a}}, {\mathbf{2}}^{n} \rangle} &= v, \\\\ + {\mathbf{a}} \circ ({\mathbf{a}} - {\mathbf{1}}) &= {\mathbf{0}}^{n} . +\end{aligned} +\\] +Here \\({\mathbf{x}} \circ {\mathbf{y}}\\) denotes the entry-wise +multiplication of two vectors. +Together, these conditions imply the range condition. +We will +eventually need to make separate commitments to the vectors +\\({\mathbf{a}}\\) and \\({\mathbf{a}} - {\mathbf{1}}\\), so we set +\\({\mathbf{a}}\_{L} = {\mathbf{a}}\\), +\\({\mathbf{a}}\_{R} = {\mathbf{a}} - {\mathbf{1}}\\) to obtain +\\[ +\begin{aligned} + {\langle {\mathbf{a}}\_{L}, {\mathbf{2}}^{n} \rangle} &= v, \\\\ + {\mathbf{a}}\_{L} \circ {\mathbf{a}}\_{R} &= {\mathbf{0}}, \\\\ + ({\mathbf{a}}\_{L} - {\mathbf{1}}) - {\mathbf{a}}\_{R} &= {\mathbf{0}}. +\end{aligned} +\\] + +Proving vectors of statements with a single statement +----------------------------------------------------- + +The statements above are statements about vectors, or equivalently, a +vector of statements about each entry. Now, we want to combine these +into a single statement. Since \\({\mathbf{b}} = {\mathbf{0}}\\) if and only +if \\({\langle {\mathbf{b}}, {\mathbf{y}}^{n} \rangle} = 0\\) for every \\(y\\), +the statements above are implied by +\\[ +\begin{aligned} + {\langle {\mathbf{a}}\_{L}, {\mathbf{2}}^{n} \rangle} &= v, \\\\ + {\langle {\mathbf{a}}\_{L} - {\mathbf{1}} - {\mathbf{a}}\_{R}, {\mathbf{y}}^{n} \rangle} &= 0, \\\\ + {\langle {\mathbf{a}}\_{L}, {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} &= 0 +\end{aligned} +\\] +for the verifier’s choice of a challenge value \\(y\\). These statements can +then be combined in the same way, using the verifier’s choice of \\(z\\): +\\[ +\begin{aligned} +z^{2} v +&= + z^{2} {\langle {\mathbf{a}}\_{L}, {\mathbf{2}}^{n} \rangle} + + z {\langle {\mathbf{a}}\_{L} - {\mathbf{1}} - {\mathbf{a}}\_{R}, {\mathbf{y}}^{n} \rangle} + + {\langle {\mathbf{a}}\_{L}, {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} +\end{aligned} +\\] + +Combining inner-products +------------------------ + +Finally, we want to combine these terms into a single inner product. Our +goal is to rearrange the inner product above so that terms +involving \\({\mathbf{a}}\_{L}\\) appear only on the left-hand side, terms +involving \\({\mathbf{a}}\_{R}\\) appear only on the right-hand side, and +non-secret terms (which the verifier can compute on its own) are +factored out into a new term \\(\delta\\). 
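The rearrangement below ends in the identity
\\(z^{2}v + \delta(y,z) = {\langle {\mathbf{a}}\_{L} - z {\mathbf{1}}, {\mathbf{y}}^{n} \circ ({\mathbf{a}}\_{R} + z {\mathbf{1}}) + z^{2} {\mathbf{2}}^{n} \rangle}\\).
As a concrete sanity check of that end point, here is a standalone sketch that verifies it for a small example. The `powers`, `inner_product`, and `delta` helpers are written only for this sketch (the crate has its own `inner_product`, `delta`, and `util::exp_iter`), and the challenge values are fixed constants just to keep it deterministic.

```rust
extern crate curve25519_dalek;

use curve25519_dalek::scalar::Scalar;

/// 1, x, x^2, ..., x^{n-1}.
fn powers(x: Scalar, n: usize) -> Vec<Scalar> {
    let mut out = Vec::with_capacity(n);
    let mut cur = Scalar::one();
    for _ in 0..n {
        out.push(cur);
        cur = cur * x;
    }
    out
}

/// <a, b> for scalar vectors.
fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
    a.iter()
        .zip(b.iter())
        .fold(Scalar::zero(), |acc, (a_i, b_i)| acc + (*a_i * *b_i))
}

/// delta(y, z) = (z - z^2) <1, y^n> - z^3 <1, 2^n>.
fn delta(n: usize, y: Scalar, z: Scalar) -> Scalar {
    let sum_y = powers(y, n).iter().fold(Scalar::zero(), |acc, x| acc + *x);
    let sum_2 = powers(Scalar::from_u64(2), n)
        .iter()
        .fold(Scalar::zero(), |acc, x| acc + *x);
    (z - z * z) * sum_y - (z * z * z) * sum_2
}

fn main() {
    let n = 8;
    let v: u64 = 201; // any value in [0, 2^n)

    // Challenge values; arbitrary nonzero constants for the sketch.
    let y = Scalar::from_u64(9);
    let z = Scalar::from_u64(10);
    let zz = z * z;

    // a_L holds the bits of v; a_R = a_L - 1.
    let a_l: Vec<Scalar> = (0..n).map(|i| Scalar::from_u64((v >> i) & 1)).collect();
    let a_r: Vec<Scalar> = a_l.iter().map(|b| *b - Scalar::one()).collect();

    let y_n = powers(y, n);
    let two_n = powers(Scalar::from_u64(2), n);

    // z^2 v + delta(y, z)  ==  < a_L - z 1, y^n o (a_R + z 1) + z^2 2^n >
    let lhs = zz * Scalar::from_u64(v) + delta(n, y, z);
    let left: Vec<Scalar> = a_l.iter().map(|a| *a - z).collect();
    let right: Vec<Scalar> = (0..n)
        .map(|i| y_n[i] * (a_r[i] + z) + zz * two_n[i])
        .collect();
    assert_eq!(lhs, inner_product(&left, &right));
}
```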
+ +First, break the statement into simpler terms, then rearrange: +\\[ +\begin{aligned} +z^2 v +&= +z^2 {\langle {\mathbf{a}}\_{L}, {\mathbf{2}}^n \rangle} +\+ z {\langle {\mathbf{a}}\_{L}, {\mathbf{y}}^n \rangle} +\- z {\langle {\mathbf{a}}\_{R}, {\mathbf{y}}^n \rangle} +\- z {\langle {\mathbf{1}}, {\mathbf{y}}^n \rangle} +\+ {\langle {\mathbf{a}}\_{L}, {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\\\\ +z^{2} v +\+ z {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \rangle} +&= +z^2 {\langle {\mathbf{a}}\_{L}, {\mathbf{2}}^n \rangle} +\+ z {\langle {\mathbf{a}}\_{L}, {\mathbf{y}}^n \rangle} +\- z {\langle {\mathbf{1}} , {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\+ {\langle {\mathbf{a}}\_{L}, {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\\\\ +z^{2} v +\+ z {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \rangle} +&= +{\langle {\mathbf{a}}\_{L}, z^{2} {\mathbf{2}}^n \rangle} +\+ {\langle {\mathbf{a}}\_{L}, z {\mathbf{y}}^n \rangle} +\+ {\langle -z {\mathbf{1}} , {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\+ {\langle {\mathbf{a}}\_{L}, {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\\\\ +z^{2} v +\+ z {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \rangle} +&= +{\langle {\mathbf{a}}\_{L}, z^{2} {\mathbf{2}}^n + z {\mathbf{y}}^{n} + {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} +\+ {\langle -z {\mathbf{1}} , {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\end{aligned} +\\] +To combine the terms on the right-hand side, add +\\({\langle -z {\mathbf{1}}, z^2 {\mathbf{2}}^n + z {\mathbf{y}}^n \rangle}\\) +to each side, then simplify: +\\[ +\begin{aligned} +z^{2} v +\+ z {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \rangle} +\- {\langle z {\mathbf{1}}, z^2 {\mathbf{2}}^n + z {\mathbf{y}}^n \rangle} +&= +{\langle {\mathbf{a}}\_{L}, z^{2} {\mathbf{2}}^n + z {\mathbf{y}}^{n} + {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} +\\\\ +&+ {\langle -z {\mathbf{1}} , z^2 {\mathbf{2}}^n + z {\mathbf{y}}^n + {\mathbf{a}}\_{R} \circ {\mathbf{y}}^n \rangle} +\\\\ +z^2 v +\+ (z - z^2) {\langle {\mathbf{1}}, {\mathbf{y}}^n \rangle} +\- z^3 {\langle {\mathbf{1}}, {\mathbf{2}}^n \rangle} +&= +{\langle {\mathbf{a}}\_{L} - z{\mathbf{1}}, z^{2} {\mathbf{2}}^n + z {\mathbf{y}}^{n} + {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} +\end{aligned} +\\] +Writing +\\[ + \delta(y,z) = (z - z^{2}) {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \rangle} - z^{3} {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}, +\\] +we finally obtain +\\[ + z^{2}v + \delta(y,z) = {\langle {\mathbf{a}}\_{L} - z {\mathbf{1}}, {\mathbf{y}}^{n} \circ ({\mathbf{a}}\_{R} + z {\mathbf{1}}) + z^{2} {\mathbf{2}}^{n} \rangle}. +\\] +This is equivalent to the original inner-product equation, but has a single +inner product with \\({\mathbf{a}}\_{L}\\) on the left, \\({\mathbf{a}}\_{R}\\) on +the right, and non-secret terms factored out. + +Blinding the inner product +-------------------------- + +The prover cannot send the left and right vectors in +the single inner-product equation to the verifier without revealing information +about the value \\(v\\), and since the inner-product argument is not +zero-knowledge, they cannot be used there either. 
+ +Instead, the prover chooses vectors of blinding factors +\\[ +{\mathbf{s}}\_{L}, {\mathbf{s}}\_{R} \\;{\xleftarrow{\\$}}\\; {\mathbb Z\_p}^{n}, +\\] +and uses them to construct vector polynomials +\\[ +\begin{aligned} + {\mathbf{l}}(x) &= {\mathbf{l}}\_{0} + {\mathbf{l}}\_{1} x = ({\mathbf{a}}\_{L} + {\mathbf{s}}\_{L} x) - z {\mathbf{1}} & \in {\mathbb Z\_p}[x]^{n} \\\\ + {\mathbf{r}}(x) &= {\mathbf{r}}\_{0} + {\mathbf{r}}\_{1} x = {\mathbf{y}}^{n} \circ \left( ({\mathbf{a}}\_{R} + {\mathbf{s}}\_{R} x\right) + z {\mathbf{1}}) + z^{2} {\mathbf{2}}^{n} &\in {\mathbb Z\_p}[x]^{n} +\end{aligned} +\\] +These are the left and right sides of the inner product +(\[eqn:single\\_inner\]), with \\({\mathbf{a}}\_{L}\\), \\({\mathbf{a}}\_{R}\\) +replaced by blinded terms \\({\mathbf{a}}\_{L} + {\mathbf{s}}\_{L} x\\), +\\({\mathbf{a}}\_{R} + {\mathbf{s}}\_{R} x\\). Notice that since only the +blinding factors \\({\mathbf{s}}\_{L}\\), \\({\mathbf{s}}\_{R}\\) are multiplied +by \\(x\\), the vectors \\({\mathbf{l}}\_{0}\\) and \\({\mathbf{r}}\_{0}\\) are +exactly the left and right sides of the unblinded single inner-product. + +Setting +\\[ + t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle} = t\_{0} + t\_{1} x + t\_{2} x^{2}, +\\] +we can express the coefficients of \\(t(x)\\) using Karatsuba’s method: +\\[ +\begin{aligned} + t\_{0} &= {\langle {\mathbf{l}}\_{0}, {\mathbf{r}}\_{0} \rangle}, \\\\ + t\_{2} &= {\langle {\mathbf{l}}\_{1}, {\mathbf{r}}\_{1} \rangle}, \\\\ + t\_{1} &= {\langle {\mathbf{l}}\_{0} + {\mathbf{l}}\_{1}, {\mathbf{r}}\_{0} + {\mathbf{r}}\_{1} \rangle} - t\_{0} - t\_{2} +\end{aligned} +\\] +Since \\[ +\begin{aligned} + t\_{0} &= {\langle {\mathbf{a}}\_{L} - z {\mathbf{1}}, {\mathbf{y}}^{n} \circ ({\mathbf{a}}\_{R} + z {\mathbf{1}}) + z^{2} 2^{n} \rangle},\end{aligned} +\\] +for the prover to convince the verifier that the unblinded single inner-product equation +holds, it’s enough to prove that the constant term \\(t\_{0}\\) of \\(t(x)\\) is +\\(z^{2} v + \delta(y,z)\\), and that +this \\(t(x)\\) is the correct polynomial. +Proving that \\(t(x)\\) is correct means proving that +\\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correctly formed, and that +\\(t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle}\\). + +Proving that \\(t\_{0}\\) is correct +------------------------------------ + +In order to prove that the constant term of \\(t(x)\\) is +\\(z^{2} v + \delta(y,z)\\), the prover first forms a commitment to the +coefficients of \\(t(x)\\), then convinces the verifier that these commit to +the correct \\(t(x)\\) by evaluating the polynomial at a challenge point +\\(x\\). + +The prover has already used \\(V = \operatorname{Com}(v)\\) to commit to \\(v\\) +(and hence to \\(t\_{0}\\)), so the prover forms commitments +\\(T\_{1} = \operatorname{Com}(t\_{1})\\) and +\\(T\_{2} = \operatorname{Com}(t\_{2})\\), then sends these to the verifier. 
+The commitments \\(V\\), \\(T\_{1}\\), \\(T\_{2}\\) are related to each other and to +\\(t(x)\\) by the following diagram: +\\[ +\begin{aligned} + t(x) B &\quad &= \quad & z^{2}vB & \quad &+ \quad & \delta(y,z) B & \quad &+ \quad& x t\_{1} B &\quad &+\quad & x^2 t\_{2} B \\\\ + + &\quad & \quad & + & \quad & \quad & + & \quad & \quad& + &\quad & \quad & + \\\\ + {\widetilde{t}}(x) {\widetilde{B}} &\quad &= \quad & z^2 {\widetilde{v}} {\widetilde{B}} & \quad &+ \quad & 0 {\widetilde{B}} & \quad &+ \quad& x {\widetilde{t}}\_{1} {\widetilde{B}} &\quad &+\quad & x^{2} {\widetilde{t}}\_{2} {\widetilde{B}} \\\\ + \shortparallel &\quad & \quad & \shortparallel & \quad & \quad & \shortparallel & \quad & \quad& \shortparallel &\quad & \quad & \shortparallel \\\\ + &\quad &= \quad & z V & \quad &+ \quad & \delta(y,z) B & \quad &+ \quad& x T\_{1} &\quad &+\quad & x^{2} T\_{2} +\end{aligned} +\\] +Notice that the sum of each column is a commitment to the variable in the top +row using the blinding factor in the second row. +The sum of all of the columns is +\\(t(x) B + {\widetilde{t}}(x) {\widetilde{B}}\\), a commitment to the value +of \\(t\\) at the point \\(x\\), using the synthetic blinding factor[^1] +\\[ + {\widetilde{t}}(x) = z^{2} {\widetilde{v}} + x {\widetilde{t}}\_{1} + x^{2} {\widetilde{t}}\_{2}. +\\] +To convince the verifier that +\\(t(x) = z^2v + \delta(y,z) + t\_{1} x + t\_{2} x^{2}\\), the prover sends +the opening \\(t(x), {\widetilde{t}}(x)\\) to the verifier, who uses the +bottom row of the diagram to check consistency: +\\[ + t(x) B + {\widetilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z V + \delta(y,z) B + x T\_{1} + x^{2} T\_{2}. +\\] + +Proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct +--------------------------------------------------------------------- + +We want to relate \\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\) to commitments +to \\({\mathbf{a}}\_{L}\\), \\({\mathbf{a}}\_{R}\\), \\({\mathbf{s}}\_{L}\\), and +\\({\mathbf{s}}\_{R}\\). However, since \\[ +\begin{aligned} + {\mathbf{r}}(x) &= {\mathbf{y}}^{n} \circ \left( ({\mathbf{a}}\_{R} + {\mathbf{s}}\_{R} x\right) + z {\mathbf{1}}) + z^{2} {\mathbf{2}}^{n},\end{aligned} +\\] +we need commitments to \\({\mathbf{y}}^{n} \circ {\mathbf{a}}\_{R}\\) and +\\({\mathbf{y}}^{n} \circ {\mathbf{s}}\_{R}\\). However, since the prover +must form commitments before receiving the verifier’s challenge \\(y\\), the +prover can only commit to \\(a\_{R}\\) and \\(s\_{R}\\). Since the prover’s +commitments are to \\(a\_{R}\\) and \\(s\_{R}\\), the verifier needs to transmute +the prover’s commitment +\\( +\operatorname{Com}({\mathbf{a}}\_{L},{\mathbf{a}}\_{R}, {\widetilde{a}}) +\\) +into a commitment +\\( +\operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{y}}^{n} \circ {\mathbf{a}}\_{R}, {\widetilde{a}}) +\\) +(and similarly for \\({\mathbf{s}}\_{R}\\)). 
+To do this, notice that +\\[ +\begin{aligned} + \operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R}, {\widetilde{a}}) + &= + {\langle {\mathbf{a}}\_{L}, {\mathbf{G}} \rangle} + {\langle {\mathbf{a}}\_{R}, {\mathbf{H}} \rangle} + {\widetilde{a}} {\widetilde{B}} \\\\ + &= + {\langle {\mathbf{a}}\_{L}, {\mathbf{G}} \rangle} + {\langle {\mathbf{y}}^{n} \circ {\mathbf{a}}\_{R}, {\mathbf{y}}^{-n} \circ {\mathbf{H}} \rangle} + {\widetilde{a}} {\widetilde{B}}, +\end{aligned} +\\] +so that by changing generators to +\\({\mathbf{H}}' = {\mathbf{y}}^{-n} \circ {\mathbf{H}}\\), the point which +is a commitment to +\\(({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R}, {\widetilde{a}})\\) with respect to +\\(({\mathbf{G}}, {\mathbf{H}}, {\widetilde{a}})\\) is transmuted into a +commitment to +\\(({\mathbf{a}}\_{L}, {\mathbf{y}}^{n} \circ {\mathbf{a}}\_{R}, {\widetilde{a}})\\) +with respect to \\(({\mathbf{G}}, {\mathbf{H}}', {\widetilde{a}})\\). + +To relate the prover’s commitments +\\(A = \operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R})\\) and +\\(S = \operatorname{Com}({\mathbf{s}}\_{L}, {\mathbf{s}}\_{R})\\) to +\\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\), we use the following diagram: +\\[ +\begin{aligned} + {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} &\quad &= \quad & {\langle {\mathbf{a}}\_L, {\mathbf{G}} \rangle} & \quad &+ \quad & x {\langle {\mathbf{s}}\_L, {\mathbf{G}} \rangle} &\quad &+\quad & {\langle -z{\mathbf{1}}, {\mathbf{G}} \rangle} \\\\ + + &\quad & \quad & + & \quad & \quad & + &\quad & \quad & + \\\\ + {\langle {\mathbf{r}}(x), {\mathbf{H}}' \rangle} &\quad &= \quad & {\langle {\mathbf{a}}\_R, {\mathbf{H}} \rangle} & \quad &+ \quad & x {\langle {\mathbf{s}}\_R, {\mathbf{H}} \rangle} &\quad &+\quad & {\langle z {\mathbf{y}}^n + z^2 {\mathbf{2}}^n, {\mathbf{H}}' \rangle} \\\\ + + &\quad & \quad & + & \quad & \quad & + &\quad & \quad & \\\\ + {\widetilde{e}} {\widetilde{B}} &\quad &= \quad & {\widetilde{a}} {\widetilde{B}} & \quad &+ \quad & x {\widetilde{s}} {\widetilde{B}} &\quad & \quad & \\\\ + \shortparallel &\quad & \quad & \shortparallel & \quad & \quad & \shortparallel &\quad & \quad & \shortparallel \\\\ + &\quad &= \quad & A & \quad &+ \quad & x S &\quad &+\quad & {\langle z {\mathbf{y}}^n + z^2 {\mathbf{2}}^n, {\mathbf{H}}' \rangle} - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} +\end{aligned} +\\] +We can interpret the rows and columns similarly to the previous diagram: +the sum of each column is a vector Pedersen commitment with left and right halves from the first and second rows respectively +and blinding factor from the third row. +The sum of all of the columns is a vector +Pedersen commitment to \\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\) with +synthetic blinding factor \\({\widetilde{e}}\\). + +To convince the verifier that +\\(t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle}\\), the prover +sends \\({\widetilde{e}}\\) to the verifier, who uses the bottom row +to compute +\\[ + P = -{\widetilde{e}} {\widetilde{B}} + A + x S + {\langle z {\mathbf{y}}^n + z^2 {\mathbf{2}}^n, {\mathbf{H}}' \rangle} - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle}; +\\] +if the prover is honest, this is +\\(P = {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x), {\mathbf{H}}' \rangle}\\), +so the verifier uses \\(P\\), \\(t(x)\\) as inputs to the inner-product protocol +to prove that +\\(t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle}\\). 
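Concretely, the \\(P\\) in the last equation can be formed by the verifier in a single multiscalar multiplication. The following is a standalone sketch; the function and its argument list are hypothetical (the crate folds this computation into `RangeProof::verify` rather than exposing such a helper), and the change of generators \\({\mathbf{H}}' = {\mathbf{y}}^{-n} \circ {\mathbf{H}}\\) is folded directly into the \\(H\_{i}\\) coefficients, so the \\(i\\)-th one becomes \\(z + z^{2} (2/y)^{i}\\).

```rust
extern crate curve25519_dalek;

use std::iter;

use curve25519_dalek::ristretto::{self, RistrettoPoint};
use curve25519_dalek::scalar::Scalar;

/// Recompute P = -e~ B~ + A + x S + < z y^n + z^2 2^n, H' > - z <1, G>,
/// expressed over the *original* generators H (coefficient z + z^2 (2/y)^i).
fn recompute_p(
    a_commit: &RistrettoPoint,   // A
    s_commit: &RistrettoPoint,   // S
    b_blinding: &RistrettoPoint, // B~, the blinding-factor generator
    g_vec: &[RistrettoPoint],    // G
    h_vec: &[RistrettoPoint],    // H
    e_blinding: Scalar,          // e~, sent by the prover
    x: Scalar,
    y: Scalar,
    z: Scalar,
) -> RistrettoPoint {
    let n = g_vec.len();
    let zz = z * z;
    let two_over_y = Scalar::from_u64(2) * y.invert();

    // Coefficients z + z^2 * (2/y)^i for H_0, ..., H_{n-1}.
    let mut h_scalars = Vec::with_capacity(n);
    let mut exp = Scalar::one();
    for _ in 0..n {
        h_scalars.push(z + zz * exp);
        exp = exp * two_over_y;
    }

    ristretto::vartime::multiscalar_mul(
        iter::once(-e_blinding)
            .chain(iter::once(Scalar::one()))
            .chain(iter::once(x))
            .chain(iter::repeat(-z).take(n))
            .chain(h_scalars.into_iter()),
        iter::once(b_blinding)
            .chain(iter::once(a_commit))
            .chain(iter::once(s_commit))
            .chain(g_vec.iter())
            .chain(h_vec.iter()),
    )
}
```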
+ +Inner product argument protocol +=============================== + +We want to prove the relation +\\[ +\operatorname{PK}\left\\{ + ({\mathbf{G}}, {\mathbf{H}} \in {\mathbb G}^n, P, Q \in {\mathbb G}; {\mathbf{a}}, {\mathbf{b}} \in {\mathbb Z\_p}^n) + : P = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} Q +\right\\} +\\] where \\(n = 2^{k}\\) is a power of \\(2\\). + +To start, we sketch the +interactive version of this protocol, and then describe the +optimizations discussed in the Bulletproofs paper for the +non-interactive version. + +The protocol consists of \\(k = \lg n\\) rounds, indexed by +\\(j = k,\ldots,1\\). In the \\(j\\)-th round, the prover computes +\\[ +\begin{aligned} + L\_{j} &\gets {\langle {\mathbf{a}}\_{\operatorname{lo}}, {\mathbf{G}}\_{\operatorname{hi}} \rangle} + {\langle {\mathbf{b}}\_{\operatorname{hi}}, {\mathbf{H}}\_{\operatorname{lo}} \rangle} + {\langle {\mathbf{a}}\_{\operatorname{lo}}, {\mathbf{b}}\_{\operatorname{hi}} \rangle} Q, \\\\ + R\_{j} &\gets {\langle {\mathbf{a}}\_{\operatorname{hi}}, {\mathbf{G}}\_{\operatorname{lo}} \rangle} + {\langle {\mathbf{b}}\_{\operatorname{lo}}, {\mathbf{H}}\_{\operatorname{hi}} \rangle} + {\langle {\mathbf{a}}\_{\operatorname{hi}}, {\mathbf{b}}\_{\operatorname{lo}} \rangle} Q, +\end{aligned} +\\] +and sends \\(L\_{j}, R\_{j}\\) to the verifier. The verifier responds with a +challenge value \\(x\_{j} {\xleftarrow{\$}}{\mathbb Z\_p}\\). The prover uses +\\(x\_{j}\\) to compute +\\[ +\begin{aligned} + {\mathbf{a}} &\gets {\mathbf{a}}\_{\operatorname{lo}} \cdot x\_{j} + x\_{j}^{-1} \cdot {\mathbf{a}}\_{\operatorname{hi}}, \\\\ + {\mathbf{b}} &\gets {\mathbf{b}}\_{\operatorname{lo}} \cdot x\_{j}^{-1} + x\_{j} \cdot {\mathbf{a}}\_{\operatorname{hi}}, +\end{aligned} +\\] +the prover and verifier both compute +\\[ +\begin{aligned} + {\mathbf{G}} &\gets {\mathbf{G}}\_{\operatorname{lo}} \cdot x\_{j}^{-1} + x\_{j} \cdot {\mathbf{G}}\_{\operatorname{hi}}, \\\\ + {\mathbf{H}} &\gets {\mathbf{H}}\_{\operatorname{lo}} \cdot x\_{j} + x\_{j}^{-1} \cdot {\mathbf{H}}\_{\operatorname{hi}}, +\end{aligned} +\\] +and use these vectors (all of length \\(2^{j-1}\\)) for the next round. +After the last (\\(j = 1\\)) round, the prover sends +\\(a, b = {\mathbf{a}}\_{0}, {\mathbf{b}}\_{0}\\) to the verifier, who accepts +if and only if +\\[ +\begin{aligned} +L\_{1} x\_{1}^{2} + \cdots + L\_{k} x\_{k}^{2} + P + R\_{k} x\_{k}^{-2} + \cdots + R\_{1} x\_{1}^{-2}&\overset ? = aG + bH + abQ, +\end{aligned} +\\] +where \\(G, H = {\mathbf{G}}\_{0}, {\mathbf{H}}\_{0}\\). + +To make the protocol noninteractive, we replace the transmission of the +\\(L\_{j}\\) and \\(R\_{j}\\) and the response \\(x\_{j}\\) with a Fiat-Shamir +challenge, so that each \\(x\_{j}\\) is generated as a hash of the transcript +\\(L\_{k},R\_{k},\ldots,L\_{j},R\_{j}\\). At the end of the prover’s +computation, they send \\(a,b,L\_{k},R\_{k},\ldots,L\_{1},R\_{1}\\) to the +verifier. + +Since the final \\(G\\) and \\(H\\) values are functions of the challenges +\\(x\_{k},\ldots,x\_{1}\\), the verifier has to compute them as part of the +verification process. 
However, while the prover needs to compute the +intermediate vectors \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) in order to compute +the \\(L\_{j}\\) and \\(R\_{j}\\), the verifier doesn’t, and can compute the final +\\(G\\), \\(H\\) directly from the vectors \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) and +the challenges \\(x\_{k}, \ldots, x\_{1}\\). + +Let \\({\mathbf{G}}^{(j)}\\) be the value of \\({\mathbf{G}}\\) in the \\(j\\)-th +round, and let \\(G\_{i}\\) be the \\(i\\)-th entry of the initial vector +\\({\mathbf{G}}^{(k)} = +(G\_{0}, \ldots, G\_{n-1})\\). We have \\[ +\begin{aligned} + {\mathbf{G}}^{(j-1)} = ({\mathbf{G}}^{(j)})\_{\operatorname{lo}} x\_{j}^{-1} + ({\mathbf{G}}^{(j)})\_{\operatorname{hi}} x\_{j},\end{aligned} +\\] +so the coefficient of \\(G\_{i}\\) in the final \\(G\\) value is +\\[ +\begin{aligned} + s\_{i} &= x\_{k}^{b(i,k)} \cdots x\_1^{b(i,1)},\end{aligned} +\\] where +\\(b(i,j)\\) is either \\(-1\\) or \\(+1\\), according to whether \\(G\_{i}\\) appears in +the left or right half of \\({\mathbf{G}}^{(j)}\\). Since \\(G\_{i}\\) appears in +the \\((i \mod 2^{j})\\)-th entry of \\({\mathbf{G}}^{j}\\), this is +\\[ + b(i,j) = + \begin{cases} + -1 & \text{if \\((i \mod 2^{j}) < 2^{j-1}\\) }\\\\ + +1 & \text{if \\((i \mod 2^{j}) \ge 2^{j-1}\\) }\\\\ + \end{cases}. +\\] +But this is exactly +\\[ + b(i,j) = + \begin{cases} + -1 & \text{if bit \\(j-1\\) of \\(i\\) is 0} \\\\ + +1 & \text{if bit \\(j-1\\) of \\(i\\) is 1} \\\\ + \end{cases}. +\\] +This shows that +\\(G = {\langle {\mathbf{s}}, {\mathbf{G}} \rangle}\\). This formula differs +slightly from the one in the paper, because we index vectors and bits +from \\(0\\). + +Since \\(H\\) is computed similarly, but with the roles of +\\({\mathbf{H}}\_{\operatorname{lo}}\\) and +\\({\mathbf{H}}\_{\operatorname{hi}}\\) reversed, a similar argument shows +that \\(H = {\langle 1/{\mathbf{s}}, {\mathbf{H}} \rangle}\\). +Notice that +if \\(i'\\) is the bitwise NOT of \\(i\\), then \\(s\_{i'} = +1/s\_{i}\\), so the vector of inverses \\(1/{\mathbf{s}}\\) is a permutation of +the vector \\({\mathbf{s}}\\) and no additional computation is required to +obtain the \\(1/s\_{i}\\). + +The verifier’s computation then becomes +\\[ +\begin{aligned} +P \overset ? =& aG +bH +abQ - \sum\_{j=1}^{k} \left( L\_{k} x\_{k}^{2} + x\_{k}^{-2} R\_{k} \right) \\\\ +=& {\langle a \cdot {\mathbf{s}}, {\mathbf{G}} \rangle} + {\langle b /{\mathbf{s}}, {\mathbf{H}} \rangle} + abQ - \sum\_{j=1}^{k} \left( L\_{k} x\_{k}^{2} + x\_{k}^{-2} R\_{k} \right), +\end{aligned} +\\] +a single multiscalar multiplication with +\\(n + n + 1 + k + k = 2(n+k) + 1\\) points. + +[^1]: The blinding factor is synthetic in the sense that it is + synthesized from the blinding factors of the other commitments. 
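The \\(s\_{i}\\) coefficients can be computed directly from the bits of \\(i\\), as in the standalone sketch below. This direct version performs many scalar inversions; the crate's `verification_scalars` builds the same vector incrementally, starting from \\(s\_{0} = (x\_{1} \cdots x\_{k})^{-1}\\) and reusing earlier entries, which is what makes the inverses essentially free.

```rust
extern crate curve25519_dalek;

use curve25519_dalek::scalar::Scalar;

/// Compute s_0, ..., s_{n-1} where s_i = prod_j x_j^{b(i,j)} and
/// b(i,j) = +1 if bit j-1 of i is set, -1 otherwise.
/// `challenges` holds x_k, ..., x_1 in the order they were created.
fn s_vector(challenges: &[Scalar]) -> Vec<Scalar> {
    let k = challenges.len();
    let n = 1usize << k;
    let mut s = Vec::with_capacity(n);
    for i in 0..n {
        let mut s_i = Scalar::one();
        for j in 0..k {
            // Challenge x_{j+1} sits at index k - 1 - j (creation order);
            // its exponent is decided by bit j of i.
            let x = challenges[k - 1 - j];
            if (i >> j) & 1 == 1 {
                s_i = s_i * x;          // exponent +1
            } else {
                s_i = s_i * x.invert(); // exponent -1
            }
        }
        s.push(s_i);
    }
    s
}
```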
diff --git a/src/lib.rs b/src/lib.rs index 321b2695..f50e3b1c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,7 @@ #![cfg_attr(feature = "bench", feature(test))] #![feature(nll)] #![feature(test)] +#![feature(external_doc)] extern crate byteorder; extern crate curve25519_dalek; @@ -13,6 +14,10 @@ extern crate test; mod util; +#[doc(include = "../docs/notes.md")] +mod notes { +} + pub mod proof_transcript; pub mod generators; mod range_proof; From f2cdfe4ad590a00eec698dc2a1270b0ab66aca21 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 29 Mar 2018 15:01:30 -0700 Subject: [PATCH 076/186] Move IPP protocol description to inner_product_proof module --- docs/inner-product-protocol.md | 120 ++++++++++++++++++++++++++++++++ docs/notes.md | 121 --------------------------------- src/inner_product_proof.rs | 2 + 3 files changed, 122 insertions(+), 121 deletions(-) create mode 100644 docs/inner-product-protocol.md diff --git a/docs/inner-product-protocol.md b/docs/inner-product-protocol.md new file mode 100644 index 00000000..d50914b3 --- /dev/null +++ b/docs/inner-product-protocol.md @@ -0,0 +1,120 @@ +Inner product argument protocol +=============================== + +We want to prove the relation +\\[ +\operatorname{PK}\left\\{ + ({\mathbf{G}}, {\mathbf{H}} \in {\mathbb G}^n, P, Q \in {\mathbb G}; {\mathbf{a}}, {\mathbf{b}} \in {\mathbb Z\_p}^n) + : P = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} Q +\right\\} +\\] where \\(n = 2^{k}\\) is a power of \\(2\\). + +To start, we sketch the +interactive version of this protocol, and then describe the +optimizations discussed in the Bulletproofs paper for the +non-interactive version. + +The protocol consists of \\(k = \lg n\\) rounds, indexed by +\\(j = k,\ldots,1\\). In the \\(j\\)-th round, the prover computes +\\[ +\begin{aligned} + L\_{j} &\gets {\langle {\mathbf{a}}\_{\operatorname{lo}}, {\mathbf{G}}\_{\operatorname{hi}} \rangle} + {\langle {\mathbf{b}}\_{\operatorname{hi}}, {\mathbf{H}}\_{\operatorname{lo}} \rangle} + {\langle {\mathbf{a}}\_{\operatorname{lo}}, {\mathbf{b}}\_{\operatorname{hi}} \rangle} Q, \\\\ + R\_{j} &\gets {\langle {\mathbf{a}}\_{\operatorname{hi}}, {\mathbf{G}}\_{\operatorname{lo}} \rangle} + {\langle {\mathbf{b}}\_{\operatorname{lo}}, {\mathbf{H}}\_{\operatorname{hi}} \rangle} + {\langle {\mathbf{a}}\_{\operatorname{hi}}, {\mathbf{b}}\_{\operatorname{lo}} \rangle} Q, +\end{aligned} +\\] +and sends \\(L\_{j}, R\_{j}\\) to the verifier. The verifier responds with a +challenge value \\(x\_{j} {\xleftarrow{\$}}{\mathbb Z\_p}\\). The prover uses +\\(x\_{j}\\) to compute +\\[ +\begin{aligned} + {\mathbf{a}} &\gets {\mathbf{a}}\_{\operatorname{lo}} \cdot x\_{j} + x\_{j}^{-1} \cdot {\mathbf{a}}\_{\operatorname{hi}}, \\\\ + {\mathbf{b}} &\gets {\mathbf{b}}\_{\operatorname{lo}} \cdot x\_{j}^{-1} + x\_{j} \cdot {\mathbf{a}}\_{\operatorname{hi}}, +\end{aligned} +\\] +the prover and verifier both compute +\\[ +\begin{aligned} + {\mathbf{G}} &\gets {\mathbf{G}}\_{\operatorname{lo}} \cdot x\_{j}^{-1} + x\_{j} \cdot {\mathbf{G}}\_{\operatorname{hi}}, \\\\ + {\mathbf{H}} &\gets {\mathbf{H}}\_{\operatorname{lo}} \cdot x\_{j} + x\_{j}^{-1} \cdot {\mathbf{H}}\_{\operatorname{hi}}, +\end{aligned} +\\] +and use these vectors (all of length \\(2^{j-1}\\)) for the next round. 
+After the last (\\(j = 1\\)) round, the prover sends +\\(a, b = {\mathbf{a}}\_{0}, {\mathbf{b}}\_{0}\\) to the verifier, who accepts +if and only if +\\[ +\begin{aligned} +L\_{1} x\_{1}^{2} + \cdots + L\_{k} x\_{k}^{2} + P + R\_{k} x\_{k}^{-2} + \cdots + R\_{1} x\_{1}^{-2}&\overset ? = aG + bH + abQ, +\end{aligned} +\\] +where \\(G, H = {\mathbf{G}}\_{0}, {\mathbf{H}}\_{0}\\). + +To make the protocol noninteractive, we replace the transmission of the +\\(L\_{j}\\) and \\(R\_{j}\\) and the response \\(x\_{j}\\) with a Fiat-Shamir +challenge, so that each \\(x\_{j}\\) is generated as a hash of the transcript +\\(L\_{k},R\_{k},\ldots,L\_{j},R\_{j}\\). At the end of the prover’s +computation, they send \\(a,b,L\_{k},R\_{k},\ldots,L\_{1},R\_{1}\\) to the +verifier. + +Since the final \\(G\\) and \\(H\\) values are functions of the challenges +\\(x\_{k},\ldots,x\_{1}\\), the verifier has to compute them as part of the +verification process. However, while the prover needs to compute the +intermediate vectors \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) in order to compute +the \\(L\_{j}\\) and \\(R\_{j}\\), the verifier doesn’t, and can compute the final +\\(G\\), \\(H\\) directly from the vectors \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) and +the challenges \\(x\_{k}, \ldots, x\_{1}\\). + +Let \\({\mathbf{G}}^{(j)}\\) be the value of \\({\mathbf{G}}\\) in the \\(j\\)-th +round, and let \\(G\_{i}\\) be the \\(i\\)-th entry of the initial vector +\\({\mathbf{G}}^{(k)} = +(G\_{0}, \ldots, G\_{n-1})\\). We have \\[ +\begin{aligned} + {\mathbf{G}}^{(j-1)} = ({\mathbf{G}}^{(j)})\_{\operatorname{lo}} x\_{j}^{-1} + ({\mathbf{G}}^{(j)})\_{\operatorname{hi}} x\_{j},\end{aligned} +\\] +so the coefficient of \\(G\_{i}\\) in the final \\(G\\) value is +\\[ +\begin{aligned} + s\_{i} &= x\_{k}^{b(i,k)} \cdots x\_1^{b(i,1)},\end{aligned} +\\] where +\\(b(i,j)\\) is either \\(-1\\) or \\(+1\\), according to whether \\(G\_{i}\\) appears in +the left or right half of \\({\mathbf{G}}^{(j)}\\). Since \\(G\_{i}\\) appears in +the \\((i \mod 2^{j})\\)-th entry of \\({\mathbf{G}}^{j}\\), this is +\\[ + b(i,j) = + \begin{cases} + -1 & \text{if $(i \mod 2^{j}) < 2^{j-1}$ }\\\\ + +1 & \text{if $(i \mod 2^{j}) \ge 2^{j-1}$ }\\\\ + \end{cases}. +\\] +But this is exactly +\\[ + b(i,j) = + \begin{cases} + -1 & \text{if bit $j-1$ of $i$ is 0} \\\\ + +1 & \text{if bit $j-1$ of $i$ is 1} \\\\ + \end{cases}. +\\] +This shows that +\\(G = {\langle {\mathbf{s}}, {\mathbf{G}} \rangle}\\). This formula differs +slightly from the one in the paper, because we index vectors and bits +from \\(0\\). + +Since \\(H\\) is computed similarly, but with the roles of +\\({\mathbf{H}}\_{\operatorname{lo}}\\) and +\\({\mathbf{H}}\_{\operatorname{hi}}\\) reversed, a similar argument shows +that \\(H = {\langle 1/{\mathbf{s}}, {\mathbf{H}} \rangle}\\). +Notice that +if \\(i'\\) is the bitwise NOT of \\(i\\), then \\(s\_{i'} = +1/s\_{i}\\), so the vector of inverses \\(1/{\mathbf{s}}\\) is a permutation of +the vector \\({\mathbf{s}}\\) and no additional computation is required to +obtain the \\(1/s\_{i}\\). + +The verifier’s computation then becomes +\\[ +\begin{aligned} +P \overset ? 
=& aG +bH +abQ - \sum\_{j=1}^{k} \left( L\_{k} x\_{k}^{2} + x\_{k}^{-2} R\_{k} \right) \\\\ +=& {\langle a \cdot {\mathbf{s}}, {\mathbf{G}} \rangle} + {\langle b /{\mathbf{s}}, {\mathbf{H}} \rangle} + abQ - \sum\_{j=1}^{k} \left( L\_{k} x\_{k}^{2} + x\_{k}^{-2} R\_{k} \right), +\end{aligned} +\\] +a single multiscalar multiplication with +\\(n + n + 1 + k + k = 2(n+k) + 1\\) points. diff --git a/docs/notes.md b/docs/notes.md index b443f391..2c0ffef1 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -420,126 +420,5 @@ so the verifier uses \\(P\\), \\(t(x)\\) as inputs to the inner-product protocol to prove that \\(t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle}\\). -Inner product argument protocol -=============================== - -We want to prove the relation -\\[ -\operatorname{PK}\left\\{ - ({\mathbf{G}}, {\mathbf{H}} \in {\mathbb G}^n, P, Q \in {\mathbb G}; {\mathbf{a}}, {\mathbf{b}} \in {\mathbb Z\_p}^n) - : P = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} Q -\right\\} -\\] where \\(n = 2^{k}\\) is a power of \\(2\\). - -To start, we sketch the -interactive version of this protocol, and then describe the -optimizations discussed in the Bulletproofs paper for the -non-interactive version. - -The protocol consists of \\(k = \lg n\\) rounds, indexed by -\\(j = k,\ldots,1\\). In the \\(j\\)-th round, the prover computes -\\[ -\begin{aligned} - L\_{j} &\gets {\langle {\mathbf{a}}\_{\operatorname{lo}}, {\mathbf{G}}\_{\operatorname{hi}} \rangle} + {\langle {\mathbf{b}}\_{\operatorname{hi}}, {\mathbf{H}}\_{\operatorname{lo}} \rangle} + {\langle {\mathbf{a}}\_{\operatorname{lo}}, {\mathbf{b}}\_{\operatorname{hi}} \rangle} Q, \\\\ - R\_{j} &\gets {\langle {\mathbf{a}}\_{\operatorname{hi}}, {\mathbf{G}}\_{\operatorname{lo}} \rangle} + {\langle {\mathbf{b}}\_{\operatorname{lo}}, {\mathbf{H}}\_{\operatorname{hi}} \rangle} + {\langle {\mathbf{a}}\_{\operatorname{hi}}, {\mathbf{b}}\_{\operatorname{lo}} \rangle} Q, -\end{aligned} -\\] -and sends \\(L\_{j}, R\_{j}\\) to the verifier. The verifier responds with a -challenge value \\(x\_{j} {\xleftarrow{\$}}{\mathbb Z\_p}\\). The prover uses -\\(x\_{j}\\) to compute -\\[ -\begin{aligned} - {\mathbf{a}} &\gets {\mathbf{a}}\_{\operatorname{lo}} \cdot x\_{j} + x\_{j}^{-1} \cdot {\mathbf{a}}\_{\operatorname{hi}}, \\\\ - {\mathbf{b}} &\gets {\mathbf{b}}\_{\operatorname{lo}} \cdot x\_{j}^{-1} + x\_{j} \cdot {\mathbf{a}}\_{\operatorname{hi}}, -\end{aligned} -\\] -the prover and verifier both compute -\\[ -\begin{aligned} - {\mathbf{G}} &\gets {\mathbf{G}}\_{\operatorname{lo}} \cdot x\_{j}^{-1} + x\_{j} \cdot {\mathbf{G}}\_{\operatorname{hi}}, \\\\ - {\mathbf{H}} &\gets {\mathbf{H}}\_{\operatorname{lo}} \cdot x\_{j} + x\_{j}^{-1} \cdot {\mathbf{H}}\_{\operatorname{hi}}, -\end{aligned} -\\] -and use these vectors (all of length \\(2^{j-1}\\)) for the next round. -After the last (\\(j = 1\\)) round, the prover sends -\\(a, b = {\mathbf{a}}\_{0}, {\mathbf{b}}\_{0}\\) to the verifier, who accepts -if and only if -\\[ -\begin{aligned} -L\_{1} x\_{1}^{2} + \cdots + L\_{k} x\_{k}^{2} + P + R\_{k} x\_{k}^{-2} + \cdots + R\_{1} x\_{1}^{-2}&\overset ? = aG + bH + abQ, -\end{aligned} -\\] -where \\(G, H = {\mathbf{G}}\_{0}, {\mathbf{H}}\_{0}\\). 
- -To make the protocol noninteractive, we replace the transmission of the -\\(L\_{j}\\) and \\(R\_{j}\\) and the response \\(x\_{j}\\) with a Fiat-Shamir -challenge, so that each \\(x\_{j}\\) is generated as a hash of the transcript -\\(L\_{k},R\_{k},\ldots,L\_{j},R\_{j}\\). At the end of the prover’s -computation, they send \\(a,b,L\_{k},R\_{k},\ldots,L\_{1},R\_{1}\\) to the -verifier. - -Since the final \\(G\\) and \\(H\\) values are functions of the challenges -\\(x\_{k},\ldots,x\_{1}\\), the verifier has to compute them as part of the -verification process. However, while the prover needs to compute the -intermediate vectors \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) in order to compute -the \\(L\_{j}\\) and \\(R\_{j}\\), the verifier doesn’t, and can compute the final -\\(G\\), \\(H\\) directly from the vectors \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) and -the challenges \\(x\_{k}, \ldots, x\_{1}\\). - -Let \\({\mathbf{G}}^{(j)}\\) be the value of \\({\mathbf{G}}\\) in the \\(j\\)-th -round, and let \\(G\_{i}\\) be the \\(i\\)-th entry of the initial vector -\\({\mathbf{G}}^{(k)} = -(G\_{0}, \ldots, G\_{n-1})\\). We have \\[ -\begin{aligned} - {\mathbf{G}}^{(j-1)} = ({\mathbf{G}}^{(j)})\_{\operatorname{lo}} x\_{j}^{-1} + ({\mathbf{G}}^{(j)})\_{\operatorname{hi}} x\_{j},\end{aligned} -\\] -so the coefficient of \\(G\_{i}\\) in the final \\(G\\) value is -\\[ -\begin{aligned} - s\_{i} &= x\_{k}^{b(i,k)} \cdots x\_1^{b(i,1)},\end{aligned} -\\] where -\\(b(i,j)\\) is either \\(-1\\) or \\(+1\\), according to whether \\(G\_{i}\\) appears in -the left or right half of \\({\mathbf{G}}^{(j)}\\). Since \\(G\_{i}\\) appears in -the \\((i \mod 2^{j})\\)-th entry of \\({\mathbf{G}}^{j}\\), this is -\\[ - b(i,j) = - \begin{cases} - -1 & \text{if \\((i \mod 2^{j}) < 2^{j-1}\\) }\\\\ - +1 & \text{if \\((i \mod 2^{j}) \ge 2^{j-1}\\) }\\\\ - \end{cases}. -\\] -But this is exactly -\\[ - b(i,j) = - \begin{cases} - -1 & \text{if bit \\(j-1\\) of \\(i\\) is 0} \\\\ - +1 & \text{if bit \\(j-1\\) of \\(i\\) is 1} \\\\ - \end{cases}. -\\] -This shows that -\\(G = {\langle {\mathbf{s}}, {\mathbf{G}} \rangle}\\). This formula differs -slightly from the one in the paper, because we index vectors and bits -from \\(0\\). - -Since \\(H\\) is computed similarly, but with the roles of -\\({\mathbf{H}}\_{\operatorname{lo}}\\) and -\\({\mathbf{H}}\_{\operatorname{hi}}\\) reversed, a similar argument shows -that \\(H = {\langle 1/{\mathbf{s}}, {\mathbf{H}} \rangle}\\). -Notice that -if \\(i'\\) is the bitwise NOT of \\(i\\), then \\(s\_{i'} = -1/s\_{i}\\), so the vector of inverses \\(1/{\mathbf{s}}\\) is a permutation of -the vector \\({\mathbf{s}}\\) and no additional computation is required to -obtain the \\(1/s\_{i}\\). - -The verifier’s computation then becomes -\\[ -\begin{aligned} -P \overset ? =& aG +bH +abQ - \sum\_{j=1}^{k} \left( L\_{k} x\_{k}^{2} + x\_{k}^{-2} R\_{k} \right) \\\\ -=& {\langle a \cdot {\mathbf{s}}, {\mathbf{G}} \rangle} + {\langle b /{\mathbf{s}}, {\mathbf{H}} \rangle} + abQ - \sum\_{j=1}^{k} \left( L\_{k} x\_{k}^{2} + x\_{k}^{-2} R\_{k} \right), -\end{aligned} -\\] -a single multiscalar multiplication with -\\(n + n + 1 + k + k = 2(n+k) + 1\\) points. - [^1]: The blinding factor is synthetic in the sense that it is synthesized from the blinding factors of the other commitments. 
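To make the bit-decomposition of the \\(s\_i\\) coefficients concrete, here is a minimal sketch of expanding the challenges into the vector \\({\mathbf{s}}\\). It is an illustration rather than this crate's verifier code, and it assumes the challenges are stored in the order they were generated, \\((x\_k, \ldots, x\_1)\\), together with their precomputed inverses.

```rust
use curve25519_dalek::scalar::Scalar;

/// Expand the challenges (x_k, ..., x_1) into the coefficients s_0, ..., s_{n-1},
/// where s_i = prod_j x_j^{b(i,j)} and b(i,j) = +1 exactly when bit (j-1) of i is 1.
fn folding_scalars(challenges: &[Scalar], challenges_inv: &[Scalar]) -> Vec<Scalar> {
    let k = challenges.len();
    let n = 1 << k;
    let mut s = Vec::with_capacity(n);
    for i in 0..n {
        let mut s_i = Scalar::one();
        for j in 0..k {
            // Round (j+1) produced the challenge stored at index k-1-j.
            if (i >> j) & 1 == 1 {
                s_i *= &challenges[k - 1 - j];
            } else {
                s_i *= &challenges_inv[k - 1 - j];
            }
        }
        s.push(s_i);
    }
    s
}
```

A real implementation would obtain the inverses with a single batch inversion and could build \\({\mathbf{s}}\\) incrementally rather than with the \\(O(n \lg n)\\) loop shown here.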
diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index f9b29670..b0ab79b7 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -1,5 +1,7 @@ #![allow(non_snake_case)] +#![doc(include = "../docs/inner-product-protocol.md")] + use std::iter; use std::borrow::Borrow; From 242913bc9848fe7bf311e32a214682892ccb0bf3 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 29 Mar 2018 15:46:01 -0700 Subject: [PATCH 077/186] Import IPP notes from TeX There are some broken environments, but I'll leave them to be edited. --- docs/notes.md | 151 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 149 insertions(+), 2 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 2c0ffef1..75f7ba93 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -343,6 +343,9 @@ bottom row of the diagram to check consistency: t(x) B + {\widetilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z V + \delta(y,z) B + x T\_{1} + x^{2} T\_{2}. \\] +[^1]: The blinding factor is synthetic in the sense that it is + synthesized from the blinding factors of the other commitments. + Proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct --------------------------------------------------------------------- @@ -420,5 +423,149 @@ so the verifier uses \\(P\\), \\(t(x)\\) as inputs to the inner-product protocol to prove that \\(t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle}\\). -[^1]: The blinding factor is synthetic in the sense that it is - synthesized from the blinding factors of the other commitments. +Inner-Product Proof +=================== + +First, let’s observe that the prover can simply send vectors +\\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\) and the verifier can check +directly that the inner product \\(t(x)\\) and commitment \\(P\\) provided in +the protocols 1 and 2 are correct. This will not leak information (the +secret bits in these vectors are blinded), but will require us to +transfer \\(2n\\) scalars between a prover and a verifier. 
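For comparison, such a direct check is easy to sketch. The following is only an illustration of this naive approach and not part of the crate's API; `H_prime` stands for the generators \\({\mathbf{H}}'\\).

```rust
use curve25519_dalek::ristretto::{self, RistrettoPoint};
use curve25519_dalek::scalar::Scalar;

/// Naive check: recompute t(x) = <l, r> and P = <l, G> + <r, H'> from the
/// transmitted vectors, at the cost of sending all 2n scalars.
#[allow(non_snake_case)]
fn naive_verify(
    l: &[Scalar],
    r: &[Scalar],
    t_x: Scalar,
    P: RistrettoPoint,
    G: &[RistrettoPoint],
    H_prime: &[RistrettoPoint],
) -> bool {
    // t(x) =? <l, r>
    let mut t = Scalar::zero();
    for (l_i, r_i) in l.iter().zip(r.iter()) {
        t += l_i * r_i;
    }
    // P =? <l, G> + <r, H'>, as a single multiscalar multiplication
    let P_check = ristretto::vartime::multiscalar_mul(
        l.iter().chain(r.iter()),
        G.iter().chain(H_prime.iter()),
    );
    t == t_x && P_check == P
}
```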
+ +To minimize the bandwidth cost we will use the inner-product argument +protocol which enables us to prove *indirectly* and with \\(O(log(n))\\) +communication cost, that a given inner product \\(t(x)\\) and a commitment +\\(P\\) are related as: +\\[ +\begin{aligned} +t(x) &= {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle} \\\\ +P &= {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x), {\mathbf{H}}' \rangle} +\end{aligned} +\\] +To make the presentation +cleaner, we will change the notation to one used specifically in the +inner product argument which is not to be confused with the notation in +the rangeproof protocol: +\\[ +\begin{split} +{\mathbf{a}}, {\mathbf{b}} &\in {\mathbb Z\_{p}^{n}}\\\\ +{\mathbf{G}}, {\mathbf{H}} &\in {\mathbb G^{n}}\\\\ +c &= {\langle {\mathbf{a}}, {\mathbf{b}} \rangle}\\\\ +P &= {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} +\end{split} +\\] +Within the above definitions we need a proof of knowledge +for the following relation: +\\[ +\begin{aligned} + P &{}={}&& {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} \hspace{0.2cm} \wedge\\\\ + c &{}={}&& {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} +\end{aligned} +\\] +let’s compress these two statements into one equation using an +indeterminate variable \\(w \in {\mathbb Z\_{p}^{\times}}\\) and multiplying the +second equation by an additional orthogonal generator +\\(Q \in {\mathbb G}\\): +\\[ +\begin{aligned} + P &{}={}&& {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle}\\\\ + &{}+{}&&\\\\ + c \cdot w \cdot Q &{}={}&& {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} \cdot w \cdot Q +\end{aligned} +\\] +let’s simplify the resulting equation using the following definitions: +\\[ +\begin{aligned} + k &= \lg n \\\\ + P\_k &= P + c \cdot w \cdot Q \\\\ + \hat{Q} &= w \cdot Q +\end{aligned} +\\] +The equation becomes: +\\[ + P\_k = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} \cdot \hat{Q} +\\] +If the prover can demonstrate that the above \\(P\_k\\) has such structure +over generators \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) and \\(\hat Q\\) for all +\\(w \in {\mathbb Z\_{p}^{*}}\\), then the original \\(P\\) and \\(c\\) must satisfy +the original relation (\[eqn:ip\\_cp\\_def\]). + +The equation (\[eqn:ip\\_inner\\_eq\]) is useful because it will allow us +to compress each vector in half and arrive to the same form. By doing +such compression \\(\lg n\\) times we will end up with an equation where +both vectors are one-element long and we can simply transmit them to +check the final equality directly. + +let’s introduce an indeterminate variable \\(u\_k \in {\mathbb Z\_{p}^{\times}}\\) +and compress the vectors by adding the left and the right halves +separated by the variable \\(u\_k\\): +\\[ +\begin{split} + {\mathbf{a}}^{(k-1)} &= {\mathbf{a}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{a}}\_R \\\\ + {\mathbf{b}}^{(k-1)} &= {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R \\\\ + {\mathbf{G}}^{(k-1)} &= {\mathbf{G}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{G}}\_R \\\\ + {\mathbf{H}}^{(k-1)} &= {\mathbf{H}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{H}}\_R +\end{split} +\\] +The powers of \\(u\_k\\) are chosen so they cancel out in the +inner products of interest as will be shown below. 
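Here is a small sketch of a single compression step under these definitions. It is illustrative only, not the crate's prover code, and it assumes the input slices have even length and that `u` and `u_inv` are the challenge \\(u\_k\\) and its inverse.

```rust
use curve25519_dalek::ristretto::RistrettoPoint;
use curve25519_dalek::scalar::Scalar;

/// One compression step: fold (a, b, G, H) in half using u_k and its inverse.
#[allow(non_snake_case)]
fn fold_round(
    u: Scalar,
    u_inv: Scalar,
    a: &[Scalar],
    b: &[Scalar],
    G: &[RistrettoPoint],
    H: &[RistrettoPoint],
) -> (Vec<Scalar>, Vec<Scalar>, Vec<RistrettoPoint>, Vec<RistrettoPoint>) {
    let n = a.len() / 2;
    let (a_L, a_R) = a.split_at(n);
    let (b_L, b_R) = b.split_at(n);
    let (G_L, G_R) = G.split_at(n);
    let (H_L, H_R) = H.split_at(n);

    // a' = a_L * u + u^{-1} * a_R,  b' = b_L * u^{-1} + u * b_R
    let a_next: Vec<Scalar> = (0..n).map(|i| a_L[i] * u + u_inv * a_R[i]).collect();
    let b_next: Vec<Scalar> = (0..n).map(|i| b_L[i] * u_inv + u * b_R[i]).collect();
    // Generators fold with the opposite powers so the cross terms cancel.
    let G_next: Vec<RistrettoPoint> = (0..n).map(|i| G_L[i] * u_inv + G_R[i] * u).collect();
    let H_next: Vec<RistrettoPoint> = (0..n).map(|i| H_L[i] * u + H_R[i] * u_inv).collect();

    (a_next, b_next, G_next, H_next)
}
```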
+ +let’s now form the equation (\[eqn:ip\\_inner\\_eq\]) with these +compressed vectors: +\\[ + P\_{k-1} = {\langle {\mathbf{a}}^{(k-1)}, {\mathbf{G}}^{(k-1)} \rangle} + {\langle {\mathbf{b}}^{(k-1)}, {\mathbf{H}}^{(k-1)} \rangle} + {\langle {\mathbf{a}}^{(k-1)}, {\mathbf{b}}^{(k-1)} \rangle} \cdot \hat{Q} +\\] +Expanding it in terms of the original \\({\mathbf{a}}\\), \\({\mathbf{b}}\\), +\\({\mathbf{G}}\\) and \\({\mathbf{H}}\\) gives: +\\[ +\begin{aligned} + P\_{k-1} &{}={}& &{\langle {\mathbf{a}}\_L \cdot u\_k + u\_k^{-1} \cdot {\mathbf{a}}\_R, &&{\mathbf{G}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{G}}\_R \rangle} + \\\\ + && &{\langle {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R, &&{\mathbf{H}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{H}}\_R \rangle} + \\\\ + && &{\langle {\mathbf{a}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{a}}\_R, &&{\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R \rangle} \cdot \hat{Q} +\end{aligned} +\\] +Breaking down in simpler products: +\\[ +\begin{aligned} + P\_{k-1} &{}={}& &{\langle {\mathbf{a}}\_L, {\mathbf{G}}\_L \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{G}}\_R \rangle} &{}+{}& u\_k^2 {\langle {\mathbf{a}}\_L, {\mathbf{G}}\_R \rangle} + u^{-2}\_k {\langle {\mathbf{a}}\_R, {\mathbf{G}}\_L \rangle} + \\\\ + && &{\langle {\mathbf{b}}\_L, {\mathbf{H}}\_L \rangle} + {\langle {\mathbf{b}}\_R, {\mathbf{H}}\_R \rangle} &{}+{}& u^2\_k {\langle {\mathbf{b}}\_R, {\mathbf{H}}\_L \rangle} + u^{-2}\_k {\langle {\mathbf{b}}\_L, {\mathbf{H}}\_R \rangle} + \\\\ + && &({\langle {\mathbf{a}}\_L, {\mathbf{b}}\_L \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_R \rangle})\cdot \hat{Q} &{}+{}& (u^2\_k {\langle {\mathbf{a}}\_L, {\mathbf{b}}\_R \rangle} + u^{-2}\_k {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_L \rangle}) \cdot \hat{Q} +\end{aligned} +\\] +We now see that the left two columns in the above equation is the +definition of \\(P\_k\\), while various cross terms on the right are +separated from \\(P\_k\\) by an indeterminate variable \\(u\_k\\). let’s group all +terms with \\(u^2\_k\\) as \\(L\_k\\) and all terms with \\(u^{-2}\_k\\) as \\(R\_k\\): +\\[ +\begin{aligned} + P\_{k-1} &= P\_k + u^2\_k \cdot L\_k + u^{-2}\_k \cdot R\_k\\\\ + L\_k &= {\langle {\mathbf{a}}\_L, {\mathbf{G}}\_R \rangle} + {\langle {\mathbf{b}}\_R, {\mathbf{H}}\_L \rangle} + {\langle {\mathbf{a}}\_L, {\mathbf{b}}\_R \rangle} \cdot \hat{Q}\\\\ + R\_k &= {\langle {\mathbf{a}}\_R, {\mathbf{G}}\_L \rangle} + {\langle {\mathbf{b}}\_L, {\mathbf{H}}\_R \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_L \rangle} \cdot \hat{Q} +\end{aligned} +\\] +If the prover commits to \\(L\_k\\) and \\(R\_k\\) before \\(u\_k\\) is randomly +sampled, then if the (\[eqn:ip\\_inner\\_eq\\_compressed\]) is proven to be +true, it will follow that (\[eqn:ip\\_inner\\_eq\]) is also true with an +overwhelming probability. + +We can compress the resulting statement +(\[eqn:ip\\_inner\\_eq\\_compressed\]) using one more indeterminate +variable \\(u\_{k-1}\\) as specified in (\[eqn:ip\\_compression\]) and arrive +to even shorter vectors. 
We will continue doing so until we end up with +vectors +\\({\mathbf{a}}^{(0)}, {\mathbf{b}}^{(0)}, {\mathbf{G}}^{(0)}, {\mathbf{H}}^{(0)}\\), +each containing one item: +\\[ + P\_0 = a^{(0)}\_0 \cdot G^{(0)}\_0 + b^{(0)}\_0 \cdot H^{(0)}\_0 + a^{(0)}\_0 \cdot b^{(0)}\_0 \cdot \hat{Q} +\\] +At this point the prover can transmit two scalars \\(a^{(0)}\_0\\) and +\\(b^{(0)}\_0\\) to the verifier, so they check +(\[eqn:ip\\_inner\\_eq\\_final\]) directly by computing both sides of the +equation. + +The resulting protocol has \\(\lg n\\) steps of compression where the prover +sends a pair \\((L\_j,R\_j)\\) of points at each step \\(j = k\dots1\\). An +additional and final step involves sending a pair of scalars +\\((a^{(0)}\_0,b^{(0)}\_0)\\) and checking the relation +(\[eqn:ip\\_inner\\_eq\\_final\]). \ No newline at end of file From 5b36238d5f6d8529325348c7cc5b18faec9dafaf Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 29 Mar 2018 16:08:41 -0700 Subject: [PATCH 078/186] Update README and use it for the crate docs --- README.md | 41 +++++++++++++++++++++++++++++++---------- src/lib.rs | 2 ++ 2 files changed, 33 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index bfca986c..714468e3 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,40 @@ # Ristretto Bulletproofs -Implementing [bulletproofs](https://crypto.stanford.edu/bulletproofs/) using [ristretto](https://github.com/dalek-cryptography/ed25519-dalek). +A pure-Rust implementation of [Bulletproofs][bp_website] using [Ristretto][ristretto]. -Step 1 of a larger proposed proposed plan, detailed [here](https://github.com/chain/research/issues/7). +This crate contains both an implementation and a set of notes on how and why +Bulletproofs work. The external documentation describes how to use this +crate's API, while the internal documentation contains the notes. +Unfortunately, `cargo doc` does not yet have support for custom HTML injection +and for documenting private members, so the documentation is built using: -## Development +```text +make doc # Builds external documentation +make doc-internal # Builds internal documentation +``` -Run tests: +The description of how Bulletproofs work is found in the (internal) `notes` module. -``` -cargo test -``` +## WARNING -Run benchmarks: +This code is still research-quality. It is not (yet) suitable for deployment. +## Tests + +Run tests with `cargo test`. + +## Features + +The `yolocrypto` feature enables the `yolocrypto` feature in +`curve25519-dalek`, which enables the experimental AVX2 backend. To use it for +Bulletproofs, the `target_cpu` must support AVX2: + +```text +RUSTFLAGS="-C target_cpu=skylake" cargo bench --features "yolocrypto" ``` -cargo bench -``` + +This crate uses [criterion.rs][criterion] for benchmarks. + +[bp_website]: https://crypto.stanford.edu/bulletproofs/ +[ristretto]: https://doc.dalek.rs/curve25519_dalek/ristretto/index.html +[criterion]: https://github.com/japaric/criterion.rs diff --git a/src/lib.rs b/src/lib.rs index f50e3b1c..d0597b34 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,6 +3,8 @@ #![feature(test)] #![feature(external_doc)] +#![doc(include = "../README.md")] + extern crate byteorder; extern crate curve25519_dalek; extern crate rand; From a8e4b73ff8ecf0728512481217a617a8d107ed5b Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 29 Mar 2018 16:15:24 -0700 Subject: [PATCH 079/186] Move inner_product and add_vec into util This prevents them being exported as part of the public API, and is also where they probably belong. 
--- src/inner_product_proof.rs | 8 +++--- src/range_proof.rs | 51 ++++---------------------------------- src/util.rs | 41 ++++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+), 51 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index b0ab79b7..b738d239 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -18,8 +18,6 @@ use util; use generators::Generators; -use range_proof::inner_product; - use sha2::Sha512; #[derive(Clone, Debug)] @@ -85,8 +83,8 @@ impl Proof { let (G_L, G_R) = G.split_at_mut(n); let (H_L, H_R) = H.split_at_mut(n); - let c_L = inner_product(&a_L, &b_R); - let c_R = inner_product(&a_R, &b_L); + let c_L = util::inner_product(&a_L, &b_R); + let c_R = util::inner_product(&a_R, &b_L); let L = ristretto::vartime::multiscalar_mul( a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), @@ -245,7 +243,7 @@ mod tests { // a and b are the vectors for which we want to prove c = let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); - let c = inner_product(&a, &b); + let c = util::inner_product(&a, &b); // y_inv is (the inverse of) a random challenge let y_inv = Scalar::random(&mut rng); diff --git a/src/range_proof.rs b/src/range_proof.rs index ba2e58cb..68919fdf 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -284,13 +284,13 @@ impl VecPoly2 { let l = self; let r = rhs; - let t0 = inner_product(&l.0, &r.0); - let t2 = inner_product(&l.1, &r.1); + let t0 = util::inner_product(&l.0, &r.0); + let t2 = util::inner_product(&l.1, &r.1); - let l0_plus_l1 = add_vec(&l.0, &l.1); - let r0_plus_r1 = add_vec(&r.0, &r.1); + let l0_plus_l1 = util::add_vec(&l.0, &l.1); + let r0_plus_r1 = util::add_vec(&r.0, &r.1); - let t1 = inner_product(&l0_plus_l1, &r0_plus_r1) - t0 - t2; + let t1 = util::inner_product(&l0_plus_l1, &r0_plus_r1) - t0 - t2; PolyDeg3(t0, t1, t2) } @@ -305,30 +305,6 @@ impl VecPoly2 { } } -pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { - let mut out = Scalar::zero(); - if a.len() != b.len() { - // throw some error - println!("lengths of vectors don't match for inner product multiplication"); - } - for i in 0..a.len() { - out += a[i] * b[i]; - } - out -} - -pub fn add_vec(a: &[Scalar], b: &[Scalar]) -> Vec { - let mut out = Vec::new(); - if a.len() != b.len() { - // throw some error - println!("lengths of vectors don't match for vector addition"); - } - for i in 0..a.len() { - out.push(a[i] + b[i]); - } - out -} - #[cfg(test)] mod tests { use super::*; @@ -360,23 +336,6 @@ mod tests { assert_eq!(power_g, delta(n, &y, &z),); } - #[test] - fn test_inner_product() { - let a = vec![ - Scalar::from_u64(1), - Scalar::from_u64(2), - Scalar::from_u64(3), - Scalar::from_u64(4), - ]; - let b = vec![ - Scalar::from_u64(2), - Scalar::from_u64(3), - Scalar::from_u64(4), - Scalar::from_u64(5), - ]; - assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); - } - fn create_and_verify_helper(n: usize) { let generators = Generators::new(n, 1); let mut transcript = ProofTranscript::new(b"RangeproofTest"); diff --git a/src/util.rs b/src/util.rs index 1d21d7fb..58224a47 100644 --- a/src/util.rs +++ b/src/util.rs @@ -27,6 +27,30 @@ pub fn exp_iter(x: Scalar) -> ScalarExp { ScalarExp { x, next_exp_x } } +pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { + let mut out = Scalar::zero(); + if a.len() != b.len() { + // throw some error + println!("lengths of vectors don't match for inner product multiplication"); + } + for i 
in 0..a.len() { + out += a[i] * b[i]; + } + out +} + +pub fn add_vec(a: &[Scalar], b: &[Scalar]) -> Vec { + let mut out = Vec::new(); + if a.len() != b.len() { + // throw some error + println!("lengths of vectors don't match for vector addition"); + } + for i in 0..a.len() { + out.push(a[i] + b[i]); + } + out +} + #[cfg(test)] mod tests { use super::*; @@ -40,4 +64,21 @@ mod tests { assert_eq!(exp_2[2], Scalar::from_u64(4)); assert_eq!(exp_2[3], Scalar::from_u64(8)); } + + #[test] + fn test_inner_product() { + let a = vec![ + Scalar::from_u64(1), + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + ]; + let b = vec![ + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + Scalar::from_u64(5), + ]; + assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); + } } From e03b6859dd12c6a72d0dba4bf1245ce5c11218a5 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 29 Mar 2018 16:19:43 -0700 Subject: [PATCH 080/186] Hide internal modules --- src/generators.rs | 2 +- src/lib.rs | 11 ++++++----- src/proof_transcript.rs | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/generators.rs b/src/generators.rs index eadeab15..1791034b 100644 --- a/src/generators.rs +++ b/src/generators.rs @@ -6,7 +6,7 @@ //! //! ``` //! # extern crate ristretto_bulletproofs; -//! # use ristretto_bulletproofs::generators::Generators; +//! # use ristretto_bulletproofs::Generators; //! # fn main() { //! //! let generators = Generators::new(64,1); diff --git a/src/lib.rs b/src/lib.rs index d0597b34..c34b8795 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,12 +20,13 @@ mod util; mod notes { } -pub mod proof_transcript; -pub mod generators; +mod proof_transcript; +mod generators; mod range_proof; mod inner_product_proof; -pub mod scalar; +mod scalar; -pub use range_proof::*; -pub use generators::*; +pub use proof_transcript::ProofTranscript; +pub use range_proof::RangeProof; +pub use generators::Generators; diff --git a/src/proof_transcript.rs b/src/proof_transcript.rs index 2ffd1ddb..264b7bf9 100644 --- a/src/proof_transcript.rs +++ b/src/proof_transcript.rs @@ -39,7 +39,7 @@ use byteorder::{ByteOrder, LittleEndian}; /// ``` /// # extern crate curve25519_dalek; /// # extern crate ristretto_bulletproofs; -/// # use ristretto_bulletproofs::proof_transcript::ProofTranscript; +/// # use ristretto_bulletproofs::ProofTranscript; /// # fn main() { /// /// use curve25519_dalek::constants; From fbe1a841d7db7de97551fb9026375c22ffa75ed4 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Fri, 30 Mar 2018 11:06:49 -0700 Subject: [PATCH 081/186] fix an IPP compression formula --- docs/notes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 75f7ba93..7a908d80 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -501,12 +501,12 @@ let’s introduce an indeterminate variable \\(u\_k \in {\mathbb Z\_{p}^{\times} and compress the vectors by adding the left and the right halves separated by the variable \\(u\_k\\): \\[ -\begin{split} +\begin{aligned} {\mathbf{a}}^{(k-1)} &= {\mathbf{a}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{a}}\_R \\\\ {\mathbf{b}}^{(k-1)} &= {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R \\\\ {\mathbf{G}}^{(k-1)} &= {\mathbf{G}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{G}}\_R \\\\ {\mathbf{H}}^{(k-1)} &= {\mathbf{H}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{H}}\_R -\end{split} +\end{aligned} \\] The powers of \\(u\_k\\) are chosen so they cancel out in the inner products of interest as will be shown 
below. From e9938490ced6d998556890443db4780b1c59c2e7 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Fri, 30 Mar 2018 11:36:47 -0700 Subject: [PATCH 082/186] fixed another formula --- docs/notes.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 7a908d80..1f11d11f 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -520,9 +520,9 @@ Expanding it in terms of the original \\({\mathbf{a}}\\), \\({\mathbf{b}}\\), \\({\mathbf{G}}\\) and \\({\mathbf{H}}\\) gives: \\[ \begin{aligned} - P\_{k-1} &{}={}& &{\langle {\mathbf{a}}\_L \cdot u\_k + u\_k^{-1} \cdot {\mathbf{a}}\_R, &&{\mathbf{G}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{G}}\_R \rangle} + \\\\ - && &{\langle {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R, &&{\mathbf{H}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{H}}\_R \rangle} + \\\\ - && &{\langle {\mathbf{a}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{a}}\_R, &&{\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R \rangle} \cdot \hat{Q} + P\_{k-1} &{}={}& &{\langle {\mathbf{a}}\_L \cdot u\_k + u\_k^{-1} \cdot {\mathbf{a}}\_R, {\mathbf{G}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{G}}\_R \rangle} + \\\\ + && &{\langle {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R, {\mathbf{H}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{H}}\_R \rangle} + \\\\ + && &{\langle {\mathbf{a}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{a}}\_R, {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R \rangle} \cdot \hat{Q} \end{aligned} \\] Breaking down in simpler products: From c0de2690332446cb8729193d79e384fa7f23b4ba Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Fri, 30 Mar 2018 11:53:03 -0700 Subject: [PATCH 083/186] one more fix --- docs/notes.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 1f11d11f..7f6a12ff 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -240,7 +240,7 @@ z^2 v {\langle {\mathbf{a}}\_{L} - z{\mathbf{1}}, z^{2} {\mathbf{2}}^n + z {\mathbf{y}}^{n} + {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} \end{aligned} \\] -Writing +Combining all non-secret terms outside the inner product \\[ \delta(y,z) = (z - z^{2}) {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \rangle} - z^{3} {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}, \\] @@ -448,12 +448,12 @@ cleaner, we will change the notation to one used specifically in the inner product argument which is not to be confused with the notation in the rangeproof protocol: \\[ -\begin{split} +\begin{aligned} {\mathbf{a}}, {\mathbf{b}} &\in {\mathbb Z\_{p}^{n}}\\\\ {\mathbf{G}}, {\mathbf{H}} &\in {\mathbb G^{n}}\\\\ c &= {\langle {\mathbf{a}}, {\mathbf{b}} \rangle}\\\\ P &= {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} -\end{split} +\end{aligned} \\] Within the above definitions we need a proof of knowledge for the following relation: From 8c63637c01cac6fee55f5da2b715f3dc11495fb8 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Fri, 30 Mar 2018 12:00:03 -0700 Subject: [PATCH 084/186] more edits --- docs/notes.md | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 7f6a12ff..c14bd5b0 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -271,13 +271,15 @@ and uses them to construct vector polynomials {\mathbf{r}}(x) &= {\mathbf{r}}\_{0} + {\mathbf{r}}\_{1} x = {\mathbf{y}}^{n} \circ \left( ({\mathbf{a}}\_{R} + {\mathbf{s}}\_{R} x\right) + z {\mathbf{1}}) + z^{2} {\mathbf{2}}^{n} &\in 
{\mathbb Z\_p}[x]^{n} \end{aligned} \\] -These are the left and right sides of the inner product -(\[eqn:single\\_inner\]), with \\({\mathbf{a}}\_{L}\\), \\({\mathbf{a}}\_{R}\\) +These are the left and right sides of the combined inner product with \\({\mathbf{a}}\_{L}\\), \\({\mathbf{a}}\_{R}\\) replaced by blinded terms \\({\mathbf{a}}\_{L} + {\mathbf{s}}\_{L} x\\), \\({\mathbf{a}}\_{R} + {\mathbf{s}}\_{R} x\\). Notice that since only the blinding factors \\({\mathbf{s}}\_{L}\\), \\({\mathbf{s}}\_{R}\\) are multiplied by \\(x\\), the vectors \\({\mathbf{l}}\_{0}\\) and \\({\mathbf{r}}\_{0}\\) are -exactly the left and right sides of the unblinded single inner-product. +exactly the left and right sides of the unblinded single inner-product: +\\[ + {\langle {\mathbf{l}}\_{0}, {\mathbf{r}}\_{0} \rangle} = z^{2}v + \delta(y,z) +\\] Setting \\[ @@ -486,18 +488,23 @@ The equation becomes: \\[ P\_k = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} \cdot \hat{Q} \\] -If the prover can demonstrate that the above \\(P\_k\\) has such structure -over generators \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) and \\(\hat Q\\) for all -\\(w \in {\mathbb Z\_{p}^{*}}\\), then the original \\(P\\) and \\(c\\) must satisfy -the original relation (\[eqn:ip\\_cp\\_def\]). - -The equation (\[eqn:ip\\_inner\\_eq\]) is useful because it will allow us +The combined equation is useful because it will allow us to compress each vector in half and arrive to the same form. By doing such compression \\(\lg n\\) times we will end up with an equation where both vectors are one-element long and we can simply transmit them to check the final equality directly. -let’s introduce an indeterminate variable \\(u\_k \in {\mathbb Z\_{p}^{\times}}\\) +If the prover can demonstrate that the above \\(P\_k\\) has such structure +over generators \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) and \\(\hat Q\\) for all +\\(w \in {\mathbb Z\_{p}^{*}}\\), then the original \\(P\\) and \\(c\\) must satisfy +the original relation +\\[ +\begin{aligned} + P = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} \wedge + c = {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} +\end{aligned} +\\] +Let’s introduce an indeterminate variable \\(u\_k \in {\mathbb Z\_{p}^{\times}}\\) and compress the vectors by adding the left and the right halves separated by the variable \\(u\_k\\): \\[ From bbd00d7c4de4691d1683d8813f047bfa2057bb65 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Fri, 30 Mar 2018 12:09:46 -0700 Subject: [PATCH 085/186] more fixes to the eqn references --- docs/notes.md | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index c14bd5b0..acbf07b5 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -518,8 +518,7 @@ separated by the variable \\(u\_k\\): The powers of \\(u\_k\\) are chosen so they cancel out in the inner products of interest as will be shown below. 
-let’s now form the equation (\[eqn:ip\\_inner\\_eq\]) with these -compressed vectors: +Let’s now define \\(P\_{k-1}\\) using the same equation as for \\(P_k\\), but using the compressed vectors: \\[ P\_{k-1} = {\langle {\mathbf{a}}^{(k-1)}, {\mathbf{G}}^{(k-1)} \rangle} + {\langle {\mathbf{b}}^{(k-1)}, {\mathbf{H}}^{(k-1)} \rangle} + {\langle {\mathbf{a}}^{(k-1)}, {\mathbf{b}}^{(k-1)} \rangle} \cdot \hat{Q} \\] @@ -542,7 +541,7 @@ Breaking down in simpler products: \\] We now see that the left two columns in the above equation is the definition of \\(P\_k\\), while various cross terms on the right are -separated from \\(P\_k\\) by an indeterminate variable \\(u\_k\\). let’s group all +separated from \\(P\_k\\) by an indeterminate variable \\(u\_k\\). Let’s group all terms with \\(u^2\_k\\) as \\(L\_k\\) and all terms with \\(u^{-2}\_k\\) as \\(R\_k\\): \\[ \begin{aligned} @@ -552,13 +551,12 @@ terms with \\(u^2\_k\\) as \\(L\_k\\) and all terms with \\(u^{-2}\_k\\) as \\(R \end{aligned} \\] If the prover commits to \\(L\_k\\) and \\(R\_k\\) before \\(u\_k\\) is randomly -sampled, then if the (\[eqn:ip\\_inner\\_eq\\_compressed\]) is proven to be -true, it will follow that (\[eqn:ip\\_inner\\_eq\]) is also true with an -overwhelming probability. +sampled, then if the statement about compressed vectors is proven to be +true, it will follow that the original statement about uncompressed vectors +is also true with an overwhelming probability. -We can compress the resulting statement -(\[eqn:ip\\_inner\\_eq\\_compressed\]) using one more indeterminate -variable \\(u\_{k-1}\\) as specified in (\[eqn:ip\\_compression\]) and arrive +We can compress the resulting statement about \\(P\_{k-1}\\) using one more indeterminate +variable \\(u\_{k-1}\\) in the same way as we used \\(u\_k\\) and arrive to even shorter vectors. We will continue doing so until we end up with vectors \\({\mathbf{a}}^{(0)}, {\mathbf{b}}^{(0)}, {\mathbf{G}}^{(0)}, {\mathbf{H}}^{(0)}\\), @@ -567,12 +565,10 @@ each containing one item: P\_0 = a^{(0)}\_0 \cdot G^{(0)}\_0 + b^{(0)}\_0 \cdot H^{(0)}\_0 + a^{(0)}\_0 \cdot b^{(0)}\_0 \cdot \hat{Q} \\] At this point the prover can transmit two scalars \\(a^{(0)}\_0\\) and -\\(b^{(0)}\_0\\) to the verifier, so they check -(\[eqn:ip\\_inner\\_eq\\_final\]) directly by computing both sides of the -equation. +\\(b^{(0)}\_0\\) to the verifier, so they check the final statement directly +by computing both sides of the equation. The resulting protocol has \\(\lg n\\) steps of compression where the prover sends a pair \\((L\_j,R\_j)\\) of points at each step \\(j = k\dots1\\). An additional and final step involves sending a pair of scalars -\\((a^{(0)}\_0,b^{(0)}\_0)\\) and checking the relation -(\[eqn:ip\\_inner\\_eq\\_final\]). \ No newline at end of file +\\((a^{(0)}\_0,b^{(0)}\_0)\\) and checking the final relation directly. From 46d72feea0ae4c0bf0d961229bd155ba5e6c5018 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Fri, 30 Mar 2018 12:13:03 -0700 Subject: [PATCH 086/186] another formula fix --- docs/inner-product-protocol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/inner-product-protocol.md b/docs/inner-product-protocol.md index d50914b3..ec7201a1 100644 --- a/docs/inner-product-protocol.md +++ b/docs/inner-product-protocol.md @@ -23,7 +23,7 @@ The protocol consists of \\(k = \lg n\\) rounds, indexed by \end{aligned} \\] and sends \\(L\_{j}, R\_{j}\\) to the verifier. 
The verifier responds with a -challenge value \\(x\_{j} {\xleftarrow{\$}}{\mathbb Z\_p}\\). The prover uses +challenge value \\(x\_{j} {\xleftarrow{\\$}}{\mathbb{Z}\_p}\\). The prover uses \\(x\_{j}\\) to compute \\[ \begin{aligned} From 5d5f79b60ebc64bb29a5ec24b2669c09ad61311f Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Fri, 30 Mar 2018 12:16:17 -0700 Subject: [PATCH 087/186] add a link to 1066.pdf --- docs/notes.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index acbf07b5..73c2fa77 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -11,10 +11,10 @@ documentation describes how it works. Notation ======== -We change notation from the original Bulletproofs paper. The primary -motivation is that our implementation uses additive notation, and we -would like our description of the protocol to use the same notation as -the implementation. +We change notation from the original [Bulletproofs paper][bulletproofs_paper]. +The primary motivation is that our implementation uses additive notation, and +we would like our description of the protocol to use the same notation as the +implementation. In general, we use lower-case letters \\(a, b, c\\) @@ -572,3 +572,5 @@ The resulting protocol has \\(\lg n\\) steps of compression where the prover sends a pair \\((L\_j,R\_j)\\) of points at each step \\(j = k\dots1\\). An additional and final step involves sending a pair of scalars \\((a^{(0)}\_0,b^{(0)}\_0)\\) and checking the final relation directly. + +[bulletproofs_paper]: https://eprint.iacr.org/2017/1066.pdf From 666d7a83a5e1e494c3148402528cbb5eb190ed2d Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 30 Mar 2018 12:46:59 -0700 Subject: [PATCH 088/186] Change from 'Decoder Ring' to just 'variable renaming' --- docs/notes.md | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 73c2fa77..18694ffb 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -76,18 +76,7 @@ vector of values \\({\mathbf{a}}\_{L} \Vert {\mathbf{a}}\_{R}\\) with the vector of bases \\({\mathbf{G}} \Vert {\mathbf{H}}\\), but defining the commitment on pairs of vectors is a more convenient notation. -Decoder Ring ------------- - -Mapping from paper notation to this notation: -\\[ -\begin{aligned} - g^a &\xrightarrow{} a \cdot G\\\\ - g \cdot h &\xrightarrow{} G + H\\\\ - g^a \cdot h^y &\xrightarrow{} a \cdot G + y \cdot H\\\\ - {\mathbf{g}}^{{\mathbf{a}}} &\xrightarrow{} {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} = {\textstyle\sum a\_i \cdot G\_i}\end{aligned} -\\] -Variables: +The variable renaming is as follows: \\[ \begin{aligned} g &\xrightarrow{} B & \gamma &\xrightarrow{} \tilde{v} \\\\ @@ -101,7 +90,7 @@ Variables: Range Proofs from Inner Products ================================ -The goal of a *range proof* is for a *prover* to convince a *verifier* +The goal of a *range proof* is for a prover to convince a verifier that a particular value \\(v\\) lies within a valid range, without revealing any additional information about the value \\(v\\). 
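In this notation the prover commits to the value \\(v\\) with blinding factor \\(\tilde{v}\\) as \\(V = v B + \tilde{v} \widetilde{B}\\). A minimal sketch of forming such a commitment with `curve25519-dalek` follows; the way the second generator is derived here is an illustrative assumption and not the crate's `Generators` construction, though the `multiscalar_mul` pattern matches the one used in `range_proof.rs`.

```rust
#![allow(non_snake_case)]

extern crate curve25519_dalek;
extern crate rand;
extern crate sha2;

use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT;
use curve25519_dalek::ristretto::{self, RistrettoPoint};
use curve25519_dalek::scalar::Scalar;
use rand::OsRng;
use sha2::Sha512;

fn main() {
    let mut rng = OsRng::new().unwrap();

    // B is the Ristretto basepoint; B_blinding is a second generator, derived
    // here (for illustration only) by hashing the basepoint encoding.
    let B = RISTRETTO_BASEPOINT_POINT;
    let B_blinding = RistrettoPoint::hash_from_bytes::<Sha512>(B.compress().as_bytes());

    // The secret value and its blinding factor.
    let v: u64 = 1037;
    let v_blinding = Scalar::random(&mut rng);

    // V = v * B + v_blinding * B_blinding
    let V = ristretto::multiscalar_mul(&[Scalar::from_u64(v), v_blinding], &[B, B_blinding]);

    // The verifier sees only V, which hides v under the blinding factor.
    println!("V = {:?}", V.compress().as_bytes());
}
```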
From 6b0538ecc31f28690751313a3daf213ae8052b75 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 30 Mar 2018 12:51:48 -0700 Subject: [PATCH 089/186] Add note on cargo doc --open --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 714468e3..19f6f5a8 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,9 @@ make doc # Builds external documentation make doc-internal # Builds internal documentation ``` +Unfortunately `cargo doc --open` rebuilds the docs without the custom +invocation, so it may be necessary to rerun `make`. + The description of how Bulletproofs work is found in the (internal) `notes` module. ## WARNING From 6f65bc812731b81db14a088e4873cb05843ab2d1 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 30 Mar 2018 12:52:46 -0700 Subject: [PATCH 090/186] Fix missing export --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index c34b8795..c645871c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -29,4 +29,4 @@ mod scalar; pub use proof_transcript::ProofTranscript; pub use range_proof::RangeProof; -pub use generators::Generators; +pub use generators::{Generators, GeneratorsView}; From 9e7b0cd343c43d9ea4051a916c4aca82c81e32ff Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 9 Apr 2018 13:43:02 -0700 Subject: [PATCH 091/186] rustfmt --- src/lib.rs | 4 +--- src/range_proof.rs | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index c645871c..1fdbd321 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,7 +2,6 @@ #![feature(nll)] #![feature(test)] #![feature(external_doc)] - #![doc(include = "../README.md")] extern crate byteorder; @@ -17,8 +16,7 @@ extern crate test; mod util; #[doc(include = "../docs/notes.md")] -mod notes { -} +mod notes {} mod proof_transcript; mod generators; diff --git a/src/range_proof.rs b/src/range_proof.rs index 68919fdf..e7898666 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -68,8 +68,7 @@ impl RangeProof { let G = generators.G.to_vec(); let H = generators.H.to_vec(); - let V = - ristretto::multiscalar_mul(&[Scalar::from_u64(v), *v_blinding], &[*B, *B_blinding]); + let V = ristretto::multiscalar_mul(&[Scalar::from_u64(v), *v_blinding], &[*B, *B_blinding]); let a_blinding = Scalar::random(rng); @@ -382,4 +381,3 @@ mod tests { create_and_verify_helper(64); } } - From 3e60403d4d4b494c03b2df3763dad734cf7e0fa8 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 9 Apr 2018 15:32:03 -0700 Subject: [PATCH 092/186] Add dalek logo --- src/lib.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/lib.rs b/src/lib.rs index 1fdbd321..687c221c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,6 +3,10 @@ #![feature(test)] #![feature(external_doc)] #![doc(include = "../README.md")] +#![doc(html_logo_url = "https://doc.dalek.rs/assets/dalek-logo-clear.png")] + +//! Note that docs will only build on nightly Rust until +//! [RFC 1990 stabilizes](https://github.com/rust-lang/rust/issues/44732). 
extern crate byteorder; extern crate curve25519_dalek; From edc92569d28f0317231af513f3ee3310693cccf7 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 9 Apr 2018 15:54:30 -0700 Subject: [PATCH 093/186] Make prover constant time --- Cargo.toml | 9 +++++---- src/lib.rs | 1 + src/range_proof.rs | 15 ++++++++------- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9689c73d..e5de73d1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,11 @@ curve25519-dalek = { version = "^0.16", features = ["nightly"] } sha2 = "^0.7" rand = "^0.4" byteorder = "1.2.1" +subtle = "0.6" + +[dependencies.tiny-keccak] +git = 'https://github.com/chain/tiny-keccak.git' +rev = '5925f81b3c351440283c3328e2345d982aac0f6e' [dev-dependencies] hex = "^0.3" @@ -17,10 +22,6 @@ criterion = "0.2" yolocrypto = ["curve25519-dalek/yolocrypto"] std = ["curve25519-dalek/std"] -[dependencies.tiny-keccak] -git = 'https://github.com/chain/tiny-keccak.git' -rev = '5925f81b3c351440283c3328e2345d982aac0f6e' - [[bench]] name = "bulletproofs" harness = false diff --git a/src/lib.rs b/src/lib.rs index 687c221c..782dd6d1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -12,6 +12,7 @@ extern crate byteorder; extern crate curve25519_dalek; extern crate rand; extern crate sha2; +extern crate subtle; extern crate tiny_keccak; #[cfg(test)] diff --git a/src/range_proof.rs b/src/range_proof.rs index e7898666..1d545e60 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -60,6 +60,8 @@ impl RangeProof { v: u64, v_blinding: &Scalar, ) -> RangeProof { + use subtle::{Choice, ConditionallyAssignable}; + let B = generators.B; let B_blinding = generators.B_blinding; @@ -75,13 +77,12 @@ impl RangeProof { // Compute A = + + a_blinding * B_blinding. let mut A = B_blinding * a_blinding; for i in 0..n { - let v_i = (v >> i) & 1; - // XXX replace this with a conditional move - if v_i == 0 { - A -= H[i]; - } else { - A += G[i]; - } + // If v_i = 0, we add a_L[i] * G[i] + a_R[i] * H[i] = - H[i] + // If v_i = 1, we add a_L[i] * G[i] + a_R[i] * H[i] = G[i] + let v_i = Choice::from(((v >> i) & 1) as u8); + let mut point = -H[i]; + point.conditional_assign(&G[i], v_i); + A += point; } let s_blinding = Scalar::random(rng); From 945717a3287074e0a2b076f8e832ac12badfba4a Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 9 Apr 2018 16:09:49 -0700 Subject: [PATCH 094/186] Remove the `scalar` module --- src/inner_product_proof.rs | 5 +-- src/lib.rs | 2 - src/scalar.rs | 86 -------------------------------------- 3 files changed, 1 insertion(+), 92 deletions(-) delete mode 100644 src/scalar.rs diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index b738d239..11e39a4b 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -9,9 +9,6 @@ use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; use curve25519_dalek::scalar::Scalar; -// XXX upstream into dalek -use scalar; - use proof_transcript::ProofTranscript; use util; @@ -147,7 +144,7 @@ impl Proof { // 2. Compute 1/(x_k...x_1) and 1/x_k, ..., 1/x_1 let mut challenges_inv = challenges.clone(); - let allinv = scalar::batch_invert(&mut challenges_inv); + let allinv = Scalar::batch_invert(&mut challenges_inv); // 3. 
Compute x_i^2 and (1/x_i)^2 diff --git a/src/lib.rs b/src/lib.rs index 687c221c..6d78875c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -27,8 +27,6 @@ mod generators; mod range_proof; mod inner_product_proof; -mod scalar; - pub use proof_transcript::ProofTranscript; pub use range_proof::RangeProof; pub use generators::{Generators, GeneratorsView}; diff --git a/src/scalar.rs b/src/scalar.rs deleted file mode 100644 index 531d89c7..00000000 --- a/src/scalar.rs +++ /dev/null @@ -1,86 +0,0 @@ -use curve25519_dalek::scalar::Scalar; - -/// Replace each element of `inputs` with its inverse. -/// -/// All inputs must be nonzero. -/// -/// Returns the inverse of the product of all inputs. -pub fn batch_invert(inputs: &mut [Scalar]) -> Scalar { - // First, compute the product of all inputs using a product - // tree: - // - // Inputs: [x_0, x_1, x_2] - // - // Tree: - // - // x_0*x_1*x_2*1 tree[1] - // / \ - // x_0*x_1 x_2*1 tree[2,3] - // / \ / \ - // x_0 x_1 x_2 1 tree[4,5,6,7] - // - // The leaves of the tree are the inputs. We store the tree in - // an array of length 2*n, similar to a binary heap. - // - // To initialize the tree, set every node to 1, then fill in - // the leaf nodes with the input variables. Finally, set every - // non-leaf node to be the product of its children. - let n = inputs.len().next_power_of_two(); - let mut tree = vec![Scalar::one(); 2 * n]; - tree[n..n + inputs.len()].copy_from_slice(inputs); - for i in (1..n).rev() { - tree[i] = &tree[2 * i] * &tree[2 * i + 1]; - } - - // The root of the tree is the product of all inputs, and is - // stored at index 1. Compute its inverse. - let allinv = tree[1].invert(); - - // To compute y_i = 1/x_i, start at the i-th leaf node of the - // tree, and walk up to the root of the tree, multiplying - // `allinv` by each sibling. This computes - // - // y_i = y * (all x_j except x_i) - // - // using lg(n) multiplications for each y_i, taking n*lg(n) in - // total. - for i in 0..inputs.len() { - let mut inv = allinv; - let mut node = n + i; - while node > 1 { - inv *= &tree[node ^ 1]; - node = node >> 1; - } - inputs[i] = inv; - } - - allinv -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn batch_invert_matches_nonbatched() { - let w = Scalar::from_bits( - *b"\x84\xfc\xbcOx\x12\xa0\x06\xd7\x91\xd9z:'\xdd\x1e!CE\xf7\xb1\xb9Vz\x810sD\x96\x85\xb5\x07", - ); - let x = Scalar::from_bits( - *b"NZ\xb44]G\x08\x84Y\x13\xb4d\x1b\xc2}RR\xa5\x85\x10\x1b\xccBD\xd4I\xf4\xa8y\xd9\xf2\x04", - ); - let y = Scalar::from_bits( - *b"\x90v3\xfe\x1cKf\xa4\xa2\x8d-\xd7g\x83\x86\xc3S\xd0\xdeTU\xd4\xfc\x9d\xe8\xefz\xc3\x1f5\xbb\x05", - ); - let z = Scalar::from_bits( - *b"\x05\x9d>\x0b\t&P=\xa3\x84\xa1<\x92z\xc2\x06A\x98\xcf4:$\xd5\xb7\xeb3j-\xfc\x11!\x0b", - ); - - let list = vec![w, x, y, z, w * y, x * z, y * y, w * z]; - let mut inv_list = list.clone(); - batch_invert(&mut inv_list[..]); - for i in 0..8 { - assert_eq!(list[i].invert(), inv_list[i]); - } - } -} From a85b162752c31bea34bf652934a0262a26c0ff33 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 9 Apr 2018 17:03:31 -0700 Subject: [PATCH 095/186] Add and test Serde support. This changes the rangeproof test code to explicitly pass a proof from the prover to the verifier. The test code uses bincode for serialization, but the generated proof sizes are quite bloated (up to 200 bytes larger than necessary). Some points: 1. Ser/deser is connected with proof verification, since we want to feed the prover's commitments into the hash. 
Right now we decompress and recompress, so it would probably be better to have the Proof struct hold compressed points. However, the serde support in `curve25519-dalek` is designed so that the input validation (are points valid? are scalars canonically encoded?) happens automatically during deserialization. The compressed point formats don't implement `Serialize` or `Deserialize`, so that structs with points and scalars are either all valid or just return an error. 2. We could write custom Serialize/Deserialize implementations, but it would be good to have custom types for each resolution of range proof so that we don't need to encode lengths. We also would have to make sure that repeat all of the checks correctly. 3. Bincode is designed for IPC, so its goal is just encoding <= memory size, and I think it encodes lengths as u64s, for instance. So something like CBOR might do better. I feel like the best thing to do is to specialize the `RangeProof` struct by bitsize, and then provide custom `Serialize`/`Deserialize` implementations that work on a fixed-size byte array. This would give compact proof sizes with any Serde backend. --- Cargo.toml | 7 +++- src/inner_product_proof.rs | 2 +- src/lib.rs | 6 +++ src/range_proof.rs | 81 +++++++++++++++++++++++++++++--------- 4 files changed, 75 insertions(+), 21 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e5de73d1..b9999b9c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,11 +4,13 @@ version = "0.1.0" authors = ["Cathie "] [dependencies] -curve25519-dalek = { version = "^0.16", features = ["nightly"] } +curve25519-dalek = { version = "^0.16", features = ["serde", "nightly"] } +subtle = "0.6" sha2 = "^0.7" rand = "^0.4" byteorder = "1.2.1" -subtle = "0.6" +serde = "1" +serde_derive = "1" [dependencies.tiny-keccak] git = 'https://github.com/chain/tiny-keccak.git' @@ -17,6 +19,7 @@ rev = '5925f81b3c351440283c3328e2345d982aac0f6e' [dev-dependencies] hex = "^0.3" criterion = "0.2" +bincode = "1" [features] yolocrypto = ["curve25519-dalek/yolocrypto"] diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 11e39a4b..36c6aa5b 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -17,7 +17,7 @@ use generators::Generators; use sha2::Sha512; -#[derive(Clone, Debug)] +#[derive(Serialize, Deserialize, Clone, Debug)] pub struct Proof { pub(crate) L_vec: Vec, pub(crate) R_vec: Vec, diff --git a/src/lib.rs b/src/lib.rs index 5ffa60b1..16c147ae 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -15,9 +15,15 @@ extern crate sha2; extern crate subtle; extern crate tiny_keccak; +#[macro_use] +extern crate serde_derive; + #[cfg(test)] extern crate test; +#[cfg(test)] +extern crate bincode; + mod util; #[doc(include = "../docs/notes.md")] diff --git a/src/range_proof.rs b/src/range_proof.rs index 1d545e60..1d308e89 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -26,7 +26,7 @@ struct PolyDeg3(Scalar, Scalar, Scalar); struct VecPoly2(Vec, Vec); /// The `RangeProof` struct represents a single range proof. -#[derive(Clone, Debug)] +#[derive(Serialize, Deserialize, Clone, Debug)] pub struct RangeProof { /// Commitment to the value // XXX this should not be included, so that we can prove about existing commitments @@ -336,30 +336,75 @@ mod tests { assert_eq!(power_g, delta(n, &y, &z),); } + /// Given a bitsize `n`, test the full trip: + /// + /// 1. Generate a random value and create a proof that it's in range; + /// 2. Serialize to wire format; + /// 3. Deserialize from wire format; + /// 4. Verify the proof. 
fn create_and_verify_helper(n: usize) { + // Split the test into two scopes, so that it's explicit what + // data is shared between the prover and the verifier. + + // Use bincode for serialization + use bincode; + + // Both prover and verifier have access to the generators and the proof let generators = Generators::new(n, 1); - let mut transcript = ProofTranscript::new(b"RangeproofTest"); - let mut rng = OsRng::new().unwrap(); - let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); - let v_blinding = Scalar::random(&mut rng); + // Serialized proof data + let proof_bytes: Vec; + + // Prover's scope + { + // Use a customization label for testing proofs + let mut transcript = ProofTranscript::new(b"RangeproofTest"); + let mut rng = OsRng::new().unwrap(); + + let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); + let v_blinding = Scalar::random(&mut rng); + + let range_proof = RangeProof::generate_proof( + generators.share(0), + &mut transcript, + &mut rng, + n, + v, + &v_blinding, + ); + + // 2. Serialize + proof_bytes = bincode::serialize(&range_proof).unwrap(); + } - let range_proof = RangeProof::generate_proof( - generators.share(0), - &mut transcript, - &mut rng, + println!( + "Rangeproof with {} bits has size {} bytes", n, - v, - &v_blinding, + proof_bytes.len() ); - let mut transcript = ProofTranscript::new(b"RangeproofTest"); - - assert!( - range_proof - .verify(generators.share(0), &mut transcript, &mut rng, n) - .is_ok() - ); + // Verifier's scope + { + // 3. Deserialize + let range_proof: RangeProof = bincode::deserialize(&proof_bytes).unwrap(); + let mut rng = OsRng::new().unwrap(); + + // 4. Use the same customization label as above to verify + let mut transcript = ProofTranscript::new(b"RangeproofTest"); + assert!( + range_proof + .verify(generators.share(0), &mut transcript, &mut rng, n) + .is_ok() + ); + + // Verification with a different label fails + let mut transcript = ProofTranscript::new(b""); + assert!( + range_proof + .verify(generators.share(0), &mut transcript, &mut rng, n) + .is_err() + ); + } } #[test] From 337d95c45ffb2ded96f5b2fca1dc2ce44b5c13ad Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 10 Apr 2018 13:09:13 -0700 Subject: [PATCH 096/186] Add warning note to transcript --- src/proof_transcript.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/proof_transcript.rs b/src/proof_transcript.rs index 264b7bf9..aa15c0cb 100644 --- a/src/proof_transcript.rs +++ b/src/proof_transcript.rs @@ -31,8 +31,10 @@ use byteorder::{ByteOrder, LittleEndian}; /// ensure that their challenge values are bound to the *entire* proof /// transcript, not just the sub-protocol. /// -/// Internally, the `ProofTranscript` uses the Keccak sponge to -/// absorb messages and squeeze challenges. +/// Internally, the `ProofTranscript` is supposed to use Keccak to +/// absorb incoming messages and to squeeze challenges. The +/// construction currently used is ad-hoc, has no security analysis, +/// and is **only suitable for testing**. 
/// /// # Example /// From 3a2c5340b807a230c5e65955e8b4b182d87b26bf Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Wed, 11 Apr 2018 14:00:17 -0700 Subject: [PATCH 097/186] fix imports on benchmarks --- benches/bulletproofs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benches/bulletproofs.rs b/benches/bulletproofs.rs index 7f109aec..bfedea87 100644 --- a/benches/bulletproofs.rs +++ b/benches/bulletproofs.rs @@ -9,8 +9,8 @@ extern crate curve25519_dalek; use curve25519_dalek::scalar::Scalar; extern crate ristretto_bulletproofs; -use ristretto_bulletproofs::generators::{Generators, GeneratorsView}; -use ristretto_bulletproofs::proof_transcript::ProofTranscript; +use ristretto_bulletproofs::{Generators, GeneratorsView}; +use ristretto_bulletproofs::ProofTranscript; use ristretto_bulletproofs::RangeProof; fn bench_create_helper(n: usize, c: &mut Criterion) { From 09f6ce3e22b2e81c29d85e9fb2d51b031455f2bb Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Wed, 11 Apr 2018 14:40:48 -0700 Subject: [PATCH 098/186] Bump rust version to 2018-04-03 to compile dalek 0.16.3 (#48) --- rust-toolchain | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain b/rust-toolchain index 0786e6f1..012938f6 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2018-03-05 +nightly-2018-04-03 From 52a34a0ffa0e63389ba53d33fe815c136bd6956c Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Wed, 11 Apr 2018 16:03:08 -0700 Subject: [PATCH 099/186] Add notes on range proof protocol, closes #43 --- docs/inner-product-protocol.md | 63 +++++++---- docs/notes.md | 74 +++++++------ docs/range-proof-protocol.md | 190 +++++++++++++++++++++++++++++++++ src/inner_product_proof.rs | 6 +- src/range_proof.rs | 12 ++- 5 files changed, 285 insertions(+), 60 deletions(-) create mode 100644 docs/range-proof-protocol.md diff --git a/docs/inner-product-protocol.md b/docs/inner-product-protocol.md index ec7201a1..9cb40fe4 100644 --- a/docs/inner-product-protocol.md +++ b/docs/inner-product-protocol.md @@ -1,14 +1,21 @@ +The `inner_product_proof` module contains API for producing a compact proof of an inner product of two vectors. + Inner product argument protocol =============================== +These notes explain how the protocol is implemented in the [`Proof`](struct.Proof.html) type. + We want to prove the relation \\[ \operatorname{PK}\left\\{ - ({\mathbf{G}}, {\mathbf{H}} \in {\mathbb G}^n, P, Q \in {\mathbb G}; {\mathbf{a}}, {\mathbf{b}} \in {\mathbb Z\_p}^n) - : P = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} Q + ({\mathbf{G}}, {\mathbf{H}} \in {\mathbb G}^n, P', Q \in {\mathbb G}; {\mathbf{a}}, {\mathbf{b}} \in {\mathbb Z\_p}^n) + : P' = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} Q \right\\} \\] where \\(n = 2^{k}\\) is a power of \\(2\\). +Prover’s algorithm +------------------ + To start, we sketch the interactive version of this protocol, and then describe the optimizations discussed in the Bulletproofs paper for the @@ -23,19 +30,19 @@ The protocol consists of \\(k = \lg n\\) rounds, indexed by \end{aligned} \\] and sends \\(L\_{j}, R\_{j}\\) to the verifier. The verifier responds with a -challenge value \\(x\_{j} {\xleftarrow{\\$}}{\mathbb{Z}\_p}\\). The prover uses -\\(x\_{j}\\) to compute +challenge value \\(u\_{j} {\xleftarrow{\\$}}{\mathbb{Z}\_p}\\). 
The prover uses +\\(u\_{j}\\) to compute \\[ \begin{aligned} - {\mathbf{a}} &\gets {\mathbf{a}}\_{\operatorname{lo}} \cdot x\_{j} + x\_{j}^{-1} \cdot {\mathbf{a}}\_{\operatorname{hi}}, \\\\ - {\mathbf{b}} &\gets {\mathbf{b}}\_{\operatorname{lo}} \cdot x\_{j}^{-1} + x\_{j} \cdot {\mathbf{a}}\_{\operatorname{hi}}, + {\mathbf{a}} &\gets {\mathbf{a}}\_{\operatorname{lo}} \cdot u\_{j} + u\_{j}^{-1} \cdot {\mathbf{a}}\_{\operatorname{hi}}, \\\\ + {\mathbf{b}} &\gets {\mathbf{b}}\_{\operatorname{lo}} \cdot u\_{j}^{-1} + u\_{j} \cdot {\mathbf{a}}\_{\operatorname{hi}}, \end{aligned} \\] the prover and verifier both compute \\[ \begin{aligned} - {\mathbf{G}} &\gets {\mathbf{G}}\_{\operatorname{lo}} \cdot x\_{j}^{-1} + x\_{j} \cdot {\mathbf{G}}\_{\operatorname{hi}}, \\\\ - {\mathbf{H}} &\gets {\mathbf{H}}\_{\operatorname{lo}} \cdot x\_{j} + x\_{j}^{-1} \cdot {\mathbf{H}}\_{\operatorname{hi}}, + {\mathbf{G}} &\gets {\mathbf{G}}\_{\operatorname{lo}} \cdot u\_{j}^{-1} + u\_{j} \cdot {\mathbf{G}}\_{\operatorname{hi}}, \\\\ + {\mathbf{H}} &\gets {\mathbf{H}}\_{\operatorname{lo}} \cdot u\_{j} + u\_{j}^{-1} \cdot {\mathbf{H}}\_{\operatorname{hi}}, \end{aligned} \\] and use these vectors (all of length \\(2^{j-1}\\)) for the next round. @@ -44,37 +51,40 @@ After the last (\\(j = 1\\)) round, the prover sends if and only if \\[ \begin{aligned} -L\_{1} x\_{1}^{2} + \cdots + L\_{k} x\_{k}^{2} + P + R\_{k} x\_{k}^{-2} + \cdots + R\_{1} x\_{1}^{-2}&\overset ? = aG + bH + abQ, +L\_{1} u\_{1}^{2} + \cdots + L\_{k} u\_{k}^{2} + P' + R\_{k} u\_{k}^{-2} + \cdots + R\_{1} u\_{1}^{-2}&\overset ? = aG + bH + abQ, \end{aligned} \\] where \\(G, H = {\mathbf{G}}\_{0}, {\mathbf{H}}\_{0}\\). To make the protocol noninteractive, we replace the transmission of the -\\(L\_{j}\\) and \\(R\_{j}\\) and the response \\(x\_{j}\\) with a Fiat-Shamir -challenge, so that each \\(x\_{j}\\) is generated as a hash of the transcript +\\(L\_{j}\\) and \\(R\_{j}\\) and the response \\(u\_{j}\\) with a Fiat-Shamir +challenge, so that each \\(u\_{j}\\) is generated as a hash of the transcript \\(L\_{k},R\_{k},\ldots,L\_{j},R\_{j}\\). At the end of the prover’s computation, they send \\(a,b,L\_{k},R\_{k},\ldots,L\_{1},R\_{1}\\) to the verifier. +Verifier’s algorithm +-------------------- + Since the final \\(G\\) and \\(H\\) values are functions of the challenges -\\(x\_{k},\ldots,x\_{1}\\), the verifier has to compute them as part of the +\\(u\_{k},\ldots,u\_{1}\\), the verifier has to compute them as part of the verification process. However, while the prover needs to compute the intermediate vectors \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) in order to compute the \\(L\_{j}\\) and \\(R\_{j}\\), the verifier doesn’t, and can compute the final \\(G\\), \\(H\\) directly from the vectors \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) and -the challenges \\(x\_{k}, \ldots, x\_{1}\\). +the challenges \\(u\_{k}, \ldots, u\_{1}\\). Let \\({\mathbf{G}}^{(j)}\\) be the value of \\({\mathbf{G}}\\) in the \\(j\\)-th round, and let \\(G\_{i}\\) be the \\(i\\)-th entry of the initial vector \\({\mathbf{G}}^{(k)} = (G\_{0}, \ldots, G\_{n-1})\\). 
We have \\[ \begin{aligned} - {\mathbf{G}}^{(j-1)} = ({\mathbf{G}}^{(j)})\_{\operatorname{lo}} x\_{j}^{-1} + ({\mathbf{G}}^{(j)})\_{\operatorname{hi}} x\_{j},\end{aligned} + {\mathbf{G}}^{(j-1)} = ({\mathbf{G}}^{(j)})\_{\operatorname{lo}} u\_{j}^{-1} + ({\mathbf{G}}^{(j)})\_{\operatorname{hi}} u\_{j},\end{aligned} \\] so the coefficient of \\(G\_{i}\\) in the final \\(G\\) value is \\[ \begin{aligned} - s\_{i} &= x\_{k}^{b(i,k)} \cdots x\_1^{b(i,1)},\end{aligned} + s\_{i} &= u\_{k}^{b(i,k)} \cdots u\_1^{b(i,1)},\end{aligned} \\] where \\(b(i,j)\\) is either \\(-1\\) or \\(+1\\), according to whether \\(G\_{i}\\) appears in the left or right half of \\({\mathbf{G}}^{(j)}\\). Since \\(G\_{i}\\) appears in @@ -103,18 +113,29 @@ Since \\(H\\) is computed similarly, but with the roles of \\({\mathbf{H}}\_{\operatorname{lo}}\\) and \\({\mathbf{H}}\_{\operatorname{hi}}\\) reversed, a similar argument shows that \\(H = {\langle 1/{\mathbf{s}}, {\mathbf{H}} \rangle}\\). -Notice that -if \\(i'\\) is the bitwise NOT of \\(i\\), then \\(s\_{i'} = -1/s\_{i}\\), so the vector of inverses \\(1/{\mathbf{s}}\\) is a permutation of -the vector \\({\mathbf{s}}\\) and no additional computation is required to + +Notice that if \\(i'\\) is the bitwise \\(\texttt{NOT}\\) of \\(i\\), then \\(s\_{i'} = 1/s\_{i}\\), +and as \\(i\\) runs from \\(0\\) to \\((2^k - 1)\\), \\(i'\\) runs from \\((2^k - 1)\\) to \\(0\\), +so the vector of inverses \\(1/{\mathbf{s}}\\) is a reversed +vector \\({\mathbf{s}}\\) and no additional computation is required to obtain the \\(1/s\_{i}\\). +### Verification equation + The verifier’s computation then becomes \\[ \begin{aligned} -P \overset ? =& aG +bH +abQ - \sum\_{j=1}^{k} \left( L\_{k} x\_{k}^{2} + x\_{k}^{-2} R\_{k} \right) \\\\ -=& {\langle a \cdot {\mathbf{s}}, {\mathbf{G}} \rangle} + {\langle b /{\mathbf{s}}, {\mathbf{H}} \rangle} + abQ - \sum\_{j=1}^{k} \left( L\_{k} x\_{k}^{2} + x\_{k}^{-2} R\_{k} \right), +P' \overset ? =& aG +bH +abQ - \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right) \\\\ +=& {\langle a \cdot {\mathbf{s}}, {\mathbf{G}} \rangle} + {\langle b /{\mathbf{s}}, {\mathbf{H}} \rangle} + abQ - \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right), \end{aligned} \\] a single multiscalar multiplication with \\(n + n + 1 + k + k = 2(n+k) + 1\\) points. + +In order to combine the computation above with other checks in a parent protocol, we can provide these scalars: + +\\[ + \\{u\_{1}^{2}, \dots, u\_{k}^{2}, u\_{1}^{-2}, \dots, u\_{k}^{-2}, s_0, \dots, s_{n-1}\\}. +\\] + +Use the [`Proof::verification_scalars`](struct.Proof.html#method.verification_scalars) method to produce these scalars for a given inner product proof. 
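
For intuition, here is a minimal, unoptimized sketch of how the scalars \\(s\_i\\) could be computed directly from the challenges. The helper name `verification_s` and the challenge ordering (first round first, i.e. \\(u\_k, \ldots, u\_1\\)) are assumptions made for this illustration; the crate's `verification_scalars` method computes the same values more efficiently, using the fact noted above that \\(1/{\mathbf{s}}\\) is simply \\({\mathbf{s}}\\) reversed.

```rust
use curve25519_dalek::scalar::Scalar;

// Sketch only: s[i] is the product over all rounds of u_j or 1/u_j,
// taking u_j when the bit of i that selects the "hi" half in that round
// is set, and 1/u_j otherwise.
fn verification_s(u: &[Scalar]) -> Vec<Scalar> {
    let k = u.len();
    let n = 1usize << k;
    (0..n)
        .map(|i| {
            let mut s_i = Scalar::one();
            for j in 0..k {
                // u[j] is the challenge of round k - j, which corresponds
                // to bit (k - 1 - j) of the index i.
                let bit = (i >> (k - 1 - j)) & 1;
                s_i = s_i * if bit == 1 { u[j] } else { u[j].invert() };
            }
            s_i
        })
        .collect()
}
```
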
diff --git a/docs/notes.md b/docs/notes.md index 18694ffb..0aeb7e20 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -87,7 +87,7 @@ The variable renaming is as follows: \end{aligned} \\] -Range Proofs from Inner Products +Range Proofs from inner products ================================ The goal of a *range proof* is for a prover to convince a verifier @@ -313,7 +313,7 @@ The commitments \\(V\\), \\(T\_{1}\\), \\(T\_{2}\\) are related to each other an \begin{aligned} t(x) B &\quad &= \quad & z^{2}vB & \quad &+ \quad & \delta(y,z) B & \quad &+ \quad& x t\_{1} B &\quad &+\quad & x^2 t\_{2} B \\\\ + &\quad & \quad & + & \quad & \quad & + & \quad & \quad& + &\quad & \quad & + \\\\ - {\widetilde{t}}(x) {\widetilde{B}} &\quad &= \quad & z^2 {\widetilde{v}} {\widetilde{B}} & \quad &+ \quad & 0 {\widetilde{B}} & \quad &+ \quad& x {\widetilde{t}}\_{1} {\widetilde{B}} &\quad &+\quad & x^{2} {\widetilde{t}}\_{2} {\widetilde{B}} \\\\ + {\tilde{t}}(x) {\widetilde{B}} &\quad &= \quad & z^2 {\widetilde{v}} {\widetilde{B}} & \quad &+ \quad & 0 {\widetilde{B}} & \quad &+ \quad& x {\tilde{t}}\_{1} {\widetilde{B}} &\quad &+\quad & x^{2} {\tilde{t}}\_{2} {\widetilde{B}} \\\\ \shortparallel &\quad & \quad & \shortparallel & \quad & \quad & \shortparallel & \quad & \quad& \shortparallel &\quad & \quad & \shortparallel \\\\ &\quad &= \quad & z V & \quad &+ \quad & \delta(y,z) B & \quad &+ \quad& x T\_{1} &\quad &+\quad & x^{2} T\_{2} \end{aligned} @@ -321,17 +321,17 @@ The commitments \\(V\\), \\(T\_{1}\\), \\(T\_{2}\\) are related to each other an Notice that the sum of each column is a commitment to the variable in the top row using the blinding factor in the second row. The sum of all of the columns is -\\(t(x) B + {\widetilde{t}}(x) {\widetilde{B}}\\), a commitment to the value +\\(t(x) B + {\tilde{t}}(x) {\widetilde{B}}\\), a commitment to the value of \\(t\\) at the point \\(x\\), using the synthetic blinding factor[^1] \\[ - {\widetilde{t}}(x) = z^{2} {\widetilde{v}} + x {\widetilde{t}}\_{1} + x^{2} {\widetilde{t}}\_{2}. + {\tilde{t}}(x) = z^{2} {\tilde{v}} + x {\tilde{t}}\_{1} + x^{2} {\tilde{t}}\_{2}. \\] To convince the verifier that \\(t(x) = z^2v + \delta(y,z) + t\_{1} x + t\_{2} x^{2}\\), the prover sends -the opening \\(t(x), {\widetilde{t}}(x)\\) to the verifier, who uses the +the opening \\(t(x), {\tilde{t}}(x)\\) to the verifier, who uses the bottom row of the diagram to check consistency: \\[ - t(x) B + {\widetilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z V + \delta(y,z) B + x T\_{1} + x^{2} T\_{2}. + t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z^2 V + \delta(y,z) B + x T\_{1} + x^{2} T\_{2}. 
\\] [^1]: The blinding factor is synthetic in the sense that it is @@ -406,7 +406,10 @@ To convince the verifier that sends \\({\widetilde{e}}\\) to the verifier, who uses the bottom row to compute \\[ - P = -{\widetilde{e}} {\widetilde{B}} + A + x S + {\langle z {\mathbf{y}}^n + z^2 {\mathbf{2}}^n, {\mathbf{H}}' \rangle} - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle}; +\begin{aligned} + P &= -{\widetilde{e}} {\widetilde{B}} + A + x S + {\langle z {\mathbf{y}}^n + z^2 {\mathbf{2}}^n, {\mathbf{H}}' \rangle} - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle}\\\\ + &= -{\widetilde{e}} {\widetilde{B}} + A + x S + {\langle z {\mathbf{1}} + z^2 {\mathbf{y}^{-n}} \circ {\mathbf{2}}^n, {\mathbf{H}} \rangle} - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle}; +\end{aligned} \\] if the prover is honest, this is \\(P = {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x), {\mathbf{H}}' \rangle}\\), @@ -414,7 +417,7 @@ so the verifier uses \\(P\\), \\(t(x)\\) as inputs to the inner-product protocol to prove that \\(t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle}\\). -Inner-Product Proof +Inner-product proof =================== First, let’s observe that the prover can simply send vectors @@ -454,28 +457,28 @@ for the following relation: c &{}={}&& {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} \end{aligned} \\] -let’s compress these two statements into one equation using an +Let’s compress these two statements into one equation using an indeterminate variable \\(w \in {\mathbb Z\_{p}^{\times}}\\) and multiplying the -second equation by an additional orthogonal generator -\\(Q \in {\mathbb G}\\): +second equation by an orthogonal generator +\\(B \in {\mathbb G}\\): \\[ \begin{aligned} P &{}={}&& {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle}\\\\ &{}+{}&&\\\\ - c \cdot w \cdot Q &{}={}&& {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} \cdot w \cdot Q + c w B &{}={}&& {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} w B \end{aligned} \\] -let’s simplify the resulting equation using the following definitions: +Let’s simplify the resulting equation using the following definitions: \\[ \begin{aligned} k &= \lg n \\\\ - P\_k &= P + c \cdot w \cdot Q \\\\ - \hat{Q} &= w \cdot Q + P' &= P + cwB \\\\ + Q &= wB \end{aligned} \\] The equation becomes: \\[ - P\_k = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} \cdot \hat{Q} + P' = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} Q \\] The combined equation is useful because it will allow us to compress each vector in half and arrive to the same form. By doing @@ -483,16 +486,13 @@ such compression \\(\lg n\\) times we will end up with an equation where both vectors are one-element long and we can simply transmit them to check the final equality directly. 
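
For intuition only (this is not the crate's code), one such compression step applied to a scalar vector can be sketched as below; the helper name `fold` and the even-length requirement are assumptions of the illustration. The vector \\({\mathbf{b}}\\) and the generator vectors are folded the same way with the roles of \\(u\\) and \\(u^{-1}\\) swapped, as the precise formulas introduced next make explicit.

```rust
use curve25519_dalek::scalar::Scalar;

// One halving step: a' = a_lo * u + u^{-1} * a_hi.
// Assumes `a` has even length.
fn fold(a: &[Scalar], u: Scalar) -> Vec<Scalar> {
    let half = a.len() / 2;
    let u_inv = u.invert();
    (0..half).map(|i| a[i] * u + u_inv * a[half + i]).collect()
}
```
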
-If the prover can demonstrate that the above \\(P\_k\\) has such structure -over generators \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) and \\(\hat Q\\) for all +If the prover can demonstrate that the above \\(P'\\) has such structure +over generators \\({\mathbf{G}}\\), \\({\mathbf{H}}\\) and \\(Q\\) for all \\(w \in {\mathbb Z\_{p}^{*}}\\), then the original \\(P\\) and \\(c\\) must satisfy the original relation -\\[ -\begin{aligned} - P = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} \wedge - c = {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} -\end{aligned} -\\] +\\((P = {\langle {\mathbf{a}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{b}}, {\mathbf{H}} \rangle} +\wedge c = {\langle {\mathbf{a}}, {\mathbf{b}} \rangle})\\). + Let’s introduce an indeterminate variable \\(u\_k \in {\mathbb Z\_{p}^{\times}}\\) and compress the vectors by adding the left and the right halves separated by the variable \\(u\_k\\): @@ -507,9 +507,9 @@ separated by the variable \\(u\_k\\): The powers of \\(u\_k\\) are chosen so they cancel out in the inner products of interest as will be shown below. -Let’s now define \\(P\_{k-1}\\) using the same equation as for \\(P_k\\), but using the compressed vectors: +Let \\(P\_k = P'\\) and define \\(P\_{k-1}\\) using the same equation as for \\(P\_k\\), but using the compressed vectors: \\[ - P\_{k-1} = {\langle {\mathbf{a}}^{(k-1)}, {\mathbf{G}}^{(k-1)} \rangle} + {\langle {\mathbf{b}}^{(k-1)}, {\mathbf{H}}^{(k-1)} \rangle} + {\langle {\mathbf{a}}^{(k-1)}, {\mathbf{b}}^{(k-1)} \rangle} \cdot \hat{Q} + P\_{k-1} = {\langle {\mathbf{a}}^{(k-1)}, {\mathbf{G}}^{(k-1)} \rangle} + {\langle {\mathbf{b}}^{(k-1)}, {\mathbf{H}}^{(k-1)} \rangle} + {\langle {\mathbf{a}}^{(k-1)}, {\mathbf{b}}^{(k-1)} \rangle} \cdot Q \\] Expanding it in terms of the original \\({\mathbf{a}}\\), \\({\mathbf{b}}\\), \\({\mathbf{G}}\\) and \\({\mathbf{H}}\\) gives: @@ -517,7 +517,7 @@ Expanding it in terms of the original \\({\mathbf{a}}\\), \\({\mathbf{b}}\\), \begin{aligned} P\_{k-1} &{}={}& &{\langle {\mathbf{a}}\_L \cdot u\_k + u\_k^{-1} \cdot {\mathbf{a}}\_R, {\mathbf{G}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{G}}\_R \rangle} + \\\\ && &{\langle {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R, {\mathbf{H}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{H}}\_R \rangle} + \\\\ - && &{\langle {\mathbf{a}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{a}}\_R, {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R \rangle} \cdot \hat{Q} + && &{\langle {\mathbf{a}}\_L \cdot u\_k + u^{-1}\_k \cdot {\mathbf{a}}\_R, {\mathbf{b}}\_L \cdot u^{-1}\_k + u\_k \cdot {\mathbf{b}}\_R \rangle} \cdot Q \end{aligned} \\] Breaking down in simpler products: @@ -525,7 +525,7 @@ Breaking down in simpler products: \begin{aligned} P\_{k-1} &{}={}& &{\langle {\mathbf{a}}\_L, {\mathbf{G}}\_L \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{G}}\_R \rangle} &{}+{}& u\_k^2 {\langle {\mathbf{a}}\_L, {\mathbf{G}}\_R \rangle} + u^{-2}\_k {\langle {\mathbf{a}}\_R, {\mathbf{G}}\_L \rangle} + \\\\ && &{\langle {\mathbf{b}}\_L, {\mathbf{H}}\_L \rangle} + {\langle {\mathbf{b}}\_R, {\mathbf{H}}\_R \rangle} &{}+{}& u^2\_k {\langle {\mathbf{b}}\_R, {\mathbf{H}}\_L \rangle} + u^{-2}\_k {\langle {\mathbf{b}}\_L, {\mathbf{H}}\_R \rangle} + \\\\ - && &({\langle {\mathbf{a}}\_L, {\mathbf{b}}\_L \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_R \rangle})\cdot \hat{Q} &{}+{}& (u^2\_k {\langle {\mathbf{a}}\_L, {\mathbf{b}}\_R \rangle} + u^{-2}\_k {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_L \rangle}) 
\cdot \hat{Q} + && &({\langle {\mathbf{a}}\_L, {\mathbf{b}}\_L \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_R \rangle})\cdot Q &{}+{}& (u^2\_k {\langle {\mathbf{a}}\_L, {\mathbf{b}}\_R \rangle} + u^{-2}\_k {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_L \rangle}) \cdot Q \end{aligned} \\] We now see that the left two columns in the above equation is the @@ -535,8 +535,8 @@ terms with \\(u^2\_k\\) as \\(L\_k\\) and all terms with \\(u^{-2}\_k\\) as \\(R \\[ \begin{aligned} P\_{k-1} &= P\_k + u^2\_k \cdot L\_k + u^{-2}\_k \cdot R\_k\\\\ - L\_k &= {\langle {\mathbf{a}}\_L, {\mathbf{G}}\_R \rangle} + {\langle {\mathbf{b}}\_R, {\mathbf{H}}\_L \rangle} + {\langle {\mathbf{a}}\_L, {\mathbf{b}}\_R \rangle} \cdot \hat{Q}\\\\ - R\_k &= {\langle {\mathbf{a}}\_R, {\mathbf{G}}\_L \rangle} + {\langle {\mathbf{b}}\_L, {\mathbf{H}}\_R \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_L \rangle} \cdot \hat{Q} + L\_k &= {\langle {\mathbf{a}}\_L, {\mathbf{G}}\_R \rangle} + {\langle {\mathbf{b}}\_R, {\mathbf{H}}\_L \rangle} + {\langle {\mathbf{a}}\_L, {\mathbf{b}}\_R \rangle} \cdot Q\\\\ + R\_k &= {\langle {\mathbf{a}}\_R, {\mathbf{G}}\_L \rangle} + {\langle {\mathbf{b}}\_L, {\mathbf{H}}\_R \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{b}}\_L \rangle} \cdot Q \end{aligned} \\] If the prover commits to \\(L\_k\\) and \\(R\_k\\) before \\(u\_k\\) is randomly @@ -549,10 +549,20 @@ variable \\(u\_{k-1}\\) in the same way as we used \\(u\_k\\) and arrive to even shorter vectors. We will continue doing so until we end up with vectors \\({\mathbf{a}}^{(0)}, {\mathbf{b}}^{(0)}, {\mathbf{G}}^{(0)}, {\mathbf{H}}^{(0)}\\), -each containing one item: +each containing one item, and \\(P\_0\\) containing all accumulated cross-terms at each step: \\[ - P\_0 = a^{(0)}\_0 \cdot G^{(0)}\_0 + b^{(0)}\_0 \cdot H^{(0)}\_0 + a^{(0)}\_0 \cdot b^{(0)}\_0 \cdot \hat{Q} +\begin{aligned} + P\_0 &= a^{(0)}\_0 G^{(0)}\_0 + b^{(0)}\_0 H^{(0)}\_0 + a^{(0)}\_0 b^{(0)}\_0 Q\\\\ + P\_0 &= P\_k + \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right) +\end{aligned} +\\] + +Rewriting the above with the definitions \\(P\_k = P' = P + cwB\\) and \\(Q = wB\\) gives the +final statement: +\\[ + P + c w B = a^{(0)}\_0 G^{(0)}\_0 + b^{(0)}\_0 H^{(0)}\_0 + a^{(0)}\_0 b^{(0)}\_0 wB - \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right) \\] + At this point the prover can transmit two scalars \\(a^{(0)}\_0\\) and \\(b^{(0)}\_0\\) to the verifier, so they check the final statement directly by computing both sides of the equation. diff --git a/docs/range-proof-protocol.md b/docs/range-proof-protocol.md new file mode 100644 index 00000000..68c074b8 --- /dev/null +++ b/docs/range-proof-protocol.md @@ -0,0 +1,190 @@ +The `range_proof` module contains API for producing a range proof for a given integer value. + +Range proof protocol +==================== + +This is a documentation for the internal implementation of a range proof. +You may find the introduction to all the pieces of the protocol in the [notes](../notes/index.html) module. + +The range proof is a zero-knowledge proof of the following relation +\\[ +\operatorname{ZK-PK}\left\\{ + v \in {\mathbb Z\_p} + : v \in [0, 2^n) +\right\\} +\\] where \\(n = 2^{k}\\) is a power of \\(2\\). + +Prover’s algorithm +------------------ + +The protocol begins by computing three commitments: to the value \\(v\\), +to the bits of that value \\(\mathbf{a}\_{L}, \mathbf{a}\_{R}\\), +and to the per-bit blinding factors \\(\mathbf{s}\_{L}, \mathbf{s}\_{R}\\). 
+ +Each bit \\(a_i\\) is committed twice: as \\(a\_{L,i} \gets a\_i\\) and as \\(a\_{R,i} \gets a_i - 1\\). +Similarly for the blinding factors \\(\mathbf{s}\_{L}, \mathbf{s}\_{R}\\). + +\\[ +\begin{aligned} +V &\gets \operatorname{Com}(v, {\widetilde{v}}) && = v \cdot B + {\widetilde{v}} \cdot {\widetilde{B}} \\\\ +A &\gets \operatorname{Com}({\mathbf{a}}\_{L}, {\mathbf{a}}\_{R}) && = {\langle {\mathbf{a}}\_L, {\mathbf{G}} \rangle} + {\langle {\mathbf{a}}\_R, {\mathbf{H}} \rangle} + {\widetilde{a}} {\widetilde{B}} \\\\ +S &\gets \operatorname{Com}({\mathbf{s}}\_{L}, {\mathbf{s}}\_{R}) && = {\langle {\mathbf{s}}\_L, {\mathbf{G}} \rangle} + {\langle {\mathbf{s}}\_R, {\mathbf{H}} \rangle} + {\widetilde{s}} {\widetilde{B}} \\\\ +\end{aligned} +\\] where \\(\widetilde{v}, \widetilde{a}, \widetilde{s}\\) are sampled randomly +from \\({\mathbb Z\_p}\\) and \\(\mathbf{s}\_{L}, \mathbf{s}\_{R}\\) are sampled randomly from \\({\mathbb Z\_p}^{n}\\). + +The prover adds \\(V, A, S\\) to the protocol transcript +and obtains challenge scalars \\(y,z \in {\mathbb Z\_p}\\). + +Using the challenges and the secret vectors, the prover constructs vector polynomials: +\\[ +\begin{aligned} + {\mathbf{l}}(x) &= {\mathbf{l}}\_{0} + {\mathbf{l}}\_{1} x \\\\ + {\mathbf{r}}(x) &= {\mathbf{r}}\_{0} + {\mathbf{r}}\_{1} x \\\\ + {\mathbf{l}}\_{0} &\gets {\mathbf{a}}\_{L} - z {\mathbf{1}} \\\\ + {\mathbf{l}}\_{1} &\gets {\mathbf{s}}\_{L} \\\\ + {\mathbf{r}}\_{0} &\gets {\mathbf{y}}^{n} \circ ({\mathbf{a}}\_{R} + z {\mathbf{1}}) + z^{2} {\mathbf{2}}^{n} \\\\ + {\mathbf{r}}\_{1} &\gets {\mathbf{y}}^{n} \circ {\mathbf{s}}\_{R} +\end{aligned} +\\] + +The inner product of the above vector polynomials is: +\\[ + t(x) = {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle} = t\_{0} + t\_{1} x + t\_{2} x^{2}, +\\] + +The prover uses Karatsuba’s method to compute the coefficients of that polynomial as follows: +\\[ +\begin{aligned} + t\_{0} &\gets {\langle {\mathbf{l}}\_{0}, {\mathbf{r}}\_{0} \rangle}, \\\\ + t\_{2} &\gets {\langle {\mathbf{l}}\_{1}, {\mathbf{r}}\_{1} \rangle}, \\\\ + t\_{1} &\gets {\langle {\mathbf{l}}\_{0} + {\mathbf{l}}\_{1}, {\mathbf{r}}\_{0} + {\mathbf{r}}\_{1} \rangle} - t\_{0} - t\_{2} +\end{aligned} +\\] + +The prover commits to the terms \\(t_1, t_2\\): +\\[ +\begin{aligned} +T\_1 &\gets \operatorname{Com}(t\_1, {\tilde{t}\_1}) && = t\_1 \cdot B + {\tilde{t}\_1} \cdot {\widetilde{B}} \\\\ +T\_2 &\gets \operatorname{Com}(t\_2, {\tilde{t}\_2}) && = t\_2 \cdot B + {\tilde{t}\_2} \cdot {\widetilde{B}} +\end{aligned} +\\] where \\(\tilde{t}\_1, \tilde{t}\_2\\) are sampled randomly from \\({\mathbb Z\_p}\\). 
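
For reference, the coefficient computation described above amounts to three inner products over \\({\mathbb Z\_p}\\), thanks to the identity \\({\langle {\mathbf{l}}\_0 + {\mathbf{l}}\_1, {\mathbf{r}}\_0 + {\mathbf{r}}\_1 \rangle} = t\_0 + t\_1 + t\_2\\). A small self-contained sketch follows; the function name `t_poly_coeffs` is an assumption for the illustration and this is not the crate's actual implementation.

```rust
use curve25519_dalek::scalar::Scalar;

// Coefficients (t0, t1, t2) of t(x) = <l0 + l1*x, r0 + r1*x>.
// Assumes all four slices have the same length.
fn t_poly_coeffs(
    l0: &[Scalar], l1: &[Scalar],
    r0: &[Scalar], r1: &[Scalar],
) -> (Scalar, Scalar, Scalar) {
    let inner = |a: &[Scalar], b: &[Scalar]| -> Scalar {
        a.iter().zip(b.iter()).fold(Scalar::zero(), |acc, (x, y)| acc + x * y)
    };
    let t0 = inner(l0, r0);
    let t2 = inner(l1, r1);
    // Karatsuba-style trick: recover t1 from the inner product of the sums.
    let l_sum: Vec<Scalar> = l0.iter().zip(l1.iter()).map(|(x, y)| x + y).collect();
    let r_sum: Vec<Scalar> = r0.iter().zip(r1.iter()).map(|(x, y)| x + y).collect();
    let t1 = inner(&l_sum, &r_sum) - t0 - t2;
    (t0, t1, t2)
}
```
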
+
+The prover adds \\(T_1, T_2\\) to the protocol transcript,
+obtains a challenge scalar \\(x \in {\mathbb Z\_p}\\)
+and uses it to evaluate the polynomials \\(\mathbf{l}(x), \mathbf{r}(x), t(x)\\):
+\\[
+\begin{aligned}
+  \mathbf{l} &\gets {\mathbf{l}}\_{0} + {\mathbf{l}}\_{1} x\\\\
+  \mathbf{r} &\gets {\mathbf{r}}\_{0} + {\mathbf{r}}\_{1} x\\\\
+  t(x) &\gets t\_{0} + t\_{1} x + t\_{2} x^{2}
+\end{aligned}
+\\]
+
+Next, the prover computes the synthetic blinding factors:
+\\[
+\begin{aligned}
+  {\tilde{t}}(x) &\gets z^{2} {\tilde{v}} + x {\tilde{t}}\_{1} + x^{2} {\tilde{t}}\_{2} \\\\
+  \tilde{e} &\gets {\widetilde{a}} + x {\widetilde{s}}
+\end{aligned}
+\\]
+
+The prover adds \\(t(x), {\tilde{t}}(x), \tilde{e}\\) to the protocol transcript,
+obtains a challenge scalar \\(w \in {\mathbb Z\_p}\\),
+and uses it to create a point \\(Q\\):
+\\[
+  Q \gets w \cdot B
+\\]
+
+The prover then performs the [inner product argument](../inner_product_proof/index.html) to prove the relation:
+\\[
+\operatorname{PK}\left\\{
+  ({\mathbf{G}}, {\mathbf{H}}' \in {\mathbb G}^n, P', Q \in {\mathbb G}; {\mathbf{l}}, {\mathbf{r}} \in {\mathbb Z\_p}^n)
+  : P' = {\langle {\mathbf{l}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}, {\mathbf{H}}' \rangle} + {\langle {\mathbf{l}}, {\mathbf{r}} \rangle} Q
+\right\\}
+\\] where \\({\mathbf{H}}' = {\mathbf{y}}^{-n} \circ {\mathbf{H}}\\).
+
+The result of the inner product proof is a list of \\(2k\\) points and \\(2\\) scalars: \\(\\{L\_k, R\_k, \\dots, L\_1, R\_1, a, b\\}\\).
+
+The complete range proof consists of \\(9+2k\\) 32-byte elements:
+\\[
+  \\{A, S, T_1, T_2, t(x), {\tilde{t}}(x), \tilde{e}, L\_k, R\_k, \\dots, L\_1, R\_1, a, b\\}
+\\]
+
+
+Verifier’s algorithm
+--------------------
+
+Verifier’s input is the range size \\(n = 2^k\\) (in bits), value commitment \\(V\\), and \\(32 \cdot (9 + 2 k)\\) bytes of the proof data:
+\\[
+  \\{A, S, T_1, T_2, t(x), {\tilde{t}}(x), \tilde{e}, L\_{k}, R\_{k}, \\dots, L\_1, R\_1, a, b\\}
+\\]
+
+Verifier uses Fiat-Shamir transform to obtain challenges by adding the appropriate data sequentially to the protocol transcript:
+
+1. \\(V, A, S\\) are added to obtain challenge scalars \\(y,z \in {\mathbb Z\_p}\\),
+2. \\(T_1, T_2\\) are added to obtain a challenge \\(x \in {\mathbb Z\_p}\\),
+3. \\(t(x), {\tilde{t}}(x), \tilde{e}\\) are added to obtain a challenge \\(w \in {\mathbb Z\_p}\\).
+
+Verifier computes the following scalars for the [inner product argument](../inner_product_proof/index.html):
+
+\\[
+  \\{u\_{1}^{2}, \dots, u\_{k}^{2}, u\_{1}^{-2}, \dots, u\_{k}^{-2}, s_0, \dots, s_{n-1}\\}
+\\]
+
+The goal of the verifier is to check two equations:
+
+1. First, verify the constant term of the polynomial \\(t(x)\\) (see [notes](../notes/index.html#proving-that-t_0-is-correct)):
+
+   \\[
+   t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z^2 V + \delta(y,z) B + x T\_{1} + x^{2} T\_{2}.
+   \\]
+   where \\(\delta(y,z) = (z - z^{2}) \langle 1, {\mathbf{y}}^{n} \rangle + z^{3} \langle \mathbf{1}, {\mathbf{2}}^{n} \rangle\\).
+
+   Rewriting as a comparison with the identity point:
+   \\[
+   0 \stackrel{?}{=} z^2 V + \delta(y,z) B + x T\_{1} + x^{2} T\_{2} - t(x) B - {\tilde{t}}(x) {\widetilde{B}}.
+   \\]
+
+2. Second, verify the inner product argument for the vectors \\(\mathbf{l}(x), \mathbf{r}(x)\\) that form the \\(t(x)\\) (see [inner-product protocol](../inner_product_proof/index.html#verification-equation))
+
+   \\[
+   P' \overset ? 
= {\langle a \cdot {\mathbf{s}}, {\mathbf{G}} \rangle} + {\langle {\mathbf{y}^{-n}} \circ (b /{\mathbf{s}}), {\mathbf{H}} \rangle} + abQ - \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right). + \\] + + Rewriting as a comparison with the identity point and expanding \\(Q = wB\\) and \\(P' = P + t(x) wB\\) as [needed for transition to the inner-product protocol](../notes/index.html#inner-product-proof): + + \\[ + 0 \overset ? = P + t(x) wB - {\langle a \cdot {\mathbf{s}}, {\mathbf{G}} \rangle} - {\langle {\mathbf{y}^{-n}} \circ (b /{\mathbf{s}}), {\mathbf{H}} \rangle} - abwB + \sum\_{j=1}^{k} \left( L\_{j} u\_{j}^{2} + u\_{j}^{-2} R\_{j} \right), + \\] + where the [definition](../notes/index.html#proving-that-mathbflx-mathbfrx-are-correct) of \\(P\\) is: + \\[ + P = -{\widetilde{e}} {\widetilde{B}} + A + x S + {\langle z {\mathbf{1}} + z^2 {\mathbf{y}^{-n}} \circ {\mathbf{2}}^n, {\mathbf{H}} \rangle} - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle}. +\\] + +Verifier combines two equations in one by sampling a random factor \\(c \\; {\xleftarrow{\\$}} \\; {\mathbb Z\_p}\\), +multiplying the first equation by \\(c\\), and adding it with the second equation. + +Finally, verifier groups all scalars per each point and performs a single multiscalar multiplication over \\((7 + 2n + 2k)\\) points: + +\\[ +\begin{aligned} +0 \quad \stackrel{?}{=} & \quad 1 \cdot A \\\\ + + & \quad x \cdot S \\\\ + + & \quad cz^2 \cdot V \\\\ + + & \quad cx \cdot T_1 \\\\ + + & \quad cx^2 \cdot T_2 \\\\ + + & \quad \Big(w \big(t(x) - ab\big) + c \big(\delta(y,z) - t(x)\big) \Big) \cdot B\\\\ + + & \quad (-{\widetilde{e}} - c{\tilde{t}}(x)) \cdot \widetilde{B} \\\\ + + & \quad {\langle {-z\mathbf{1} - a\mathbf{s}}, {\mathbf{G}} \rangle}\\\\ + + & \quad {\langle {z\mathbf{1} + {\mathbf{y}}^{-n} \circ (z^2\mathbf{2}^n - b/{\mathbf{s}})}, {\mathbf{H}} \rangle}\\\\ + + & \quad {\langle [x_{1}^2, \dots, x_{k}^2 ], [L_1, \dots, L_{k}] \rangle}\\\\ + + & \quad {\langle [x_{1}^{-2}, \dots, x_{k}^{-2} ], [R_1, \dots, R_{k}] \rangle} +\end{aligned} +\\] where \\(1/{\mathbf{s}}\\) are inverses of \\(\mathbf{s}\\), computed as a reversed list of \\(\mathbf{s}\\). + + + + + + diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 36c6aa5b..8c05d3c1 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -28,8 +28,8 @@ pub struct Proof { impl Proof { /// Create an inner-product proof. /// - /// The proof is created with respect to the bases G, Hprime, - /// where Hprime[i] = H[i] * Hprime_factors[i]. + /// The proof is created with respect to the bases \\(G\\), \\(H'\\), + /// where \\(H'\_i = H\_i \cdot \texttt{Hprime\\_factors}\_i\\). /// /// The `verifier` is passed in as a parameter so that the /// challenges depend on the *entire* transcript (including parent @@ -123,6 +123,8 @@ impl Proof { }; } + /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication + /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details. 
pub(crate) fn verification_scalars( &self, transcript: &mut ProofTranscript, diff --git a/src/range_proof.rs b/src/range_proof.rs index 1d308e89..066f9761 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -1,5 +1,7 @@ #![allow(non_snake_case)] +#![doc(include = "../docs/range-proof-protocol.md")] + use rand::Rng; use std::iter; @@ -224,10 +226,10 @@ impl RangeProof { .chain(iter::once(c * zz)) .chain(iter::once(c * x)) .chain(iter::once(c * x * x)) - .chain(iter::once(-self.e_blinding - c * self.t_x_blinding)) .chain(iter::once( w * (self.t_x - a * b) + c * (delta(n, &y, &z) - self.t_x), )) + .chain(iter::once(-self.e_blinding - c * self.t_x_blinding)) .chain(g) .chain(h) .chain(x_sq.iter().cloned()) @@ -237,8 +239,8 @@ impl RangeProof { .chain(iter::once(&self.V)) .chain(iter::once(&self.T_1)) .chain(iter::once(&self.T_2)) - .chain(iter::once(gens.B_blinding)) .chain(iter::once(gens.B)) + .chain(iter::once(gens.B_blinding)) .chain(gens.G.iter()) .chain(gens.H.iter()) .chain(self.ipp_proof.L_vec.iter()) @@ -254,9 +256,9 @@ impl RangeProof { } /// Compute -/// $$ -/// \\delta(y,z) = (z - z^2)<1, y^n> + z^3 <1, 2^n> -/// $$ +/// \\[ +/// \delta(y,z) = (z - z^{2}) \langle 1, {\mathbf{y}}^{n} \rangle + z^{3} \langle \mathbf{1}, {\mathbf{2}}^{n} \rangle +/// \\] fn delta(n: usize, y: &Scalar, z: &Scalar) -> Scalar { let two = Scalar::from_u64(2); From b40a1bbc451112dd3bb6f2a7fa37c495fda9dfbb Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 12 Apr 2018 11:02:32 -0700 Subject: [PATCH 100/186] Generators API accepts arbitrary pair of generators via PedersenGenerators struct (#26) --- benches/bulletproofs.rs | 7 +++--- src/generators.rs | 46 +++++++++++++++++++++++--------------- src/inner_product_proof.rs | 13 ++++++----- src/lib.rs | 2 +- src/range_proof.rs | 9 ++++---- 5 files changed, 45 insertions(+), 32 deletions(-) diff --git a/benches/bulletproofs.rs b/benches/bulletproofs.rs index bfedea87..b5a23901 100644 --- a/benches/bulletproofs.rs +++ b/benches/bulletproofs.rs @@ -1,3 +1,4 @@ +#![allow(non_snake_case)] #[macro_use] extern crate criterion; use criterion::Criterion; @@ -9,13 +10,13 @@ extern crate curve25519_dalek; use curve25519_dalek::scalar::Scalar; extern crate ristretto_bulletproofs; -use ristretto_bulletproofs::{Generators, GeneratorsView}; +use ristretto_bulletproofs::{PedersenGenerators, Generators}; use ristretto_bulletproofs::ProofTranscript; use ristretto_bulletproofs::RangeProof; fn bench_create_helper(n: usize, c: &mut Criterion) { c.bench_function(&format!("create_rangeproof_n_{}", n), move |b| { - let generators = Generators::new(n, 1); + let generators = Generators::new(PedersenGenerators::default(), n, 1); let mut rng = OsRng::new().unwrap(); let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); @@ -39,7 +40,7 @@ fn bench_create_helper(n: usize, c: &mut Criterion) { fn bench_verify_helper(n: usize, c: &mut Criterion) { c.bench_function(&format!("verify_rangeproof_n_{}", n), move |b| { - let generators = Generators::new(n, 1); + let generators = Generators::new(PedersenGenerators::default(), n, 1); let mut rng = OsRng::new().unwrap(); let mut transcript = ProofTranscript::new(b"RangeproofTest"); diff --git a/src/generators.rs b/src/generators.rs index 1791034b..08436afe 100644 --- a/src/generators.rs +++ b/src/generators.rs @@ -6,10 +6,9 @@ //! //! ``` //! # extern crate ristretto_bulletproofs; -//! # use ristretto_bulletproofs::Generators; +//! # use ristretto_bulletproofs::{PedersenGenerators,Generators}; //! # fn main() { -//! -//! 
let generators = Generators::new(64,1); +//! let generators = Generators::new(PedersenGenerators::default(), 64,1); //! let view = generators.all(); //! let G0 = view.G[0]; //! let H0 = view.H[0]; @@ -89,29 +88,41 @@ pub struct GeneratorsView<'a> { pub H: &'a [RistrettoPoint], } +/// Entry point for producing a pair of base points for Pedersen commitments. +pub struct PedersenGenerators(RistrettoPoint, RistrettoPoint); + +impl PedersenGenerators { + /// Constructs a pair of pedersen generators + /// from a pair of generators provided by the user. + pub fn new(A: RistrettoPoint, B: RistrettoPoint) -> Self { + PedersenGenerators(A,B) + } +} + +impl Default for PedersenGenerators { + fn default() -> Self { + PedersenGenerators( + GeneratorsChain::new(b"Bulletproofs.Generators.B").next().unwrap(), + GeneratorsChain::new(b"Bulletproofs.Generators.B_blinding").next().unwrap() + ) + } +} + impl Generators { /// Creates generators for `m` range proofs of `n` bits each. - pub fn new(n: usize, m: usize) -> Self { - // Using unwrap is safe here, because the iterator is unbounded. - let B = GeneratorsChain::new(b"Bulletproofs.Generators.B") - .next() - .unwrap(); - let B_blinding = GeneratorsChain::new(b"Bulletproofs.Generators.B_blinding") - .next() - .unwrap(); - - let G = GeneratorsChain::new(b"Bulletproofs.Generators.G") + pub fn new(pedersen_generators: PedersenGenerators, n: usize, m: usize) -> Self { + let G = GeneratorsChain::new(pedersen_generators.0.compress().as_bytes()) .take(n * m) .collect(); - let H = GeneratorsChain::new(b"Bulletproofs.Generators.H") + let H = GeneratorsChain::new(pedersen_generators.1.compress().as_bytes()) .take(n * m) .collect(); Generators { n, m, - B, - B_blinding, + B: pedersen_generators.0, + B_blinding: pedersen_generators.1, G, H, } @@ -145,13 +156,12 @@ impl Generators { mod tests { extern crate hex; use super::*; - use curve25519_dalek::constants::RISTRETTO_BASEPOINT_POINT; #[test] fn rangeproof_generators() { let n = 2; let m = 3; - let gens = Generators::new(n, m); + let gens = Generators::new(PedersenGenerators::default(), n, m); // The concatenation of shares must be the full generator set assert_eq!( diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 8c05d3c1..77f4dceb 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -13,10 +13,6 @@ use proof_transcript::ProofTranscript; use util; -use generators::Generators; - -use sha2::Sha512; - #[derive(Serialize, Deserialize, Clone, Debug)] pub struct Proof { pub(crate) L_vec: Vec, @@ -174,6 +170,11 @@ impl Proof { (challenges_sq, challenges_inv_sq, s) } + /// This method is for testing that proof generation work, + /// but for efficiency the actual protocols would use `verification_scalars` + /// method to combine inner product verification with other checks + /// in a single multiscalar multiplication. 
+ #[allow(dead_code)] pub fn verify( &self, transcript: &mut ProofTranscript, @@ -228,11 +229,13 @@ mod tests { use super::*; use rand::OsRng; + use sha2::Sha512; fn test_helper_create(n: usize) { let mut rng = OsRng::new().unwrap(); - let gens = Generators::new(n, 1); + use generators::{PedersenGenerators,Generators}; + let gens = Generators::new(PedersenGenerators::default(), n, 1); let G = gens.share(0).G.to_vec(); let H = gens.share(0).H.to_vec(); diff --git a/src/lib.rs b/src/lib.rs index 16c147ae..0fcb7302 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -36,4 +36,4 @@ mod inner_product_proof; pub use proof_transcript::ProofTranscript; pub use range_proof::RangeProof; -pub use generators::{Generators, GeneratorsView}; +pub use generators::{PedersenGenerators, Generators, GeneratorsView}; diff --git a/src/range_proof.rs b/src/range_proof.rs index 066f9761..22ff8421 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -6,11 +6,9 @@ use rand::Rng; use std::iter; -use sha2::{Digest, Sha512}; - use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; -use curve25519_dalek::traits::{Identity, IsIdentity}; +use curve25519_dalek::traits::IsIdentity; use curve25519_dalek::scalar::Scalar; // XXX rename this maybe ?? at least `inner_product_proof::Proof` is too long. @@ -21,7 +19,7 @@ use proof_transcript::ProofTranscript; use util; -use generators::{Generators, GeneratorsView}; +use generators::GeneratorsView; struct PolyDeg3(Scalar, Scalar, Scalar); @@ -352,7 +350,8 @@ mod tests { use bincode; // Both prover and verifier have access to the generators and the proof - let generators = Generators::new(n, 1); + use generators::{PedersenGenerators,Generators}; + let generators = Generators::new(PedersenGenerators::default(), n, 1); // Serialized proof data let proof_bytes: Vec; From 19c47efc6d011ed49e243538865112f2bfd20db7 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 12 Apr 2018 14:20:11 -0700 Subject: [PATCH 101/186] Refine API (#49) - Move `V` out of the RangeProof and provide it separately (to match the actual use-case). - Rename `inner_product_proof::Proof` into `inner_product_proof::InnerProductProof` for clarity. - Move `util::inner_product` into `inner_product_proof::inner_product` for completeness (any protocol using IPP needs to compute the actual inner product of some values over the secret inputs). - Move `VecPoly1` and `Poly2` into `util` module. - Utility for making pedersen commitments more convenient. This addresses issue #32. 
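
As a rough sketch of the commitment convenience described above (an illustration, not a tested doc-example from the crate), the caller now produces the value commitment `V` itself and hands it to the verifier together with the serialized proof. The helper name `commit_value` and the use of the default generators are assumptions for this sketch.

```rust
use curve25519_dalek::ristretto::RistrettoPoint;
use curve25519_dalek::scalar::Scalar;
use ristretto_bulletproofs::PedersenGenerators;

// Commit to a 64-bit value with a caller-chosen blinding factor.
// The same PedersenGenerators must be used by prover and verifier.
fn commit_value(v: u64, v_blinding: Scalar) -> RistrettoPoint {
    let pedersen_generators = PedersenGenerators::default();
    pedersen_generators.commit(Scalar::from_u64(v), v_blinding)
}
```
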
--- benches/bulletproofs.rs | 7 ++- docs/inner-product-protocol.md | 4 +- src/generators.rs | 53 +++++++++++-------- src/inner_product_proof.rs | 54 +++++++++++++++---- src/range_proof.rs | 96 ++++++++++++---------------------- src/util.rs | 74 ++++++++++++++++---------- 6 files changed, 159 insertions(+), 129 deletions(-) diff --git a/benches/bulletproofs.rs b/benches/bulletproofs.rs index b5a23901..d6d2911a 100644 --- a/benches/bulletproofs.rs +++ b/benches/bulletproofs.rs @@ -40,13 +40,16 @@ fn bench_create_helper(n: usize, c: &mut Criterion) { fn bench_verify_helper(n: usize, c: &mut Criterion) { c.bench_function(&format!("verify_rangeproof_n_{}", n), move |b| { - let generators = Generators::new(PedersenGenerators::default(), n, 1); + let pg = PedersenGenerators::default(); + let generators = Generators::new(pg.clone(), n, 1); let mut rng = OsRng::new().unwrap(); let mut transcript = ProofTranscript::new(b"RangeproofTest"); let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); let v_blinding = Scalar::random(&mut rng); + let vc = pg.commit(Scalar::from_u64(v), v_blinding); + let rp = RangeProof::generate_proof( generators.share(0), &mut transcript, @@ -60,7 +63,7 @@ fn bench_verify_helper(n: usize, c: &mut Criterion) { // Each verification requires a clean transcript. let mut transcript = ProofTranscript::new(b"RangeproofTest"); - rp.verify(generators.share(0), &mut transcript, &mut rng, n) + rp.verify(&vc, generators.share(0), &mut transcript, &mut rng, n) }); }); } diff --git a/docs/inner-product-protocol.md b/docs/inner-product-protocol.md index 9cb40fe4..ed9fc9df 100644 --- a/docs/inner-product-protocol.md +++ b/docs/inner-product-protocol.md @@ -3,7 +3,7 @@ The `inner_product_proof` module contains API for producing a compact proof of a Inner product argument protocol =============================== -These notes explain how the protocol is implemented in the [`Proof`](struct.Proof.html) type. +These notes explain how the protocol is implemented in the [`InnerProductProof`](struct.InnerProductProof.html) type. We want to prove the relation \\[ @@ -138,4 +138,4 @@ In order to combine the computation above with other checks in a parent protocol \\{u\_{1}^{2}, \dots, u\_{k}^{2}, u\_{1}^{-2}, \dots, u\_{k}^{-2}, s_0, \dots, s_{n-1}\\}. \\] -Use the [`Proof::verification_scalars`](struct.Proof.html#method.verification_scalars) method to produce these scalars for a given inner product proof. +Use the [`InnerProductProof::verification_scalars`](struct.InnerProductProof.html#method.verification_scalars) method to produce these scalars for a given inner product proof. diff --git a/src/generators.rs b/src/generators.rs index 08436afe..1ea4aed7 100644 --- a/src/generators.rs +++ b/src/generators.rs @@ -21,7 +21,9 @@ // XXX we should use Sha3 everywhere +use curve25519_dalek::ristretto; use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::scalar::Scalar; use sha2::{Digest, Sha512}; /// The `GeneratorsChain` creates an arbitrary-long sequence of orthogonal generators. 
@@ -66,10 +68,8 @@ pub struct Generators { pub n: usize, /// Number of values or parties pub m: usize, - /// Main base of a Pedersen commitment - B: RistrettoPoint, - /// Base for the blinding factor in a Pedersen commitment - B_blinding: RistrettoPoint, + /// Bases for Pedersen commitments + pedersen_generators: PedersenGenerators, /// Per-bit generators for the bit values G: Vec, /// Per-bit generators for the bit blinding factors @@ -78,10 +78,8 @@ pub struct Generators { /// Represents a view into `Generators` relevant to a specific range proof. pub struct GeneratorsView<'a> { - /// Main base of a Pedersen commitment - pub B: &'a RistrettoPoint, - /// Base for the blinding factor in a Pedersen commitment - pub B_blinding: &'a RistrettoPoint, + /// Bases for Pedersen commitments + pub pedersen_generators: &'a PedersenGenerators, /// Per-bit generators for the bit values pub G: &'a [RistrettoPoint], /// Per-bit generators for the bit blinding factors @@ -89,40 +87,51 @@ pub struct GeneratorsView<'a> { } /// Entry point for producing a pair of base points for Pedersen commitments. -pub struct PedersenGenerators(RistrettoPoint, RistrettoPoint); +#[derive(Clone)] +pub struct PedersenGenerators { + /// Base for the committed value + pub B: RistrettoPoint, + + /// Base for the blinding factor + pub B_blinding: RistrettoPoint, +} impl PedersenGenerators { /// Constructs a pair of pedersen generators /// from a pair of generators provided by the user. - pub fn new(A: RistrettoPoint, B: RistrettoPoint) -> Self { - PedersenGenerators(A,B) + pub fn new(B: RistrettoPoint, B_blinding: RistrettoPoint) -> Self { + PedersenGenerators{B,B_blinding} + } + + /// Creates a pedersen commitment using the value scalar and a blinding factor. + pub fn commit(&self, value: Scalar, blinding: Scalar) -> RistrettoPoint { + ristretto::multiscalar_mul(&[value, blinding], &[self.B, self.B_blinding]) } } impl Default for PedersenGenerators { fn default() -> Self { - PedersenGenerators( - GeneratorsChain::new(b"Bulletproofs.Generators.B").next().unwrap(), - GeneratorsChain::new(b"Bulletproofs.Generators.B_blinding").next().unwrap() - ) + PedersenGenerators { + B: GeneratorsChain::new(b"Bulletproofs.Generators.B").next().unwrap(), + B_blinding: GeneratorsChain::new(b"Bulletproofs.Generators.B_blinding").next().unwrap() + } } } impl Generators { /// Creates generators for `m` range proofs of `n` bits each. pub fn new(pedersen_generators: PedersenGenerators, n: usize, m: usize) -> Self { - let G = GeneratorsChain::new(pedersen_generators.0.compress().as_bytes()) + let G = GeneratorsChain::new(pedersen_generators.B.compress().as_bytes()) .take(n * m) .collect(); - let H = GeneratorsChain::new(pedersen_generators.1.compress().as_bytes()) + let H = GeneratorsChain::new(pedersen_generators.B_blinding.compress().as_bytes()) .take(n * m) .collect(); Generators { n, m, - B: pedersen_generators.0, - B_blinding: pedersen_generators.1, + pedersen_generators: pedersen_generators, G, H, } @@ -131,8 +140,7 @@ impl Generators { /// Returns a view into the entirety of the generators. 
pub fn all(&self) -> GeneratorsView { GeneratorsView { - B: &self.B, - B_blinding: &self.B_blinding, + pedersen_generators: &self.pedersen_generators, G: &self.G[..], H: &self.H[..], } @@ -144,8 +152,7 @@ impl Generators { let lower = self.n * j; let upper = self.n * (j + 1); GeneratorsView { - B: &self.B, - B_blinding: &self.B_blinding, + pedersen_generators: &self.pedersen_generators, G: &self.G[lower..upper], H: &self.H[lower..upper], } diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 77f4dceb..1ba3f349 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -11,17 +11,15 @@ use curve25519_dalek::scalar::Scalar; use proof_transcript::ProofTranscript; -use util; - #[derive(Serialize, Deserialize, Clone, Debug)] -pub struct Proof { +pub struct InnerProductProof { pub(crate) L_vec: Vec, pub(crate) R_vec: Vec, pub(crate) a: Scalar, pub(crate) b: Scalar, } -impl Proof { +impl InnerProductProof { /// Create an inner-product proof. /// /// The proof is created with respect to the bases \\(G\\), \\(H'\\), @@ -38,7 +36,7 @@ impl Proof { mut H_vec: Vec, mut a_vec: Vec, mut b_vec: Vec, - ) -> Proof + ) -> InnerProductProof where I: IntoIterator, I::Item: Borrow, @@ -76,8 +74,8 @@ impl Proof { let (G_L, G_R) = G.split_at_mut(n); let (H_L, H_R) = H.split_at_mut(n); - let c_L = util::inner_product(&a_L, &b_R); - let c_R = util::inner_product(&a_R, &b_L); + let c_L = inner_product(&a_L, &b_R); + let c_R = inner_product(&a_R, &b_L); let L = ristretto::vartime::multiscalar_mul( a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)), @@ -111,7 +109,7 @@ impl Proof { H = H_L; } - return Proof { + return InnerProductProof { L_vec: L_vec, R_vec: R_vec, a: a[0], @@ -224,12 +222,31 @@ impl Proof { } } + +/// Computes an inner product of two vectors +/// \\[ +/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i. +/// \\] +/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. 
+pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { + let mut out = Scalar::zero(); + if a.len() != b.len() { + panic!("inner_product(a,b): lengths of vectors do not match"); + } + for i in 0..a.len() { + out += a[i] * b[i]; + } + out +} + + #[cfg(test)] mod tests { use super::*; use rand::OsRng; use sha2::Sha512; + use util; fn test_helper_create(n: usize) { let mut rng = OsRng::new().unwrap(); @@ -245,7 +262,7 @@ mod tests { // a and b are the vectors for which we want to prove c = let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect(); - let c = util::inner_product(&a, &b); + let c = inner_product(&a, &b); // y_inv is (the inverse of) a random challenge let y_inv = Scalar::random(&mut rng); @@ -265,7 +282,7 @@ mod tests { ); let mut verifier = ProofTranscript::new(b"innerproducttest"); - let proof = Proof::create( + let proof = InnerProductProof::create( &mut verifier, &Q, util::exp_iter(y_inv), @@ -307,4 +324,21 @@ mod tests { fn make_ipp_64() { test_helper_create(64); } + + #[test] + fn test_inner_product() { + let a = vec![ + Scalar::from_u64(1), + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + ]; + let b = vec![ + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + Scalar::from_u64(5), + ]; + assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); + } } diff --git a/src/range_proof.rs b/src/range_proof.rs index 22ff8421..37afad77 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -11,9 +11,7 @@ use curve25519_dalek::ristretto; use curve25519_dalek::traits::IsIdentity; use curve25519_dalek::scalar::Scalar; -// XXX rename this maybe ?? at least `inner_product_proof::Proof` is too long. -// maybe `use inner_product_proof::IPProof` would be better? -use inner_product_proof; +use inner_product_proof::InnerProductProof; use proof_transcript::ProofTranscript; @@ -21,17 +19,9 @@ use util; use generators::GeneratorsView; -struct PolyDeg3(Scalar, Scalar, Scalar); - -struct VecPoly2(Vec, Vec); - /// The `RangeProof` struct represents a single range proof. #[derive(Serialize, Deserialize, Clone, Debug)] pub struct RangeProof { - /// Commitment to the value - // XXX this should not be included, so that we can prove about existing commitments - // included for now so that it's easier to test - V: RistrettoPoint, /// Commitment to the bits of the value A: RistrettoPoint, /// Commitment to the blinding factors @@ -47,7 +37,7 @@ pub struct RangeProof { /// Blinding factor for the synthetic commitment to the inner-product arguments e_blinding: Scalar, /// Proof data for the inner-product argument. - ipp_proof: inner_product_proof::Proof, + ipp_proof: InnerProductProof, } impl RangeProof { @@ -62,20 +52,17 @@ impl RangeProof { ) -> RangeProof { use subtle::{Choice, ConditionallyAssignable}; - let B = generators.B; - let B_blinding = generators.B_blinding; - // Create copies of G, H, so we can pass them to the // (consuming) IPP API later. let G = generators.G.to_vec(); let H = generators.H.to_vec(); - let V = ristretto::multiscalar_mul(&[Scalar::from_u64(v), *v_blinding], &[*B, *B_blinding]); + let V = generators.pedersen_generators.commit(Scalar::from_u64(v), *v_blinding); let a_blinding = Scalar::random(rng); // Compute A = + + a_blinding * B_blinding. 
- let mut A = B_blinding * a_blinding; + let mut A = generators.pedersen_generators.B_blinding * a_blinding; for i in 0..n { // If v_i = 0, we add a_L[i] * G[i] + a_R[i] * H[i] = - H[i] // If v_i = 1, we add a_L[i] * G[i] + a_R[i] * H[i] = G[i] @@ -92,7 +79,7 @@ impl RangeProof { // Compute S = + + s_blinding * B_blinding. let S = ristretto::multiscalar_mul( iter::once(&s_blinding).chain(s_L.iter()).chain(s_R.iter()), - iter::once(B_blinding).chain(G.iter()).chain(H.iter()), + iter::once(&generators.pedersen_generators.B_blinding).chain(G.iter()).chain(H.iter()), ); // Commit to V, A, S and get challenges y, z @@ -104,8 +91,8 @@ impl RangeProof { let zz = z * z; // Compute l, r - let mut l_poly = VecPoly2::zero(n); - let mut r_poly = VecPoly2::zero(n); + let mut l_poly = util::VecPoly1::zero(n); + let mut r_poly = util::VecPoly1::zero(n); let mut exp_y = Scalar::one(); // start at y^0 = 1 let mut exp_2 = Scalar::one(); // start at 2^0 = 1 @@ -128,8 +115,8 @@ impl RangeProof { // Form commitments T_1, T_2 to t.1, t.2 let t_1_blinding = Scalar::random(rng); let t_2_blinding = Scalar::random(rng); - let T_1 = ristretto::multiscalar_mul(&[t_poly.1, t_1_blinding], &[*B, *B_blinding]); - let T_2 = ristretto::multiscalar_mul(&[t_poly.2, t_2_blinding], &[*B, *B_blinding]); + let T_1 = generators.pedersen_generators.commit(t_poly.1, t_1_blinding); + let T_2 = generators.pedersen_generators.commit(t_poly.2, t_2_blinding); // Commit to T_1, T_2 to get the challenge point x transcript.commit(T_1.compress().as_bytes()); @@ -137,7 +124,7 @@ impl RangeProof { let x = transcript.challenge_scalar(); // Evaluate t at x and run the IPP - let t_x = t_poly.0 + x * (t_poly.1 + x * t_poly.2); + let t_x = t_poly.eval(x); let t_x_blinding = zz * v_blinding + x * (t_1_blinding + x * t_2_blinding); let e_blinding = a_blinding + x * s_blinding; @@ -147,10 +134,10 @@ impl RangeProof { // Get a challenge value to combine statements for the IPP let w = transcript.challenge_scalar(); - let Q = w * B; + let Q = w * generators.pedersen_generators.B; // Generate the IPP proof - let ipp_proof = inner_product_proof::Proof::create( + let ipp_proof = InnerProductProof::create( transcript, &Q, util::exp_iter(y.invert()), @@ -161,7 +148,6 @@ impl RangeProof { ); RangeProof { - V, A, S, T_1, @@ -175,6 +161,7 @@ impl RangeProof { pub fn verify( &self, + V: &RistrettoPoint, gens: GeneratorsView, transcript: &mut ProofTranscript, rng: &mut R, @@ -183,7 +170,7 @@ impl RangeProof { // First, replay the "interactive" protocol using the proof // data to recompute all challenges. 
- transcript.commit(self.V.compress().as_bytes()); + transcript.commit(V.compress().as_bytes()); transcript.commit(self.A.compress().as_bytes()); transcript.commit(self.S.compress().as_bytes()); @@ -234,11 +221,11 @@ impl RangeProof { .chain(x_inv_sq.iter().cloned()), iter::once(&self.A) .chain(iter::once(&self.S)) - .chain(iter::once(&self.V)) + .chain(iter::once(V)) .chain(iter::once(&self.T_1)) .chain(iter::once(&self.T_2)) - .chain(iter::once(gens.B)) - .chain(iter::once(gens.B_blinding)) + .chain(iter::once(&gens.pedersen_generators.B)) + .chain(iter::once(&gens.pedersen_generators.B_blinding)) .chain(gens.G.iter()) .chain(gens.H.iter()) .chain(self.ipp_proof.L_vec.iter()) @@ -274,37 +261,6 @@ fn delta(n: usize, y: &Scalar, z: &Scalar) -> Scalar { (z - zz) * sum_of_powers_of_y - z * zz * sum_of_powers_of_2 } -impl VecPoly2 { - pub fn zero(n: usize) -> VecPoly2 { - VecPoly2(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) - } - - pub fn inner_product(&self, rhs: &VecPoly2) -> PolyDeg3 { - // Uses Karatsuba's method - let l = self; - let r = rhs; - - let t0 = util::inner_product(&l.0, &r.0); - let t2 = util::inner_product(&l.1, &r.1); - - let l0_plus_l1 = util::add_vec(&l.0, &l.1); - let r0_plus_r1 = util::add_vec(&r.0, &r.1); - - let t1 = util::inner_product(&l0_plus_l1, &r0_plus_r1) - t0 - t2; - - PolyDeg3(t0, t1, t2) - } - - pub fn eval(&self, x: Scalar) -> Vec { - let n = self.0.len(); - let mut out = vec![Scalar::zero(); n]; - for i in 0..n { - out[i] += self.0[i] + self.1[i] * x; - } - out - } -} - #[cfg(test)] mod tests { use super::*; @@ -355,6 +311,7 @@ mod tests { // Serialized proof data let proof_bytes: Vec; + let value_commitment: RistrettoPoint; // Prover's scope { @@ -376,6 +333,9 @@ mod tests { // 2. Serialize proof_bytes = bincode::serialize(&range_proof).unwrap(); + + let gens = generators.share(0); + value_commitment = gens.pedersen_generators.commit(Scalar::from_u64(v), v_blinding); } println!( @@ -394,7 +354,12 @@ mod tests { let mut transcript = ProofTranscript::new(b"RangeproofTest"); assert!( range_proof - .verify(generators.share(0), &mut transcript, &mut rng, n) + .verify( + &value_commitment, + generators.share(0), + &mut transcript, + &mut rng, + n) .is_ok() ); @@ -402,7 +367,12 @@ mod tests { let mut transcript = ProofTranscript::new(b""); assert!( range_proof - .verify(generators.share(0), &mut transcript, &mut rng, n) + .verify( + &value_commitment, + generators.share(0), + &mut transcript, + &mut rng, + n) .is_err() ); } diff --git a/src/util.rs b/src/util.rs index 58224a47..00c4a8a1 100644 --- a/src/util.rs +++ b/src/util.rs @@ -2,6 +2,13 @@ #![allow(non_snake_case)] use curve25519_dalek::scalar::Scalar; +use inner_product_proof::inner_product; + +/// Represents a degree-1 vector polynomial \\(\mathbf{a} + \mathbf{b} \cdot x\\). +pub struct VecPoly1(pub Vec, pub Vec); + +/// Represents a degree-2 scalar polynomial \\(a + b \cdot x + c \cdot x^2\\) +pub struct Poly2(pub Scalar, pub Scalar, pub Scalar); /// Provides an iterator over the powers of a `Scalar`. 
/// @@ -27,18 +34,6 @@ pub fn exp_iter(x: Scalar) -> ScalarExp { ScalarExp { x, next_exp_x } } -pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { - let mut out = Scalar::zero(); - if a.len() != b.len() { - // throw some error - println!("lengths of vectors don't match for inner product multiplication"); - } - for i in 0..a.len() { - out += a[i] * b[i]; - } - out -} - pub fn add_vec(a: &[Scalar], b: &[Scalar]) -> Vec { let mut out = Vec::new(); if a.len() != b.len() { @@ -51,6 +46,44 @@ pub fn add_vec(a: &[Scalar], b: &[Scalar]) -> Vec { out } +impl VecPoly1 { + pub fn zero(n: usize) -> Self { + VecPoly1(vec![Scalar::zero(); n], vec![Scalar::zero(); n]) + } + + pub fn inner_product(&self, rhs: &VecPoly1) -> Poly2 { + // Uses Karatsuba's method + let l = self; + let r = rhs; + + let t0 = inner_product(&l.0, &r.0); + let t2 = inner_product(&l.1, &r.1); + + let l0_plus_l1 = add_vec(&l.0, &l.1); + let r0_plus_r1 = add_vec(&r.0, &r.1); + + let t1 = inner_product(&l0_plus_l1, &r0_plus_r1) - t0 - t2; + + Poly2(t0, t1, t2) + } + + pub fn eval(&self, x: Scalar) -> Vec { + let n = self.0.len(); + let mut out = vec![Scalar::zero(); n]; + for i in 0..n { + out[i] += self.0[i] + self.1[i] * x; + } + out + } +} + +impl Poly2 { + pub fn eval(&self, x: Scalar) -> Scalar { + self.0 + x * (self.1 + x * self.2) + } +} + + #[cfg(test)] mod tests { use super::*; @@ -64,21 +97,4 @@ mod tests { assert_eq!(exp_2[2], Scalar::from_u64(4)); assert_eq!(exp_2[3], Scalar::from_u64(8)); } - - #[test] - fn test_inner_product() { - let a = vec![ - Scalar::from_u64(1), - Scalar::from_u64(2), - Scalar::from_u64(3), - Scalar::from_u64(4), - ]; - let b = vec![ - Scalar::from_u64(2), - Scalar::from_u64(3), - Scalar::from_u64(4), - Scalar::from_u64(5), - ]; - assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); - } } From 5bd976ce5733c426b41a00c9daf21a25b838c7af Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 12 Apr 2018 15:24:25 -0700 Subject: [PATCH 102/186] Add n to the transcript to domain-separate rangeproofs of different sizes (#50) --- src/range_proof.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/range_proof.rs b/src/range_proof.rs index 37afad77..59fd389d 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -52,6 +52,9 @@ impl RangeProof { ) -> RangeProof { use subtle::{Choice, ConditionallyAssignable}; + // Commit the range size to domain-separate from rangeproofs of different lengths. + transcript.commit_u64(n as u64); + // Create copies of G, H, so we can pass them to the // (consuming) IPP API later. let G = generators.G.to_vec(); @@ -170,6 +173,7 @@ impl RangeProof { // First, replay the "interactive" protocol using the proof // data to recompute all challenges. + transcript.commit_u64(n as u64); transcript.commit(V.compress().as_bytes()); transcript.commit(self.A.compress().as_bytes()); transcript.commit(self.S.compress().as_bytes()); From a4ca3f923fc04e10a1f445c3b235e5637094130b Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 12 Apr 2018 16:32:27 -0700 Subject: [PATCH 103/186] Refine API docs (#51) This adds public URLs to all the docs in README and documents usage for `RangeProof::create|verify`. Closes #39. 
--- README.md | 23 ++++++++++++++++++----- src/generators.rs | 6 +++--- src/range_proof.rs | 34 +++++++++++++++++++++++++++++++++- 3 files changed, 54 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 19f6f5a8..f0b26f18 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,18 @@ A pure-Rust implementation of [Bulletproofs][bp_website] using [Ristretto][ristretto]. This crate contains both an implementation and a set of notes on how and why -Bulletproofs work. The external documentation describes how to use this -crate's API, while the internal documentation contains the notes. +Bulletproofs work. The [external documentation][doc_external] describes how to use this +crate’s API, while the [internal documentation][doc_internal] contains the notes. + +## Documentation + +* [Public API documentation][doc_external] +* [Internal documentation][doc_internal] + * [Notes on how Bulletproofs work][bp_notes] (located in the internal `notes` module) + * [Range proof protocol description][rp_notes] + * [Inner product protocol description][ipp_notes] + + Unfortunately, `cargo doc` does not yet have support for custom HTML injection and for documenting private members, so the documentation is built using: @@ -13,11 +23,9 @@ make doc # Builds external documentation make doc-internal # Builds internal documentation ``` -Unfortunately `cargo doc --open` rebuilds the docs without the custom +Note: `cargo doc --open` rebuilds the docs without the custom invocation, so it may be necessary to rerun `make`. -The description of how Bulletproofs work is found in the (internal) `notes` module. - ## WARNING This code is still research-quality. It is not (yet) suitable for deployment. @@ -40,4 +48,9 @@ This crate uses [criterion.rs][criterion] for benchmarks. [bp_website]: https://crypto.stanford.edu/bulletproofs/ [ristretto]: https://doc.dalek.rs/curve25519_dalek/ristretto/index.html +[doc_external]: https://doc.dalek.rs/ristretto_bulletproofs/index.html +[doc_internal]: https://doc-internal.dalek.rs/ristretto_bulletproofs/index.html +[bp_notes]: https://doc-internal.dalek.rs/ristretto_bulletproofs/notes/index.html +[rp_notes]: https://doc-internal.dalek.rs/ristretto_bulletproofs/range_proof/index.html +[ipp_notes]: https://doc-internal.dalek.rs/ristretto_bulletproofs/inner_product_proof/index.html [criterion]: https://github.com/japaric/criterion.rs diff --git a/src/generators.rs b/src/generators.rs index 1ea4aed7..bf5f73af 100644 --- a/src/generators.rs +++ b/src/generators.rs @@ -86,7 +86,7 @@ pub struct GeneratorsView<'a> { pub H: &'a [RistrettoPoint], } -/// Entry point for producing a pair of base points for Pedersen commitments. +/// Represents a pair of base points for Pedersen commitments. #[derive(Clone)] pub struct PedersenGenerators { /// Base for the committed value @@ -97,13 +97,13 @@ pub struct PedersenGenerators { } impl PedersenGenerators { - /// Constructs a pair of pedersen generators + /// Constructs a pair of Pedersen generators /// from a pair of generators provided by the user. pub fn new(B: RistrettoPoint, B_blinding: RistrettoPoint) -> Self { PedersenGenerators{B,B_blinding} } - /// Creates a pedersen commitment using the value scalar and a blinding factor. + /// Creates a Pedersen commitment using the value scalar and a blinding factor. 
pub fn commit(&self, value: Scalar, blinding: Scalar) -> RistrettoPoint { ristretto::multiscalar_mul(&[value, blinding], &[self.B, self.B_blinding]) } diff --git a/src/range_proof.rs b/src/range_proof.rs index 59fd389d..cffa26c3 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -1,4 +1,5 @@ #![allow(non_snake_case)] +#![deny(missing_docs)] #![doc(include = "../docs/range-proof-protocol.md")] @@ -41,7 +42,23 @@ pub struct RangeProof { } impl RangeProof { - /// Create a rangeproof. + /// Create a rangeproof for a given pair of value `v` and + /// blinding scalar `v_blinding`. + /// + /// Usage: + /// ```ascii + /// let n = 64; + /// let generators = Generators::new(PedersenGenerators::default(), n, 1); + /// let mut transcript = ProofTranscript::new(b"RangeproofTest"); + /// let proof = RangeProof::generate_proof( + /// generators.share(0), + /// &mut transcript, + /// &mut rng, + /// n, + /// v, + /// &v_blinding, + /// ); + /// ``` pub fn generate_proof( generators: GeneratorsView, transcript: &mut ProofTranscript, @@ -162,6 +179,21 @@ impl RangeProof { } } + /// Verifies a rangeproof for a given value commitment \\(V\\). + /// + /// Usage: + /// ```ascii + /// let n = 64; + /// let generators = Generators::new(PedersenGenerators::default(), n, 1); + /// let mut transcript = ProofTranscript::new(b"RangeproofTest"); + /// proof.verify( + /// &V, + /// generators.share(0), + /// &mut transcript, + /// &mut OsRng::new().unwrap(), + /// n + /// ); + /// ``` pub fn verify( &self, V: &RistrettoPoint, From 5f7f81d88e5a08ceead7f0774125b80aab27ea39 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 12 Apr 2018 21:45:22 -0700 Subject: [PATCH 104/186] Fix typo in the rangeproof notes: z*V -> z^2*V --- docs/notes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/notes.md b/docs/notes.md index 0aeb7e20..d03a78ee 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -315,7 +315,7 @@ The commitments \\(V\\), \\(T\_{1}\\), \\(T\_{2}\\) are related to each other an + &\quad & \quad & + & \quad & \quad & + & \quad & \quad& + &\quad & \quad & + \\\\ {\tilde{t}}(x) {\widetilde{B}} &\quad &= \quad & z^2 {\widetilde{v}} {\widetilde{B}} & \quad &+ \quad & 0 {\widetilde{B}} & \quad &+ \quad& x {\tilde{t}}\_{1} {\widetilde{B}} &\quad &+\quad & x^{2} {\tilde{t}}\_{2} {\widetilde{B}} \\\\ \shortparallel &\quad & \quad & \shortparallel & \quad & \quad & \shortparallel & \quad & \quad& \shortparallel &\quad & \quad & \shortparallel \\\\ - &\quad &= \quad & z V & \quad &+ \quad & \delta(y,z) B & \quad &+ \quad& x T\_{1} &\quad &+\quad & x^{2} T\_{2} + &\quad &= \quad & z^2 V & \quad &+ \quad & \delta(y,z) B & \quad &+ \quad& x T\_{1} &\quad &+\quad & x^{2} T\_{2} \end{aligned} \\] Notice that the sum of each column is a commitment to the variable in the top From f1a82a09d0b53687b19ccd9b051dbd53463056b1 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 12 Apr 2018 22:45:27 -0700 Subject: [PATCH 105/186] Fix typo in the rangeproof docs: x->u --- docs/range-proof-protocol.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/range-proof-protocol.md b/docs/range-proof-protocol.md index 68c074b8..0c0d082d 100644 --- a/docs/range-proof-protocol.md +++ b/docs/range-proof-protocol.md @@ -178,8 +178,8 @@ Finally, verifier groups all scalars per each point and performs a single multis + & \quad (-{\widetilde{e}} - c{\tilde{t}}(x)) \cdot \widetilde{B} \\\\ + & \quad {\langle {-z\mathbf{1} - a\mathbf{s}}, {\mathbf{G}} \rangle}\\\\ + & \quad {\langle {z\mathbf{1} + 
{\mathbf{y}}^{-n} \circ (z^2\mathbf{2}^n - b/{\mathbf{s}})}, {\mathbf{H}} \rangle}\\\\ - + & \quad {\langle [x_{1}^2, \dots, x_{k}^2 ], [L_1, \dots, L_{k}] \rangle}\\\\ - + & \quad {\langle [x_{1}^{-2}, \dots, x_{k}^{-2} ], [R_1, \dots, R_{k}] \rangle} + + & \quad {\langle [u_{1}^2, \dots, u_{k}^2 ], [L_1, \dots, L_{k}] \rangle}\\\\ + + & \quad {\langle [u_{1}^{-2}, \dots, u_{k}^{-2} ], [R_1, \dots, R_{k}] \rangle} \end{aligned} \\] where \\(1/{\mathbf{s}}\\) are inverses of \\(\mathbf{s}\\), computed as a reversed list of \\(\mathbf{s}\\). From 7d40ccfa7d751ad9899c6a36ac88b2cd7bd48c04 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 13 Apr 2018 11:00:01 -0700 Subject: [PATCH 106/186] Add MIT license --- LICENSE.txt | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 LICENSE.txt diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 00000000..4969a6e0 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Chain, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. From ba6de4c96c5b15bb79255328d72fdfbe51b01ac3 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 13 Apr 2018 11:24:29 -0700 Subject: [PATCH 107/186] tweak readme --- README.md | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index f0b26f18..48c24506 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,10 @@ This crate contains both an implementation and a set of notes on how and why Bulletproofs work. The [external documentation][doc_external] describes how to use this crate’s API, while the [internal documentation][doc_internal] contains the notes. +## WARNING + +This code is still research-quality. It is not (yet) suitable for deployment. + ## Documentation * [Public API documentation][doc_external] @@ -26,14 +30,15 @@ make doc-internal # Builds internal documentation Note: `cargo doc --open` rebuilds the docs without the custom invocation, so it may be necessary to rerun `make`. -## WARNING - -This code is still research-quality. It is not (yet) suitable for deployment. - ## Tests Run tests with `cargo test`. +## Benchmarks + +This crate uses [criterion.rs][criterion] for benchmarks. Run benchmarks with +`cargo bench`. + ## Features The `yolocrypto` feature enables the `yolocrypto` feature in @@ -44,7 +49,19 @@ Bulletproofs, the `target_cpu` must support AVX2: RUSTFLAGS="-C target_cpu=skylake" cargo bench --features "yolocrypto" ``` -This crate uses [criterion.rs][criterion] for benchmarks. 
+Skylake-X CPUs have double the AVX2 registers. To use them, try + +```text +RUSTFLAGS="-C target_cpu=skylake-avx512" cargo bench --features "yolocrypto" +``` + +This prevents spills in the AVX2 parallel field multiplication code, but causes +worse code generation elsewhere ¯\\_(ツ)_/¯ + +## About + +This is a research project being built for Chain, Inc, by Henry de Valence, +Cathie Yun, and Oleg Andreev. [bp_website]: https://crypto.stanford.edu/bulletproofs/ [ristretto]: https://doc.dalek.rs/curve25519_dalek/ristretto/index.html From 30ac2dbf0f5a33ee34d075428ef2470ee4b73ea8 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 13 Apr 2018 11:25:48 -0700 Subject: [PATCH 108/186] computers are bad --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 48c24506..63339971 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ RUSTFLAGS="-C target_cpu=skylake-avx512" cargo bench --features "yolocrypto" ``` This prevents spills in the AVX2 parallel field multiplication code, but causes -worse code generation elsewhere ¯\\_(ツ)_/¯ +worse code generation elsewhere ¯\\\_(ツ)\_/¯ ## About From 534f43fd239bda664aed1767c0a9394bab6714c0 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 13 Apr 2018 11:38:42 -0700 Subject: [PATCH 109/186] update cargo.toml --- Cargo.toml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b9999b9c..abe67906 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,15 @@ [package] name = "ristretto-bulletproofs" version = "0.1.0" -authors = ["Cathie "] +authors = ["Cathie Yun ", + "Henry de Valence ", + "Oleg Andreev "] +readme = "README.md" +license = "MIT" +repository = "https://github.com/chain/ristretto-bulletproofs" +categories = ["cryptography"] +keywords = ["cryptography", "ristretto", "zero-knowledge", "bulletproofs"] +description = "A pure-Rust implementation of Bulletproofs using Ristretto" [dependencies] curve25519-dalek = { version = "^0.16", features = ["serde", "nightly"] } @@ -23,7 +31,6 @@ bincode = "1" [features] yolocrypto = ["curve25519-dalek/yolocrypto"] -std = ["curve25519-dalek/std"] [[bench]] name = "bulletproofs" From bf37530ffd9717005fc5f30a94da88a6833bc9ff Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Mon, 16 Apr 2018 16:50:21 -0700 Subject: [PATCH 110/186] Rename remaining x -> u in the IPP implementation (#53) This brings the implementation in sync with the notes. 
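For context on the challenge rename in the inner-product proof diff that follows: each folding round replaces the vector halves with a' = u*a_L + u_inv*a_R and b' = u_inv*b_L + u*b_R, so that <a', b'> = <a, b> + u^2*<a_L, b_R> + u^(-2)*<a_R, b_L>, which is what the L and R commitments account for. A minimal sketch of that identity over a toy prime field (the modulus 101 and all helper names are illustrative; the crate itself works over curve25519 scalars):

```rust
// Toy check of the folding identity behind one round of the inner-product
// argument, over Z/101 instead of curve25519 scalars.
const P: u64 = 101;

fn mul(a: u64, b: u64) -> u64 { (a * b) % P }
fn add(a: u64, b: u64) -> u64 { (a + b) % P }

// Modular exponentiation; used for inversion via Fermat's little theorem.
fn pow(mut base: u64, mut exp: u64) -> u64 {
    let mut acc = 1;
    while exp > 0 {
        if exp & 1 == 1 { acc = mul(acc, base); }
        base = mul(base, base);
        exp >>= 1;
    }
    acc
}
fn inv(a: u64) -> u64 { pow(a, P - 2) }

fn inner(a: &[u64], b: &[u64]) -> u64 {
    a.iter().zip(b.iter()).fold(0, |acc, (x, y)| add(acc, mul(*x, *y)))
}

fn main() {
    // Split a length-4 instance into left and right halves.
    let (a_l, a_r) = (vec![3u64, 7], vec![2u64, 9]);
    let (b_l, b_r) = (vec![5u64, 4], vec![8u64, 6]);
    let c = add(inner(&a_l, &b_l), inner(&a_r, &b_r)); // <a, b>
    let c_l = inner(&a_l, &b_r); // the scalar committed in L
    let c_r = inner(&a_r, &b_l); // the scalar committed in R

    let u = 17u64;
    let u_inv = inv(u);

    // One folding round: a' = u*a_L + u_inv*a_R, b' = u_inv*b_L + u*b_R.
    let a_next: Vec<u64> = (0..2).map(|i| add(mul(u, a_l[i]), mul(u_inv, a_r[i]))).collect();
    let b_next: Vec<u64> = (0..2).map(|i| add(mul(u_inv, b_l[i]), mul(u, b_r[i]))).collect();

    // The folded inner product satisfies <a', b'> = <a, b> + u^2*c_L + u^(-2)*c_R.
    let expected = add(c, add(mul(mul(u, u), c_l), mul(mul(u_inv, u_inv), c_r)));
    assert_eq!(inner(&a_next, &b_next), expected);
}
```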
--- src/inner_product_proof.rs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 1ba3f349..7e571c16 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -93,14 +93,14 @@ impl InnerProductProof { verifier.commit(L.compress().as_bytes()); verifier.commit(R.compress().as_bytes()); - let x = verifier.challenge_scalar(); - let x_inv = x.invert(); + let u = verifier.challenge_scalar(); + let u_inv = u.invert(); for i in 0..n { - a_L[i] = a_L[i] * x + x_inv * a_R[i]; - b_L[i] = b_L[i] * x_inv + x * b_R[i]; - G_L[i] = ristretto::vartime::multiscalar_mul(&[x_inv, x], &[G_L[i], G_R[i]]); - H_L[i] = ristretto::vartime::multiscalar_mul(&[x, x_inv], &[H_L[i], H_R[i]]); + a_L[i] = a_L[i] * u + u_inv * a_R[i]; + b_L[i] = b_L[i] * u_inv + u * b_R[i]; + G_L[i] = ristretto::vartime::multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]); + H_L[i] = ristretto::vartime::multiscalar_mul(&[u, u_inv], &[H_L[i], H_R[i]]); } a = a_L; @@ -137,12 +137,12 @@ impl InnerProductProof { challenges.push(transcript.challenge_scalar()); } - // 2. Compute 1/(x_k...x_1) and 1/x_k, ..., 1/x_1 + // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1 let mut challenges_inv = challenges.clone(); let allinv = Scalar::batch_invert(&mut challenges_inv); - // 3. Compute x_i^2 and (1/x_i)^2 + // 3. Compute u_i^2 and (1/u_i)^2 for i in 0..lg_n { // XXX missing square fn upstream @@ -159,10 +159,10 @@ impl InnerProductProof { for i in 1..n { let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize; let k = 1 << lg_i; - // The challenges are stored in "creation order" as [x_k,...,x_1], - // so x_{lg(i)+1} = is indexed by (lg_n-1) - lg_i - let x_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i]; - s.push(s[i - k] * x_lg_i_sq); + // The challenges are stored in "creation order" as [u_k,...,u_1], + // so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i + let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i]; + s.push(s[i - k] * u_lg_i_sq); } (challenges_sq, challenges_inv_sq, s) @@ -186,7 +186,7 @@ impl InnerProductProof { I: IntoIterator, I::Item: Borrow, { - let (x_sq, x_inv_sq, s) = self.verification_scalars(transcript); + let (u_sq, u_inv_sq, s) = self.verification_scalars(transcript); let a_times_s = s.iter().map(|s_i| self.a * s_i); @@ -198,15 +198,15 @@ impl InnerProductProof { .zip(inv_s) .map(|(h_i, s_i_inv)| (self.b * s_i_inv) * h_i.borrow()); - let neg_x_sq = x_sq.iter().map(|xi| -xi); - let neg_x_inv_sq = x_inv_sq.iter().map(|xi| -xi); + let neg_u_sq = u_sq.iter().map(|ui| -ui); + let neg_u_inv_sq = u_inv_sq.iter().map(|ui| -ui); let expect_P = ristretto::vartime::multiscalar_mul( iter::once(self.a * self.b) .chain(a_times_s) .chain(h_times_b_div_s) - .chain(neg_x_sq) - .chain(neg_x_inv_sq), + .chain(neg_u_sq) + .chain(neg_u_inv_sq), iter::once(Q) .chain(G.iter()) .chain(H.iter()) From 3cadefe4a68a288c99ff307a4f0cf796eee3da60 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Fri, 20 Apr 2018 12:10:57 -0700 Subject: [PATCH 111/186] Clarify notes on how rangeproof is built up (#56) This closes issue #54. --- docs/notes.md | 59 +++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 45 insertions(+), 14 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index d03a78ee..e3398e3f 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -114,17 +114,31 @@ the verifier. Proving range statements with bit vectors ----------------------------------------- -Write the bits of \\(v\\) as \\({\mathbf{a}}\\). 
If \\({\mathbf{2}}^{n}\\) is the -vector \\((1,2,4,\ldots,2^{n-1})\\) of powers of \\(2\\), then +Let \\({\mathbf{a}}\\) be the vector of bits of \\(v\\). +Then \\(v\\) can be represented as an inner product of bits \\({\mathbf{a}}\\) +and powers of two \\({\mathbf{2}}^{n} = (1,2,4,\ldots,2^{n-1})\\): +\\[ +\begin{aligned} + v &= {\langle {\mathbf{a}}, {\mathbf{2}}^{n} \rangle} \\\\ + &= a_{0}\cdot 2^0 + \dots + a_{n-1}\cdot 2^{n-1}. +\end{aligned} +\\] +We need \\({\mathbf{a}}\\) to be a vector of integers \\(\\{0,1\\}\\). +This can be expressed with an additional condition +\\[ +{\mathbf{a}} \circ ({\mathbf{a}} - {\mathbf{1}}) = {\mathbf{0}}, +\\] +where \\({\mathbf{x}} \circ {\mathbf{y}}\\) denotes the entry-wise multiplication of two vectors. +The result of multiplication can be all-zero if and only if every bit is actually \\(0\\) or[^1] \\(1\\). + +As a result of representing value in binary, the range condition \\(v \in [0, 2^{n})\\) +is equivalent to the pair of conditions \\[ \begin{aligned} {\langle {\mathbf{a}}, {\mathbf{2}}^{n} \rangle} &= v, \\\\ - {\mathbf{a}} \circ ({\mathbf{a}} - {\mathbf{1}}) &= {\mathbf{0}}^{n} . + {\mathbf{a}} \circ ({\mathbf{a}} - {\mathbf{1}}) &= {\mathbf{0}}. \end{aligned} \\] -Here \\({\mathbf{x}} \circ {\mathbf{y}}\\) denotes the entry-wise -multiplication of two vectors. -Together, these conditions imply the range condition. We will eventually need to make separate commitments to the vectors \\({\mathbf{a}}\\) and \\({\mathbf{a}} - {\mathbf{1}}\\), so we set @@ -138,13 +152,21 @@ eventually need to make separate commitments to the vectors \end{aligned} \\] +[^1]: Generally, condition \\(x=0 \vee y=0\\) can be expressed as \\(x \cdot y = 0\\), +as the product can be zero if and only if at least one of the terms is zero. +This trick allows implementing logical `OR` with any number of terms. + + Proving vectors of statements with a single statement ----------------------------------------------------- The statements above are statements about vectors, or equivalently, a -vector of statements about each entry. Now, we want to combine these -into a single statement. Since \\({\mathbf{b}} = {\mathbf{0}}\\) if and only -if \\({\langle {\mathbf{b}}, {\mathbf{y}}^{n} \rangle} = 0\\) for every \\(y\\), +vector of statements about each entry. We want to combine all of these +into a single statement. + +First, we will combine each of the two vector-statements into a single statement. +Since \\({\mathbf{b}} = {\mathbf{0}}\\) if and only +if[^2] \\({\langle {\mathbf{b}}, {\mathbf{y}}^{n} \rangle} = 0\\) for every \\(y\\), the statements above are implied by \\[ \begin{aligned} @@ -153,8 +175,10 @@ the statements above are implied by {\langle {\mathbf{a}}\_{L}, {\mathbf{a}}\_{R} \circ {\mathbf{y}}^{n} \rangle} &= 0 \end{aligned} \\] -for the verifier’s choice of a challenge value \\(y\\). These statements can -then be combined in the same way, using the verifier’s choice of \\(z\\): +for the verifier’s choice of a challenge value \\(y\\). + +The three resulting statements can then be combined in the same way, +using the verifier’s choice of \\(z\\): \\[ \begin{aligned} z^{2} v @@ -165,6 +189,13 @@ z^{2} v \end{aligned} \\] +[^2]: This is because the polynomial in terms of \\(y\\) is zero at every point +if and only if every term of it is zero. The verifier is going to sample +a random \\(y\\) after the prover commits to all the values forming the terms of +that polynomial, making the probability that the prover cheated negligible. 
+This trick allows implementing logical `AND` with any number of terms. + + Combining inner-products ------------------------ @@ -322,7 +353,7 @@ Notice that the sum of each column is a commitment to the variable in the top row using the blinding factor in the second row. The sum of all of the columns is \\(t(x) B + {\tilde{t}}(x) {\widetilde{B}}\\), a commitment to the value -of \\(t\\) at the point \\(x\\), using the synthetic blinding factor[^1] +of \\(t\\) at the point \\(x\\), using the synthetic blinding factor[^3] \\[ {\tilde{t}}(x) = z^{2} {\tilde{v}} + x {\tilde{t}}\_{1} + x^{2} {\tilde{t}}\_{2}. \\] @@ -334,7 +365,7 @@ bottom row of the diagram to check consistency: t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z^2 V + \delta(y,z) B + x T\_{1} + x^{2} T\_{2}. \\] -[^1]: The blinding factor is synthetic in the sense that it is +[^3]: The blinding factor is synthetic in the sense that it is synthesized from the blinding factors of the other commitments. Proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct @@ -457,7 +488,7 @@ for the following relation: c &{}={}&& {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} \end{aligned} \\] -Let’s compress these two statements into one equation using an +Let’s combine these two statements into one equation using an indeterminate variable \\(w \in {\mathbb Z\_{p}^{\times}}\\) and multiplying the second equation by an orthogonal generator \\(B \in {\mathbb G}\\): From 634d28ab2fdcd23803d9f9c5e1676727cb8a21bc Mon Sep 17 00:00:00 2001 From: Cathie Date: Tue, 24 Apr 2018 16:41:14 -0700 Subject: [PATCH 112/186] Squashed commit of the following: commit 7ef75ba191a15625127fe9a16da052e0dfbb4c14 Author: Cathie Date: Tue Apr 24 13:58:15 2018 -0700 error handling fixups commit ae72cdc489f6c7f6a87175a5a400169e78e5c329 Author: Cathie Date: Thu Apr 19 11:20:05 2018 -0700 cargo fmt commit 355467d7f14f6756c35cc25e331c77c922e92c34 Author: Cathie Date: Thu Apr 19 11:18:13 2018 -0700 Make states consume self, also pass in transcript to dealers instaed of storing it directly commit 29316ac8e94681efe6ba04e34b09281f898ab13a Author: Cathie Date: Wed Apr 18 19:13:48 2018 -0700 use seperate test file, move create_multi to test file commit b5f98c522b3097df4d5a5ae6d65a66d463dc6668 Author: Cathie Date: Wed Apr 18 19:03:31 2018 -0700 remove l, r from final proof commit dc0fa3a23232441d4bd2929b2761f15a8e44ad60 Author: Cathie Date: Wed Apr 18 18:19:24 2018 -0700 add Valuechallenge and PolyChallenge messages commit 7dd9d31973dc9610c5b13447528aaa17828d8d96 Author: Cathie Date: Tue Apr 17 12:40:43 2018 -0700 break aggregated_rp into multiple files commit 7da8123aad0233932ccc883fcaf788ee3677d1c1 Merge: 6078876 525cf93 Author: Cathie Date: Tue Apr 17 11:47:23 2018 -0700 merge with main commit 6078876677e159eb1c8fdc25908a71b3b58db888 Author: Cathie Date: Thu Mar 22 14:22:11 2018 -0700 clean up code commit 512c664cf2ac16c973da71eb24d29cdb1ab6399e Author: Cathie Date: Thu Mar 22 14:10:16 2018 -0700 fix P calculation for aggregated version commit c67ef503f532db529064e3811be1ac6f5b01f828 Author: Cathie Date: Thu Mar 22 13:12:38 2018 -0700 fix t_check by fixing delta, t_check passes all j commit 8d889bced1de9521b8391bd85a4cf2baf097c9e7 Author: Cathie Date: Thu Mar 22 13:10:30 2018 -0700 debugging - breaking mega_check into pieces commit 6facad5c18f57346f67c302126ebdbccc2fd5782 Author: Cathie Date: Thu Mar 22 11:44:38 2018 -0700 updated tests commit 1537605a06ba17148af34d6edd0d9787b043dfde Author: Cathie Date: Thu Mar 22 11:02:31 2018 -0700 debugging - passes 1 & 
2 but not 3 commit 1c66ded0bf53dcfd1a58eeb187f9867a937a4471 Author: Cathie Date: Wed Mar 21 17:36:03 2018 -0700 implemented aggregated & optimized verifier math, but only passes for j=1... WIP commit 097fd7fb3bd62229972e7003716423e0f2e44cf6 Author: Cathie Date: Wed Mar 21 14:35:57 2018 -0700 update delta function - still only passes j=1 case, not done with all verifier math updates commit 8ecb4fa7f8208c8a1c9fe3f426467485ee1ed688 Author: Cathie Date: Wed Mar 21 13:55:50 2018 -0700 clean up code, some renames commit 45637cc9d0a8caf752a2cb0fe89564153fc1cba7 Author: Cathie Date: Wed Mar 21 13:49:28 2018 -0700 add ipp, single value optimizations to aggregated range proof - did not update verifier math, so it only passes the one party check. commit c31f551828f35df4993ebaf3c75c5b90272a20db Author: Cathie Date: Mon Mar 19 18:22:16 2018 -0700 cleanup commit 1eccd45af62794fb31f94c5659433072fdd3469c Author: Cathie Date: Mon Mar 19 17:41:43 2018 -0700 rearrange structs to be next to impl commit 3194ed681c5923c5224277c237e2bf296144864e Author: Cathie Date: Mon Mar 19 17:40:41 2018 -0700 add vecPoly2 and PolyDeg3 ops to util file commit f9aa757458f86318f084fd528b209e07f5b5651b Merge: 60de532 6b017d1 Author: Henry de Valence Date: Mon Mar 19 17:10:22 2018 -0700 Merge branch 'master' into multi_rp commit 60de53274d101339b893eaecc70ebe2edb5768b8 Author: Cathie Date: Mon Mar 19 16:47:21 2018 -0700 some renaming commit 5b6bf6226d658074fc852573812dce6d573304d2 Author: Cathie Date: Mon Mar 19 16:35:33 2018 -0700 reformat, change generator passing commit e8583bfab14c81266252f3276ccabf6838c94386 Author: Oleg Andreev Date: Wed Mar 14 17:33:01 2018 -0700 revert single range proof commit ec9c59cab6517350dbcae227253e775ffa87ed1e Merge: 2fb1c2f fc6791b Author: Oleg Andreev Date: Wed Mar 14 17:31:33 2018 -0700 merge multi RP fixes commit fc6791be88fd9eb4964f54440214c4778df095ef Author: Oleg Andreev Date: Wed Mar 14 17:25:21 2018 -0700 cleanup warnings commit a44e6ee15db3476d1ebec0d24f6cd8a1915931a9 Author: Oleg Andreev Date: Wed Mar 14 17:22:07 2018 -0700 tests for the multi-party RP commit 5b1888b17b0c56dc81d44f1a81143160c7447e38 Author: Oleg Andreev Date: Wed Mar 14 17:01:36 2018 -0700 wip commit 4c4e4c6907370e4979b0c3130e7f6d31e21151e2 Author: Oleg Andreev Date: Wed Mar 14 16:51:23 2018 -0700 switch to new generators commit 405e03836e394c5a4d3203db0ccd0dd95b58ba75 Author: Oleg Andreev Date: Wed Mar 14 16:16:46 2018 -0700 wip commit 6a66a96b7c2cf13673f7aa2482586da55c486921 Author: Oleg Andreev Date: Tue Mar 13 17:50:14 2018 -0700 wip commit 3f8468a307a8723423502c4a08c1432a2e1bc8ed Author: Oleg Andreev Date: Tue Mar 13 17:41:53 2018 -0700 wip commit ff8e8394f29baa7f3dd4d14cb142594275a13463 Author: Oleg Andreev Date: Tue Mar 13 15:28:51 2018 -0700 wip commit f0f962adf3e28ede1b2d42d98461bb71a5f21da8 Author: Oleg Andreev Date: Tue Mar 13 14:25:32 2018 -0700 scalar pow funcs commit 00d5581880627a8d81d90e062b2ba3c32c0576bc Author: Oleg Andreev Date: Tue Mar 13 11:25:22 2018 -0700 wip commit d19a84c6443b8095b6fc26d59b082b325f7e46b7 Author: Oleg Andreev Date: Tue Mar 13 10:59:37 2018 -0700 j = 0..m-1 commit 011c49789a4f4d1c4a23ca42df4f6a0f1c6aa174 Author: Oleg Andreev Date: Mon Mar 12 23:01:49 2018 -0700 wip on refactoring commit 5477d4e666a5d8cd3d7ea382d6763c49a3604a79 Author: Cathie Date: Mon Mar 12 18:54:20 2018 -0700 Move logic to new functions, still broken commit 30fe8ec29a3e594266df589d03ebe0bc5b70db7b Author: Cathie Date: Mon Mar 12 18:27:43 2018 -0700 write out struct & method outlines commit 
e50f97dd504d9f0fb01e5f6718480d51a708f613 Author: Cathie Date: Mon Mar 12 17:12:10 2018 -0700 add new structs, wip - currently broken commit d24bae8aed81dcdf40169f2daefc72e52e70d0f6 Author: Cathie Date: Thu Mar 8 14:02:23 2018 -0800 relabel randomness to be more readable commit d90e12c9164c87b4fee18ee99fbb6dbc390b1944 Author: Cathie Date: Thu Mar 8 12:04:54 2018 -0800 change j indexing to start at 0 instead of 1 commit 954c0b502e11874a4b10b32dae2823a065e17344 Author: Oleg Andreev Date: Thu Mar 8 11:47:06 2018 -0800 test fixes commit 42664c424a5eedeb97f263df5a7f4ea0bb700934 Author: Oleg Andreev Date: Thu Mar 8 11:36:08 2018 -0800 fmt, more commits in RO commit b750314cae01d442e6cac52931051d53e8e3cabc Author: Cathie Date: Thu Mar 8 11:19:37 2018 -0800 use random oracle instead of commit-hash function commit 15f7d8a473a9ff57c4542afec8f51403420f5248 Author: Cathie Date: Tue Mar 6 13:50:49 2018 -0800 wip commit 95af6b9d7739c1c77f5457775e9ae3483ed823e2 Author: Cathie Date: Mon Mar 5 16:19:10 2018 -0800 change to m=10 proof because it makes oleg happier commit 66f36e8202e76a6c01913f68b18fccdef5673269 Author: Cathie Date: Mon Mar 5 16:11:30 2018 -0800 generalize to work for arbitrary # of parties commit 1a25bcf921c52aa8ea75be434efea94395c33973 Author: Cathie Date: Mon Mar 5 15:49:24 2018 -0800 generalize create method for j parties instead of just 2 commit 687c8820643cb7d8a6e71193a72f35c99839ab7b Author: Cathie Date: Mon Mar 5 15:16:42 2018 -0800 fix 2^n math, now it works for 2 parties commit 401cac1d20eefba62404873d118bf492cc809d27 Author: Cathie Date: Mon Mar 5 14:42:29 2018 -0800 added new delta definition, now check_one doesnt work commit 8594096942ce30be673b2eb8906ad1c5c30a0c26 Author: Cathie Date: Mon Mar 5 14:25:15 2018 -0800 change combine function to create new proof commit b9625e45d4ae720641a1dc9ef2dddaf7250fe65b Author: Oleg Andreev Date: Mon Mar 5 12:03:19 2018 -0800 wip commit cc4db3ec1df36b31a74244c6c55cfbf1e67d1aab Author: Oleg Andreev Date: Mon Mar 5 11:16:02 2018 -0800 explicit rust nightly via rust-toolchain, some api fixes in rp commit 083610fcefb668dd20febe5351d9f78ba26eb007 Author: Cathie Date: Fri Mar 2 17:46:38 2018 -0800 add combiner and prove_two functions, doesnt work yet commit ae54b2bcb825adaf04c6e456150d55ac75c7733a Author: Cathie Date: Fri Mar 2 16:57:18 2018 -0800 fix aggregation verifier logic commit 1626a3e77efc53690c91450a809eb7c441cda255 Author: Cathie Date: Fri Mar 2 15:19:32 2018 -0800 add aggregation math, works for j=1 commit cfe72fb7425fc290a8451d57f5277db935bef291 Author: Cathie Date: Fri Mar 2 14:52:39 2018 -0800 add clone trait, runs & passes tests now commit 8999202a0b86c53090772a080020de1695d1010a Author: Cathie Date: Fri Mar 2 14:42:26 2018 -0800 restructure to make commitments, rename parts commit d98795a139c1abc0ba5c954d927b472ebd7e68fa Author: Cathie Date: Fri Mar 2 11:47:13 2018 -0800 verification for state machine range proofs commit ff4c4d9bcf27841f37200c4d24262ebf1df6fe33 Author: Cathie Date: Thu Mar 1 14:44:51 2018 -0800 starting on state machine for rangeproof commit f8a5780f553bad853b693501aed8e4a825b7bfb9 Author: Cathie Date: Thu Mar 1 13:51:12 2018 -0800 add j, m to normal range proof commit 2fb1c2f506d4ad932d2e1de24e254aefef797cf0 Author: Cathie Date: Mon Mar 12 18:54:20 2018 -0700 Move logic to new functions, still broken commit 1c9ab136db4e23dab8d6923efc0d7da0b277b72a Author: Cathie Date: Mon Mar 12 18:27:43 2018 -0700 write out struct & method outlines commit 5030a06236cb13ab20da4277ccbfd59e542c0d9d Merge: 8c5eb03 07926df Author: Cathie 
Date: Mon Mar 12 17:16:04 2018 -0700 Merge branch 'master' of github.com:chain/ristretto-bulletproofs into multi_rp commit 8c5eb0326181a07b5d0ae293371f1d98fd953455 Author: Cathie Date: Mon Mar 12 17:12:10 2018 -0700 add new structs, wip - currently broken commit 86b6d986a66d23394f82fd08a5efa7d4a6fd67d9 Author: Cathie Date: Thu Mar 8 14:02:23 2018 -0800 relabel randomness to be more readable commit e3c3e4d37b5d8614297609efb5fe8350f9a409db Author: Cathie Date: Thu Mar 8 12:04:54 2018 -0800 change j indexing to start at 0 instead of 1 commit 5c0ac88adf8a88d111d6d0ad3a9498fcc502126e Author: Oleg Andreev Date: Thu Mar 8 11:47:06 2018 -0800 test fixes commit 56ab0786e41d7d9a0a9b3b36c6919918bcc4f898 Author: Oleg Andreev Date: Thu Mar 8 11:36:08 2018 -0800 fmt, more commits in RO commit 926927ace299c187657eac779275c958b4e0a219 Author: Cathie Date: Thu Mar 8 11:19:37 2018 -0800 use random oracle instead of commit-hash function commit caf38607e6f619926fb0d150b47e68ed695fe23d Author: Cathie Date: Tue Mar 6 13:50:49 2018 -0800 wip commit 84ab99cb17651312a79fbe05ba52ed7f9c89fa79 Author: Cathie Date: Mon Mar 5 16:19:10 2018 -0800 change to m=10 proof because it makes oleg happier commit c2c4a8d0dd8df60044fb19704e8cd867f46909fd Author: Cathie Date: Mon Mar 5 16:11:30 2018 -0800 generalize to work for arbitrary # of parties commit 477cbdd0756e9b6f3f1453683a1709980efd07e1 Author: Cathie Date: Mon Mar 5 15:49:24 2018 -0800 generalize create method for j parties instead of just 2 commit 3656e1b65ba7a3514982d8885cac33e42fad412e Author: Cathie Date: Mon Mar 5 15:16:42 2018 -0800 fix 2^n math, now it works for 2 parties commit bb97df8d6f8ebefc998c0024da050e640e351294 Author: Cathie Date: Mon Mar 5 14:42:29 2018 -0800 added new delta definition, now check_one doesnt work commit 3f7bc205024e81adfbe08440a2b14a471450c431 Author: Cathie Date: Mon Mar 5 14:25:15 2018 -0800 change combine function to create new proof commit 7bd3588e374b92546611a0cabefa7e54ee8539e3 Author: Cathie Date: Mon Mar 5 12:16:27 2018 -0800 newer better fresher rust that actually works commit d85bffe484130aeb01311fb3b4d731f34a03f20a Author: Oleg Andreev Date: Mon Mar 5 12:03:19 2018 -0800 wip commit 3b5a51f39dc7598d3a6f78180b559bf0603f3d74 Author: Oleg Andreev Date: Mon Mar 5 11:16:02 2018 -0800 explicit rust nightly via rust-toolchain, some api fixes in rp commit c76c10fa33fc1b25bd985352d4725fbe6724ab81 Author: Cathie Date: Fri Mar 2 17:46:38 2018 -0800 add combiner and prove_two functions, doesnt work yet commit 66f551d9229c3253a5f1d3254d9735a3dbe3d3a0 Author: Cathie Date: Fri Mar 2 16:57:18 2018 -0800 fix aggregation verifier logic commit 837bd22ec9c45962b159df71ce9d101ccd7d60eb Author: Cathie Date: Fri Mar 2 15:19:32 2018 -0800 add aggregation math, works for j=1 commit 2fdac5f2a44270860145fabeb5ecc9581d40f6d8 Author: Cathie Date: Fri Mar 2 14:52:39 2018 -0800 add clone trait, runs & passes tests now commit 7f2bc6d35dbdec5b57f6e428d0f1fc959a61a037 Author: Cathie Date: Fri Mar 2 14:42:26 2018 -0800 restructure to make commitments, rename parts commit b706b601d8c1019f0bd3260d58dd23c8c3cedb63 Author: Cathie Date: Fri Mar 2 11:47:13 2018 -0800 verification for state machine range proofs commit ae6dbff3ed70c7339908d8ce5b3b3e98f5b3701c Author: Cathie Date: Thu Mar 1 14:44:51 2018 -0800 starting on state machine for rangeproof commit abe60a0a4c47b5fbf6823ea9ba369af96d8f34c8 Author: Cathie Date: Thu Mar 1 13:51:12 2018 -0800 add j, m to normal range proof --- benches/bulletproofs.rs | 4 +- src/aggregated_range_proof/dealer.rs | 174 
+++++++++++++++++++ src/aggregated_range_proof/messages.rs | 187 +++++++++++++++++++++ src/aggregated_range_proof/mod.rs | 6 + src/aggregated_range_proof/party.rs | 222 +++++++++++++++++++++++++ src/aggregated_range_proof/tests.rs | 106 ++++++++++++ src/generators.rs | 13 +- src/inner_product_proof.rs | 8 +- src/lib.rs | 7 +- src/range_proof.rs | 31 ++-- src/util.rs | 60 +++++++ 11 files changed, 794 insertions(+), 24 deletions(-) create mode 100644 src/aggregated_range_proof/dealer.rs create mode 100644 src/aggregated_range_proof/messages.rs create mode 100644 src/aggregated_range_proof/mod.rs create mode 100644 src/aggregated_range_proof/party.rs create mode 100644 src/aggregated_range_proof/tests.rs diff --git a/benches/bulletproofs.rs b/benches/bulletproofs.rs index d6d2911a..25bc639e 100644 --- a/benches/bulletproofs.rs +++ b/benches/bulletproofs.rs @@ -10,9 +10,9 @@ extern crate curve25519_dalek; use curve25519_dalek::scalar::Scalar; extern crate ristretto_bulletproofs; -use ristretto_bulletproofs::{PedersenGenerators, Generators}; use ristretto_bulletproofs::ProofTranscript; use ristretto_bulletproofs::RangeProof; +use ristretto_bulletproofs::{Generators, PedersenGenerators}; fn bench_create_helper(n: usize, c: &mut Criterion) { c.bench_function(&format!("create_rangeproof_n_{}", n), move |b| { @@ -48,7 +48,7 @@ fn bench_verify_helper(n: usize, c: &mut Criterion) { let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); let v_blinding = Scalar::random(&mut rng); - let vc = pg.commit(Scalar::from_u64(v), v_blinding); + let vc = pg.commit(Scalar::from_u64(v), v_blinding); let rp = RangeProof::generate_proof( generators.share(0), diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs new file mode 100644 index 00000000..3028585e --- /dev/null +++ b/src/aggregated_range_proof/dealer.rs @@ -0,0 +1,174 @@ +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::scalar::Scalar; +use curve25519_dalek::traits::Identity; +use generators::GeneratorsView; +use inner_product_proof; +use proof_transcript::ProofTranscript; +use std::clone::Clone; +use util; + +use super::messages::*; + +/// Dealer is an entry-point API for setting up a dealer +pub struct Dealer {} + +impl Dealer { + /// Creates a new dealer with the given parties and a number of bits + pub fn new( + transcript: &mut ProofTranscript, + n: usize, + m: usize, + ) -> DealerAwaitingValues { + transcript.commit_u64(n as u64); + transcript.commit_u64(m as u64); + DealerAwaitingValues { n, m } + } +} + +/// When the dealer is initialized, it only knows the size of the set. +pub struct DealerAwaitingValues { + n: usize, + m: usize, +} + +impl DealerAwaitingValues { + /// Combines commitments and computes challenge variables. + pub fn receive_value_commitments( + self, + transcript: &mut ProofTranscript, + vc: &Vec, + ) -> (DealerAwaitingPoly, ValueChallenge) { + assert!(vc.len() == self.m); + let mut A = RistrettoPoint::identity(); + let mut S = RistrettoPoint::identity(); + + for commitment in vc.iter() { + // Commit each V individually + transcript.commit(commitment.V.compress().as_bytes()); + + // Commit sums of As and Ss. 
+ A += commitment.A; + S += commitment.S; + } + + transcript.commit(A.compress().as_bytes()); + transcript.commit(S.compress().as_bytes()); + + let y = transcript.challenge_scalar(); + let z = transcript.challenge_scalar(); + + (DealerAwaitingPoly { n: self.n }, ValueChallenge { y, z }) + } +} + +pub struct DealerAwaitingPoly { + n: usize, +} + +impl DealerAwaitingPoly { + pub fn receive_poly_commitments( + self, + transcript: &mut ProofTranscript, + poly_commitments: &Vec, + ) -> (DealerAwaitingShares, PolyChallenge) { + // Commit sums of T1s and T2s. + let mut T1 = RistrettoPoint::identity(); + let mut T2 = RistrettoPoint::identity(); + for commitment in poly_commitments.iter() { + T1 += commitment.T_1; + T2 += commitment.T_2; + } + transcript.commit(T1.compress().as_bytes()); + transcript.commit(T2.compress().as_bytes()); + + let x = transcript.challenge_scalar(); + + (DealerAwaitingShares { n: self.n }, PolyChallenge { x }) + } +} + +pub struct DealerAwaitingShares { + n: usize, +} + +impl DealerAwaitingShares { + pub fn receive_shares( + self, + transcript: &mut ProofTranscript, + proof_shares: &Vec, + gen: &GeneratorsView, + y: Scalar, + ) -> Proof { + let value_commitments = proof_shares + .iter() + .map(|ps| ps.value_commitment.V.clone()) + .collect(); + let A = proof_shares + .iter() + .fold(RistrettoPoint::identity(), |A, ps| { + A + ps.value_commitment.A + }); + let S = proof_shares + .iter() + .fold(RistrettoPoint::identity(), |S, ps| { + S + ps.value_commitment.S + }); + let T_1 = proof_shares + .iter() + .fold(RistrettoPoint::identity(), |T_1, ps| { + T_1 + ps.poly_commitment.T_1 + }); + let T_2 = proof_shares + .iter() + .fold(RistrettoPoint::identity(), |T_2, ps| { + T_2 + ps.poly_commitment.T_2 + }); + let t = proof_shares + .iter() + .fold(Scalar::zero(), |acc, ps| acc + ps.t_x); + let t_x_blinding = proof_shares + .iter() + .fold(Scalar::zero(), |acc, ps| acc + ps.t_x_blinding); + let e_blinding = proof_shares + .iter() + .fold(Scalar::zero(), |acc, ps| acc + ps.e_blinding); + transcript.commit(t.as_bytes()); + transcript.commit(t_x_blinding.as_bytes()); + transcript.commit(e_blinding.as_bytes()); + + // Get a challenge value to combine statements for the IPP + let w = transcript.challenge_scalar(); + let Q = w * gen.pedersen_generators.B; + + let l_vec: Vec = proof_shares + .iter() + .flat_map(|ps| ps.l_vec.clone().into_iter()) + .collect(); + let r_vec: Vec = proof_shares + .iter() + .flat_map(|ps| ps.r_vec.clone().into_iter()) + .collect(); + let ipp_proof = inner_product_proof::InnerProductProof::create( + transcript, + &Q, + util::exp_iter(y.invert()), + gen.G.to_vec(), + gen.H.to_vec(), + l_vec.clone(), + r_vec.clone(), + ); + + Proof { + n: self.n, + value_commitments, + A, + S, + T_1, + T_2, + t_x: t, + t_x_blinding, + e_blinding, + ipp_proof, + } + } +} diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs new file mode 100644 index 00000000..d1d38b2c --- /dev/null +++ b/src/aggregated_range_proof/messages.rs @@ -0,0 +1,187 @@ +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::scalar::Scalar; +use inner_product_proof; + +use curve25519_dalek::ristretto; +use curve25519_dalek::traits::IsIdentity; +use proof_transcript::ProofTranscript; +use rand::Rng; +use std::iter; +use util; + +#[derive(Clone)] +pub struct ValueCommitment { + pub V: RistrettoPoint, + pub A: RistrettoPoint, + pub S: RistrettoPoint, +} + +pub struct ValueChallenge { + pub y: Scalar, + pub z: Scalar, +} + +#[derive(Clone)] +pub struct 
PolyCommitment { + pub T_1: RistrettoPoint, + pub T_2: RistrettoPoint, +} + +pub struct PolyChallenge { + pub x: Scalar, +} + +#[derive(Clone)] +pub struct ProofShare { + pub value_commitment: ValueCommitment, + pub poly_commitment: PolyCommitment, + + pub t_x: Scalar, + pub t_x_blinding: Scalar, + pub e_blinding: Scalar, + + pub l_vec: Vec, + pub r_vec: Vec, +} + +pub struct Proof { + pub n: usize, + /// Commitment to the value + // XXX this should not be included, so that we can prove about existing commitments + // included for now so that it's easier to test + pub value_commitments: Vec, + /// Commitment to the bits of the value + pub A: RistrettoPoint, + /// Commitment to the blinding factors + pub S: RistrettoPoint, + /// Commitment to the \\(t_1\\) coefficient of \\( t(x) \\) + pub T_1: RistrettoPoint, + /// Commitment to the \\(t_2\\) coefficient of \\( t(x) \\) + pub T_2: RistrettoPoint, + /// Evaluation of the polynomial \\(t(x)\\) at the challenge point \\(x\\) + pub t_x: Scalar, + /// Blinding factor for the synthetic commitment to \\(t(x)\\) + pub t_x_blinding: Scalar, + /// Blinding factor for the synthetic commitment to the inner-product arguments + pub e_blinding: Scalar, + /// Proof data for the inner-product argument. + pub ipp_proof: inner_product_proof::InnerProductProof, +} + +impl Proof { + pub fn verify(&self, rng: &mut R, transcript: &mut ProofTranscript) -> Result<(), ()> { + use generators::{Generators, PedersenGenerators}; + + let n = self.n; + let m = self.value_commitments.len(); + + let generators = Generators::new(PedersenGenerators::default(), n, m); + let gen = generators.all(); + + transcript.commit_u64(n as u64); + transcript.commit_u64(m as u64); + + for V in self.value_commitments.iter() { + transcript.commit(V.compress().as_bytes()); + } + transcript.commit(self.A.compress().as_bytes()); + transcript.commit(self.S.compress().as_bytes()); + + let y = transcript.challenge_scalar(); + let z = transcript.challenge_scalar(); + + transcript.commit(self.T_1.compress().as_bytes()); + transcript.commit(self.T_2.compress().as_bytes()); + + let x = transcript.challenge_scalar(); + + transcript.commit(self.t_x.as_bytes()); + transcript.commit(self.t_x_blinding.as_bytes()); + transcript.commit(self.e_blinding.as_bytes()); + + let w = transcript.challenge_scalar(); + let zz = z * z; + let minus_z = -z; + + // Challenge value for batching statements to be verified + let c = Scalar::random(rng); + + let (x_sq, x_inv_sq, s) = self.ipp_proof.verification_scalars(transcript); + + let s_inv = s.iter().rev(); + + let a = self.ipp_proof.a; + let b = self.ipp_proof.b; + + let g = s.iter().map(|s_i| minus_z - a * s_i); + + // Compute product in updated P + // z^0 * \vec(2)^n || z^1 * \vec(2)^n || ... 
|| z^(m-1) * \vec(2)^n + let powers_of_2: Vec = util::exp_iter(Scalar::from_u64(2)).take(n).collect(); + let powers_of_z = util::exp_iter(z).take(m); + let concat_z_and_2 = + powers_of_z.flat_map(|exp_z| powers_of_2.iter().map(move |exp_2| exp_2 * exp_z)); + + let h = s_inv + .zip(util::exp_iter(y.invert())) + .zip(concat_z_and_2) + .map(|((s_i_inv, exp_y_inv), z_and_2)| z + exp_y_inv * (zz * z_and_2 - b * s_i_inv)); + + let value_commitment_scalars = util::exp_iter(z).take(m).map(|z_exp| c * zz * z_exp); + let basepoint_scalar = w * (self.t_x - a * b) + c * (delta(n, m, &y, &z) - self.t_x); + + let mega_check = ristretto::vartime::multiscalar_mul( + iter::once(Scalar::one()) + .chain(iter::once(x)) + .chain(value_commitment_scalars) + .chain(iter::once(c * x)) + .chain(iter::once(c * x * x)) + .chain(iter::once(-self.e_blinding - c * self.t_x_blinding)) + .chain(iter::once(basepoint_scalar)) + .chain(g) + .chain(h) + .chain(x_sq.iter().cloned()) + .chain(x_inv_sq.iter().cloned()), + iter::once(&self.A) + .chain(iter::once(&self.S)) + .chain(self.value_commitments.iter()) + .chain(iter::once(&self.T_1)) + .chain(iter::once(&self.T_2)) + .chain(iter::once(&gen.pedersen_generators.B_blinding)) + .chain(iter::once(&gen.pedersen_generators.B)) + .chain(gen.G.iter()) + .chain(gen.H.iter()) + .chain(self.ipp_proof.L_vec.iter()) + .chain(self.ipp_proof.R_vec.iter()), + ); + + if mega_check.is_identity() { + Ok(()) + } else { + Err(()) + } + } +} + +/// Compute delta(y,z) = (z - z^2)<1^n*m, y^n*m> + z^3 <1, 2^n*m> * \sum_j=0^(m-1) z^j +fn delta(n: usize, m: usize, y: &Scalar, z: &Scalar) -> Scalar { + let two = Scalar::from_u64(2); + + // XXX this could be more efficient, esp for powers of 2 + let sum_of_powers_of_y = util::exp_iter(*y) + .take(n * m) + .fold(Scalar::zero(), |acc, x| acc + x); + + // XXX TODO: just calculate (2^n - 1) instead + let sum_of_powers_of_2 = util::exp_iter(two) + .take(n) + .fold(Scalar::zero(), |acc, x| acc + x); + + let sum_of_powers_of_z = util::exp_iter(*z) + .take(m) + .fold(Scalar::zero(), |acc, x| acc + x); + + let zz = z * z; + + (z - zz) * sum_of_powers_of_y - z * zz * sum_of_powers_of_2 * sum_of_powers_of_z +} diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs new file mode 100644 index 00000000..d42f265f --- /dev/null +++ b/src/aggregated_range_proof/mod.rs @@ -0,0 +1,6 @@ +#![allow(non_snake_case)] + +mod dealer; +mod messages; +mod party; +mod tests; diff --git a/src/aggregated_range_proof/party.rs b/src/aggregated_range_proof/party.rs new file mode 100644 index 00000000..7d5cd95e --- /dev/null +++ b/src/aggregated_range_proof/party.rs @@ -0,0 +1,222 @@ +use curve25519_dalek::ristretto; +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::scalar::Scalar; +use generators::Generators; +use rand::Rng; +use std::clone::Clone; +use std::iter; +use util; + +use super::messages::*; + +/// Party is an entry-point API for setting up a party. +pub struct Party {} + +impl Party { + pub fn new( + v: u64, + v_blinding: Scalar, + n: usize, + generators: &Generators, + ) -> PartyAwaitingPosition { + let V = generators + .share(0) + .pedersen_generators + .commit(Scalar::from_u64(v), v_blinding); + + PartyAwaitingPosition { + generators: generators, + n, + v, + v_blinding, + V, + } + } +} + +/// As party awaits its position, they only know their value and desired bit-size of the proof. 
+pub struct PartyAwaitingPosition<'a> { + generators: &'a Generators, + n: usize, + v: u64, + v_blinding: Scalar, + V: RistrettoPoint, +} + +impl<'a> PartyAwaitingPosition<'a> { + /// Assigns the position to a party, + /// at which point the party knows its generators. + pub fn assign_position( + self, + j: usize, + mut rng: &mut R, + ) -> (PartyAwaitingValueChallenge<'a>, ValueCommitment) { + let gen_share = self.generators.share(j); + + let a_blinding = Scalar::random(&mut rng); + // Compute A = + + a_blinding * B_blinding + let mut A = gen_share.pedersen_generators.B_blinding * a_blinding; + for i in 0..self.n { + let v_i = (self.v >> i) & 1; + // XXX replace this with a conditional move + if v_i == 1 { + A += gen_share.G[i]; // + bit*G_i + } else { + A -= gen_share.H[i]; // + (bit-1)*H_i + } + } + + let s_blinding = Scalar::random(&mut rng); + let s_L: Vec = (0..self.n).map(|_| Scalar::random(&mut rng)).collect(); + let s_R: Vec = (0..self.n).map(|_| Scalar::random(&mut rng)).collect(); + + // Compute S = + + s_blinding * B_blinding + let S = ristretto::multiscalar_mul( + iter::once(&s_blinding).chain(s_L.iter()).chain(s_R.iter()), + iter::once(&gen_share.pedersen_generators.B_blinding) + .chain(gen_share.G.iter()) + .chain(gen_share.H.iter()), + ); + + // Return next state and all commitments + let value_commitment = ValueCommitment { V: self.V, A, S }; + let next_state = PartyAwaitingValueChallenge { + n: self.n, + v: self.v, + v_blinding: self.v_blinding, + + j, + generators: self.generators, + value_commitment: value_commitment.clone(), + a_blinding, + s_blinding, + s_L, + s_R, + }; + (next_state, value_commitment) + } +} + +/// When party knows its position (`j`), it can produce commitments +/// to all bits of the value and necessary blinding factors. 
+pub struct PartyAwaitingValueChallenge<'a> { + n: usize, // bitsize of the range + v: u64, + v_blinding: Scalar, + + j: usize, // index of the party, 1..m as in original paper + generators: &'a Generators, + value_commitment: ValueCommitment, + a_blinding: Scalar, + s_blinding: Scalar, + s_L: Vec, + s_R: Vec, +} + +impl<'a> PartyAwaitingValueChallenge<'a> { + pub fn apply_challenge( + self, + vc: &ValueChallenge, + rng: &mut R, + ) -> (PartyAwaitingPolyChallenge, PolyCommitment) { + let n = self.n; + let offset_y = util::scalar_exp_vartime(&vc.y, (self.j * n) as u64); + let offset_z = util::scalar_exp_vartime(&vc.z, self.j as u64); + + // Calculate t by calculating vectors l0, l1, r0, r1 and multiplying + let mut l_poly = util::VecPoly1::zero(n); + let mut r_poly = util::VecPoly1::zero(n); + + let zz = vc.z * vc.z; + let mut exp_y = offset_y; // start at y^j + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + for i in 0..n { + let a_L_i = Scalar::from_u64((self.v >> i) & 1); + let a_R_i = a_L_i - Scalar::one(); + + l_poly.0[i] = a_L_i - vc.z; + l_poly.1[i] = self.s_L[i]; + r_poly.0[i] = exp_y * (a_R_i + vc.z) + zz * offset_z * exp_2; + r_poly.1[i] = exp_y * self.s_R[i]; + + exp_y = exp_y * vc.y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + let t_poly = l_poly.inner_product(&r_poly); + + // Generate x by committing to T_1, T_2 (line 49-54) + let t_1_blinding = Scalar::random(rng); + let t_2_blinding = Scalar::random(rng); + let T_1 = self.generators + .share(self.j) + .pedersen_generators + .commit(t_poly.1, t_1_blinding); + let T_2 = self.generators + .share(self.j) + .pedersen_generators + .commit(t_poly.2, t_2_blinding); + + let poly_commitment = PolyCommitment { T_1, T_2 }; + + let papc = PartyAwaitingPolyChallenge { + value_commitment: self.value_commitment.clone(), //TODO: remove clone + poly_commitment: poly_commitment.clone(), + z: vc.z, + offset_z, + l_poly, + r_poly, + t_poly, + v_blinding: self.v_blinding, + a_blinding: self.a_blinding, + s_blinding: self.s_blinding, + t_1_blinding, + t_2_blinding, + }; + + (papc, poly_commitment) + } +} + +pub struct PartyAwaitingPolyChallenge { + value_commitment: ValueCommitment, + poly_commitment: PolyCommitment, + + z: Scalar, + offset_z: Scalar, + l_poly: util::VecPoly1, + r_poly: util::VecPoly1, + t_poly: util::Poly2, + v_blinding: Scalar, + a_blinding: Scalar, + s_blinding: Scalar, + t_1_blinding: Scalar, + t_2_blinding: Scalar, +} + +impl PartyAwaitingPolyChallenge { + pub fn apply_challenge(self, pc: &PolyChallenge) -> ProofShare { + // Generate final values for proof (line 55-60) + let t_blinding_poly = util::Poly2( + self.z * self.z * self.offset_z * self.v_blinding, + self.t_1_blinding, + self.t_2_blinding, + ); + + let t_x = self.t_poly.eval(pc.x); + let t_x_blinding = t_blinding_poly.eval(pc.x); + let e_blinding = self.a_blinding + self.s_blinding * &pc.x; + let l_vec = self.l_poly.eval(pc.x); + let r_vec = self.r_poly.eval(pc.x); + + ProofShare { + value_commitment: self.value_commitment.clone(), + poly_commitment: self.poly_commitment.clone(), // TODO: remove clone + t_x_blinding, + t_x, + e_blinding, + l_vec, + r_vec, + } + } +} diff --git a/src/aggregated_range_proof/tests.rs b/src/aggregated_range_proof/tests.rs new file mode 100644 index 00000000..9e69c757 --- /dev/null +++ b/src/aggregated_range_proof/tests.rs @@ -0,0 +1,106 @@ +use curve25519_dalek::scalar::Scalar; +use proof_transcript::ProofTranscript; +use rand::Rng; +use std::iter; + +use super::dealer::*; +use super::messages::*; +use 
super::party::*; + +#[cfg(test)] +mod tests { + use super::*; + use rand::OsRng; + + fn create_multi(rng: &mut R, values: Vec, n: usize) -> Proof { + use generators::{Generators, PedersenGenerators}; + + let m = values.len(); + let generators = Generators::new(PedersenGenerators::default(), n, m); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let parties: Vec<_> = values + .iter() + .map(|&v| { + let v_blinding = Scalar::random(rng); + Party::new(v, v_blinding, n, &generators) + }) + .collect(); + + let dealer = Dealer::new(&mut transcript, n, m); + + let (parties, value_commitments): (Vec<_>, Vec<_>) = parties + .into_iter() + .enumerate() + .map(|(j, p)| p.assign_position(j, rng)) + .unzip(); + + let (dealer, value_challenge) = + dealer.receive_value_commitments(&mut transcript, &value_commitments); + + let (parties, poly_commitments): (Vec<_>, Vec<_>) = parties + .into_iter() + .map(|p| p.apply_challenge(&value_challenge, rng)) + .unzip(); + + let (dealer, poly_challenge) = + dealer.receive_poly_commitments(&mut transcript, &poly_commitments); + + let proof_shares: Vec = parties + .into_iter() + .map(|p| p.apply_challenge(&poly_challenge)) + .collect(); + + dealer.receive_shares( + &mut transcript, + &proof_shares, + &generators.all(), + value_challenge.y, + ) + } + + fn test_u32(m: usize) { + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let v: Vec = iter::repeat(()) + .map(|()| rng.next_u32() as u64) + .take(m) + .collect(); + let rp = create_multi(&mut rng, v, 32); + assert!(rp.verify(&mut rng, &mut transcript).is_ok()); + } + + fn test_u64(m: usize) { + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let v: Vec = iter::repeat(()).map(|()| rng.next_u64()).take(m).collect(); + let rp = create_multi(&mut rng, v, 64); + assert!(rp.verify(&mut rng, &mut transcript).is_ok()); + } + + #[test] + fn one_value() { + test_u32(1); + test_u64(1); + } + + #[test] + fn two_values() { + test_u32(2); + test_u64(2); + } + + #[test] + fn four_values() { + test_u32(4); + test_u64(4); + } + + #[test] + fn eight_values() { + test_u32(8); + test_u64(8); + } +} diff --git a/src/generators.rs b/src/generators.rs index bf5f73af..129c2064 100644 --- a/src/generators.rs +++ b/src/generators.rs @@ -77,6 +77,7 @@ pub struct Generators { } /// Represents a view into `Generators` relevant to a specific range proof. +#[derive(Copy, Clone)] pub struct GeneratorsView<'a> { /// Bases for Pedersen commitments pub pedersen_generators: &'a PedersenGenerators, @@ -87,7 +88,7 @@ pub struct GeneratorsView<'a> { } /// Represents a pair of base points for Pedersen commitments. -#[derive(Clone)] +#[derive(Copy, Clone)] pub struct PedersenGenerators { /// Base for the committed value pub B: RistrettoPoint, @@ -100,7 +101,7 @@ impl PedersenGenerators { /// Constructs a pair of Pedersen generators /// from a pair of generators provided by the user. pub fn new(B: RistrettoPoint, B_blinding: RistrettoPoint) -> Self { - PedersenGenerators{B,B_blinding} + PedersenGenerators { B, B_blinding } } /// Creates a Pedersen commitment using the value scalar and a blinding factor. 
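The diff above marks `PedersenGenerators` as `Copy`; its `commit(value, blinding)` method (shown in an earlier patch) computes `value * B + blinding * B_blinding`, which makes the commitment additively homomorphic. A minimal sketch of that property, assuming the crate's public API as re-exported from `lib.rs` and the dependency versions listed in `Cargo.toml`:

```rust
// Sketch: Pedersen commitments add componentwise, since
// commit(v1, r1) + commit(v2, r2) = (v1 + v2)*B + (r1 + r2)*B_blinding.
extern crate curve25519_dalek;
extern crate rand;
extern crate ristretto_bulletproofs;

use curve25519_dalek::scalar::Scalar;
use rand::OsRng;
use ristretto_bulletproofs::PedersenGenerators;

fn main() {
    let mut rng = OsRng::new().unwrap();
    let pg = PedersenGenerators::default();

    let (v1, v2) = (Scalar::from_u64(3), Scalar::from_u64(5));
    let (r1, r2) = (Scalar::random(&mut rng), Scalar::random(&mut rng));

    // Adding two commitments gives a commitment to the sum of the values,
    // under the sum of the blinding factors.
    let sum = pg.commit(v1, r1) + pg.commit(v2, r2);
    assert_eq!(sum, pg.commit(v1 + v2, r1 + r2));
}
```

This homomorphism is also why the dealer in the aggregated protocol above can simply sum the parties' A, S, T_1, and T_2 commitments and the corresponding blinding factors.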
@@ -112,8 +113,12 @@ impl PedersenGenerators { impl Default for PedersenGenerators { fn default() -> Self { PedersenGenerators { - B: GeneratorsChain::new(b"Bulletproofs.Generators.B").next().unwrap(), - B_blinding: GeneratorsChain::new(b"Bulletproofs.Generators.B_blinding").next().unwrap() + B: GeneratorsChain::new(b"Bulletproofs.Generators.B") + .next() + .unwrap(), + B_blinding: GeneratorsChain::new(b"Bulletproofs.Generators.B_blinding") + .next() + .unwrap(), } } } diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 1ba3f349..94d249a8 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -2,11 +2,11 @@ #![doc(include = "../docs/inner-product-protocol.md")] -use std::iter; use std::borrow::Borrow; +use std::iter; -use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; +use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::scalar::Scalar; use proof_transcript::ProofTranscript; @@ -222,7 +222,6 @@ impl InnerProductProof { } } - /// Computes an inner product of two vectors /// \\[ /// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i. @@ -239,7 +238,6 @@ pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar { out } - #[cfg(test)] mod tests { use super::*; @@ -251,7 +249,7 @@ mod tests { fn test_helper_create(n: usize) { let mut rng = OsRng::new().unwrap(); - use generators::{PedersenGenerators,Generators}; + use generators::{Generators, PedersenGenerators}; let gens = Generators::new(PedersenGenerators::default(), n, 1); let G = gens.share(0).G.to_vec(); let H = gens.share(0).H.to_vec(); diff --git a/src/lib.rs b/src/lib.rs index 0fcb7302..5540b333 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -29,11 +29,12 @@ mod util; #[doc(include = "../docs/notes.md")] mod notes {} -mod proof_transcript; +mod aggregated_range_proof; mod generators; -mod range_proof; mod inner_product_proof; +mod proof_transcript; +mod range_proof; +pub use generators::{Generators, GeneratorsView, PedersenGenerators}; pub use proof_transcript::ProofTranscript; pub use range_proof::RangeProof; -pub use generators::{PedersenGenerators, Generators, GeneratorsView}; diff --git a/src/range_proof.rs b/src/range_proof.rs index cffa26c3..df26f8e6 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -7,10 +7,10 @@ use rand::Rng; use std::iter; -use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::ristretto; -use curve25519_dalek::traits::IsIdentity; +use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::scalar::Scalar; +use curve25519_dalek::traits::IsIdentity; use inner_product_proof::InnerProductProof; @@ -77,7 +77,9 @@ impl RangeProof { let G = generators.G.to_vec(); let H = generators.H.to_vec(); - let V = generators.pedersen_generators.commit(Scalar::from_u64(v), *v_blinding); + let V = generators + .pedersen_generators + .commit(Scalar::from_u64(v), *v_blinding); let a_blinding = Scalar::random(rng); @@ -99,7 +101,9 @@ impl RangeProof { // Compute S = + + s_blinding * B_blinding. 
let S = ristretto::multiscalar_mul( iter::once(&s_blinding).chain(s_L.iter()).chain(s_R.iter()), - iter::once(&generators.pedersen_generators.B_blinding).chain(G.iter()).chain(H.iter()), + iter::once(&generators.pedersen_generators.B_blinding) + .chain(G.iter()) + .chain(H.iter()), ); // Commit to V, A, S and get challenges y, z @@ -135,8 +139,12 @@ impl RangeProof { // Form commitments T_1, T_2 to t.1, t.2 let t_1_blinding = Scalar::random(rng); let t_2_blinding = Scalar::random(rng); - let T_1 = generators.pedersen_generators.commit(t_poly.1, t_1_blinding); - let T_2 = generators.pedersen_generators.commit(t_poly.2, t_2_blinding); + let T_1 = generators + .pedersen_generators + .commit(t_poly.1, t_1_blinding); + let T_2 = generators + .pedersen_generators + .commit(t_poly.2, t_2_blinding); // Commit to T_1, T_2 to get the challenge point x transcript.commit(T_1.compress().as_bytes()); @@ -342,7 +350,7 @@ mod tests { use bincode; // Both prover and verifier have access to the generators and the proof - use generators::{PedersenGenerators,Generators}; + use generators::{Generators, PedersenGenerators}; let generators = Generators::new(PedersenGenerators::default(), n, 1); // Serialized proof data @@ -371,7 +379,8 @@ mod tests { proof_bytes = bincode::serialize(&range_proof).unwrap(); let gens = generators.share(0); - value_commitment = gens.pedersen_generators.commit(Scalar::from_u64(v), v_blinding); + value_commitment = gens.pedersen_generators + .commit(Scalar::from_u64(v), v_blinding); } println!( @@ -395,7 +404,8 @@ mod tests { generators.share(0), &mut transcript, &mut rng, - n) + n + ) .is_ok() ); @@ -408,7 +418,8 @@ mod tests { generators.share(0), &mut transcript, &mut rng, - n) + n + ) .is_err() ); } diff --git a/src/util.rs b/src/util.rs index 00c4a8a1..6207b49c 100644 --- a/src/util.rs +++ b/src/util.rs @@ -83,6 +83,22 @@ impl Poly2 { } } +/// Raises `x` to the power `n` using binary exponentiation, +/// with (1 to 2)*lg(n) scalar multiplications. +/// TODO: a consttime version of this would be awfully similar to a Montgomery ladder. +pub fn scalar_exp_vartime(x: &Scalar, mut n: u64) -> Scalar { + let mut result = Scalar::one(); + let mut aux = *x; // x, x^2, x^4, x^8, ... + while n > 0 { + let bit = n & 1; + if bit == 1 { + result = result * aux; + } + n = n >> 1; + aux = aux * aux; // FIXME: one unnecessary mult at the last step here! + } + result +} #[cfg(test)] mod tests { @@ -97,4 +113,48 @@ mod tests { assert_eq!(exp_2[2], Scalar::from_u64(4)); assert_eq!(exp_2[3], Scalar::from_u64(8)); } + + #[test] + fn test_inner_product() { + let a = vec![ + Scalar::from_u64(1), + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + ]; + let b = vec![ + Scalar::from_u64(2), + Scalar::from_u64(3), + Scalar::from_u64(4), + Scalar::from_u64(5), + ]; + assert_eq!(Scalar::from_u64(40), inner_product(&a, &b)); + } + + /// Raises `x` to the power `n`. 
+ pub fn scalar_exp_vartime_slow(x: &Scalar, n: u64) -> Scalar { + let mut result = Scalar::one(); + for _ in 0..n { + result = result * x; + } + result + } + + #[test] + fn scalar_exp() { + let x = Scalar::from_bits( + *b"\x84\xfc\xbcOx\x12\xa0\x06\xd7\x91\xd9z:'\xdd\x1e!CE\xf7\xb1\xb9Vz\x810sD\x96\x85\xb5\x07", + ); + assert_eq!(scalar_exp_vartime(&x, 0), Scalar::one()); + assert_eq!(scalar_exp_vartime(&x, 1), x); + assert_eq!(scalar_exp_vartime(&x, 2), x * x); + assert_eq!(scalar_exp_vartime(&x, 3), x * x * x); + assert_eq!(scalar_exp_vartime(&x, 4), x * x * x * x); + assert_eq!(scalar_exp_vartime(&x, 5), x * x * x * x * x); + assert_eq!(scalar_exp_vartime(&x, 64), scalar_exp_vartime_slow(&x, 64)); + assert_eq!( + scalar_exp_vartime(&x, 0b11001010), + scalar_exp_vartime_slow(&x, 0b11001010) + ); + } } From e9f876e8640eeeec21ae5cd10da654e8e66bd26c Mon Sep 17 00:00:00 2001 From: Cathie Date: Tue, 24 Apr 2018 16:54:43 -0700 Subject: [PATCH 113/186] Squashed commit of the following: commit 9f3aa5d97bd205d3e74dbc9ec7d084dcaa630054 Author: Henry de Valence Date: Tue Apr 24 16:29:27 2018 -0700 rename Dealer states for consistency commit 2ce152e74adebabcd5da77b7587bc20be74abb1a Merge: ece8198 7ef75ba Author: Henry de Valence Date: Tue Apr 24 16:25:18 2018 -0700 Merge branch 'multi_rp' of github.com:chain/ristretto-bulletproofs into multi_rp commit ece81985d69e39f4b1e47197dd2d1d2e8acbbf65 Author: Henry de Valence Date: Tue Apr 24 16:22:59 2018 -0700 expose aggregation api commit e7467f95600a41532cbbaee704af62358ea5aa75 Author: Henry de Valence Date: Tue Apr 24 16:17:48 2018 -0700 move tests into module directly commit aacbfb56680d266576ab8dac6badfcd519dda8ee Author: Henry de Valence Date: Tue Apr 24 16:02:30 2018 -0700 expose aggregation api commit b511ba078b3644c0a2633addfe42df1370b1d467 Author: Henry de Valence Date: Tue Apr 24 16:02:17 2018 -0700 remove some clones commit f7023dd5b7ad4ec5a1d26e357f0d59ddf1d58cb8 Author: Henry de Valence Date: Tue Apr 24 15:54:49 2018 -0700 use conditional select commit 8f224c7c47eedafd4081762d3ab89b7ea299badd Author: Henry de Valence Date: Tue Apr 24 15:54:31 2018 -0700 add traits to messages commit cc0c9ef7bbaca0435f612d0e1970ce2e9e214f26 Author: Henry de Valence Date: Tue Apr 24 15:54:17 2018 -0700 clarify comment commit 7ef75ba191a15625127fe9a16da052e0dfbb4c14 Author: Cathie Date: Tue Apr 24 13:58:15 2018 -0700 error handling fixups commit ae72cdc489f6c7f6a87175a5a400169e78e5c329 Author: Cathie Date: Thu Apr 19 11:20:05 2018 -0700 cargo fmt commit 355467d7f14f6756c35cc25e331c77c922e92c34 Author: Cathie Date: Thu Apr 19 11:18:13 2018 -0700 Make states consume self, also pass in transcript to dealers instaed of storing it directly commit 29316ac8e94681efe6ba04e34b09281f898ab13a Author: Cathie Date: Wed Apr 18 19:13:48 2018 -0700 use seperate test file, move create_multi to test file commit b5f98c522b3097df4d5a5ae6d65a66d463dc6668 Author: Cathie Date: Wed Apr 18 19:03:31 2018 -0700 remove l, r from final proof commit dc0fa3a23232441d4bd2929b2761f15a8e44ad60 Author: Cathie Date: Wed Apr 18 18:19:24 2018 -0700 add Valuechallenge and PolyChallenge messages commit 7dd9d31973dc9610c5b13447528aaa17828d8d96 Author: Cathie Date: Tue Apr 17 12:40:43 2018 -0700 break aggregated_rp into multiple files commit 7da8123aad0233932ccc883fcaf788ee3677d1c1 Merge: 6078876 525cf93 Author: Cathie Date: Tue Apr 17 11:47:23 2018 -0700 merge with main commit 6078876677e159eb1c8fdc25908a71b3b58db888 Author: Cathie Date: Thu Mar 22 14:22:11 2018 -0700 clean up code commit 
512c664cf2ac16c973da71eb24d29cdb1ab6399e Author: Cathie Date: Thu Mar 22 14:10:16 2018 -0700 fix P calculation for aggregated version commit c67ef503f532db529064e3811be1ac6f5b01f828 Author: Cathie Date: Thu Mar 22 13:12:38 2018 -0700 fix t_check by fixing delta, t_check passes all j commit 8d889bced1de9521b8391bd85a4cf2baf097c9e7 Author: Cathie Date: Thu Mar 22 13:10:30 2018 -0700 debugging - breaking mega_check into pieces commit 6facad5c18f57346f67c302126ebdbccc2fd5782 Author: Cathie Date: Thu Mar 22 11:44:38 2018 -0700 updated tests commit 1537605a06ba17148af34d6edd0d9787b043dfde Author: Cathie Date: Thu Mar 22 11:02:31 2018 -0700 debugging - passes 1 & 2 but not 3 commit 1c66ded0bf53dcfd1a58eeb187f9867a937a4471 Author: Cathie Date: Wed Mar 21 17:36:03 2018 -0700 implemented aggregated & optimized verifier math, but only passes for j=1... WIP commit 097fd7fb3bd62229972e7003716423e0f2e44cf6 Author: Cathie Date: Wed Mar 21 14:35:57 2018 -0700 update delta function - still only passes j=1 case, not done with all verifier math updates commit 8ecb4fa7f8208c8a1c9fe3f426467485ee1ed688 Author: Cathie Date: Wed Mar 21 13:55:50 2018 -0700 clean up code, some renames commit 45637cc9d0a8caf752a2cb0fe89564153fc1cba7 Author: Cathie Date: Wed Mar 21 13:49:28 2018 -0700 add ipp, single value optimizations to aggregated range proof - did not update verifier math, so it only passes the one party check. commit c31f551828f35df4993ebaf3c75c5b90272a20db Author: Cathie Date: Mon Mar 19 18:22:16 2018 -0700 cleanup commit 1eccd45af62794fb31f94c5659433072fdd3469c Author: Cathie Date: Mon Mar 19 17:41:43 2018 -0700 rearrange structs to be next to impl commit 3194ed681c5923c5224277c237e2bf296144864e Author: Cathie Date: Mon Mar 19 17:40:41 2018 -0700 add vecPoly2 and PolyDeg3 ops to util file commit f9aa757458f86318f084fd528b209e07f5b5651b Merge: 60de532 6b017d1 Author: Henry de Valence Date: Mon Mar 19 17:10:22 2018 -0700 Merge branch 'master' into multi_rp commit 60de53274d101339b893eaecc70ebe2edb5768b8 Author: Cathie Date: Mon Mar 19 16:47:21 2018 -0700 some renaming commit 5b6bf6226d658074fc852573812dce6d573304d2 Author: Cathie Date: Mon Mar 19 16:35:33 2018 -0700 reformat, change generator passing commit e8583bfab14c81266252f3276ccabf6838c94386 Author: Oleg Andreev Date: Wed Mar 14 17:33:01 2018 -0700 revert single range proof commit ec9c59cab6517350dbcae227253e775ffa87ed1e Merge: 2fb1c2f fc6791b Author: Oleg Andreev Date: Wed Mar 14 17:31:33 2018 -0700 merge multi RP fixes commit fc6791be88fd9eb4964f54440214c4778df095ef Author: Oleg Andreev Date: Wed Mar 14 17:25:21 2018 -0700 cleanup warnings commit a44e6ee15db3476d1ebec0d24f6cd8a1915931a9 Author: Oleg Andreev Date: Wed Mar 14 17:22:07 2018 -0700 tests for the multi-party RP commit 5b1888b17b0c56dc81d44f1a81143160c7447e38 Author: Oleg Andreev Date: Wed Mar 14 17:01:36 2018 -0700 wip commit 4c4e4c6907370e4979b0c3130e7f6d31e21151e2 Author: Oleg Andreev Date: Wed Mar 14 16:51:23 2018 -0700 switch to new generators commit 405e03836e394c5a4d3203db0ccd0dd95b58ba75 Author: Oleg Andreev Date: Wed Mar 14 16:16:46 2018 -0700 wip commit 6a66a96b7c2cf13673f7aa2482586da55c486921 Author: Oleg Andreev Date: Tue Mar 13 17:50:14 2018 -0700 wip commit 3f8468a307a8723423502c4a08c1432a2e1bc8ed Author: Oleg Andreev Date: Tue Mar 13 17:41:53 2018 -0700 wip commit ff8e8394f29baa7f3dd4d14cb142594275a13463 Author: Oleg Andreev Date: Tue Mar 13 15:28:51 2018 -0700 wip commit f0f962adf3e28ede1b2d42d98461bb71a5f21da8 Author: Oleg Andreev Date: Tue Mar 13 14:25:32 2018 -0700 scalar pow 
funcs commit 00d5581880627a8d81d90e062b2ba3c32c0576bc Author: Oleg Andreev Date: Tue Mar 13 11:25:22 2018 -0700 wip commit d19a84c6443b8095b6fc26d59b082b325f7e46b7 Author: Oleg Andreev Date: Tue Mar 13 10:59:37 2018 -0700 j = 0..m-1 commit 011c49789a4f4d1c4a23ca42df4f6a0f1c6aa174 Author: Oleg Andreev Date: Mon Mar 12 23:01:49 2018 -0700 wip on refactoring commit 5477d4e666a5d8cd3d7ea382d6763c49a3604a79 Author: Cathie Date: Mon Mar 12 18:54:20 2018 -0700 Move logic to new functions, still broken commit 30fe8ec29a3e594266df589d03ebe0bc5b70db7b Author: Cathie Date: Mon Mar 12 18:27:43 2018 -0700 write out struct & method outlines commit e50f97dd504d9f0fb01e5f6718480d51a708f613 Author: Cathie Date: Mon Mar 12 17:12:10 2018 -0700 add new structs, wip - currently broken commit d24bae8aed81dcdf40169f2daefc72e52e70d0f6 Author: Cathie Date: Thu Mar 8 14:02:23 2018 -0800 relabel randomness to be more readable commit d90e12c9164c87b4fee18ee99fbb6dbc390b1944 Author: Cathie Date: Thu Mar 8 12:04:54 2018 -0800 change j indexing to start at 0 instead of 1 commit 954c0b502e11874a4b10b32dae2823a065e17344 Author: Oleg Andreev Date: Thu Mar 8 11:47:06 2018 -0800 test fixes commit 42664c424a5eedeb97f263df5a7f4ea0bb700934 Author: Oleg Andreev Date: Thu Mar 8 11:36:08 2018 -0800 fmt, more commits in RO commit b750314cae01d442e6cac52931051d53e8e3cabc Author: Cathie Date: Thu Mar 8 11:19:37 2018 -0800 use random oracle instead of commit-hash function commit 15f7d8a473a9ff57c4542afec8f51403420f5248 Author: Cathie Date: Tue Mar 6 13:50:49 2018 -0800 wip commit 95af6b9d7739c1c77f5457775e9ae3483ed823e2 Author: Cathie Date: Mon Mar 5 16:19:10 2018 -0800 change to m=10 proof because it makes oleg happier commit 66f36e8202e76a6c01913f68b18fccdef5673269 Author: Cathie Date: Mon Mar 5 16:11:30 2018 -0800 generalize to work for arbitrary # of parties commit 1a25bcf921c52aa8ea75be434efea94395c33973 Author: Cathie Date: Mon Mar 5 15:49:24 2018 -0800 generalize create method for j parties instead of just 2 commit 687c8820643cb7d8a6e71193a72f35c99839ab7b Author: Cathie Date: Mon Mar 5 15:16:42 2018 -0800 fix 2^n math, now it works for 2 parties commit 401cac1d20eefba62404873d118bf492cc809d27 Author: Cathie Date: Mon Mar 5 14:42:29 2018 -0800 added new delta definition, now check_one doesnt work commit 8594096942ce30be673b2eb8906ad1c5c30a0c26 Author: Cathie Date: Mon Mar 5 14:25:15 2018 -0800 change combine function to create new proof commit b9625e45d4ae720641a1dc9ef2dddaf7250fe65b Author: Oleg Andreev Date: Mon Mar 5 12:03:19 2018 -0800 wip commit cc4db3ec1df36b31a74244c6c55cfbf1e67d1aab Author: Oleg Andreev Date: Mon Mar 5 11:16:02 2018 -0800 explicit rust nightly via rust-toolchain, some api fixes in rp commit 083610fcefb668dd20febe5351d9f78ba26eb007 Author: Cathie Date: Fri Mar 2 17:46:38 2018 -0800 add combiner and prove_two functions, doesnt work yet commit ae54b2bcb825adaf04c6e456150d55ac75c7733a Author: Cathie Date: Fri Mar 2 16:57:18 2018 -0800 fix aggregation verifier logic commit 1626a3e77efc53690c91450a809eb7c441cda255 Author: Cathie Date: Fri Mar 2 15:19:32 2018 -0800 add aggregation math, works for j=1 commit cfe72fb7425fc290a8451d57f5277db935bef291 Author: Cathie Date: Fri Mar 2 14:52:39 2018 -0800 add clone trait, runs & passes tests now commit 8999202a0b86c53090772a080020de1695d1010a Author: Cathie Date: Fri Mar 2 14:42:26 2018 -0800 restructure to make commitments, rename parts commit d98795a139c1abc0ba5c954d927b472ebd7e68fa Author: Cathie Date: Fri Mar 2 11:47:13 2018 -0800 verification for state machine 
range proofs commit ff4c4d9bcf27841f37200c4d24262ebf1df6fe33 Author: Cathie Date: Thu Mar 1 14:44:51 2018 -0800 starting on state machine for rangeproof commit f8a5780f553bad853b693501aed8e4a825b7bfb9 Author: Cathie Date: Thu Mar 1 13:51:12 2018 -0800 add j, m to normal range proof commit 2fb1c2f506d4ad932d2e1de24e254aefef797cf0 Author: Cathie Date: Mon Mar 12 18:54:20 2018 -0700 Move logic to new functions, still broken commit 1c9ab136db4e23dab8d6923efc0d7da0b277b72a Author: Cathie Date: Mon Mar 12 18:27:43 2018 -0700 write out struct & method outlines commit 5030a06236cb13ab20da4277ccbfd59e542c0d9d Merge: 8c5eb03 07926df Author: Cathie Date: Mon Mar 12 17:16:04 2018 -0700 Merge branch 'master' of github.com:chain/ristretto-bulletproofs into multi_rp commit 8c5eb0326181a07b5d0ae293371f1d98fd953455 Author: Cathie Date: Mon Mar 12 17:12:10 2018 -0700 add new structs, wip - currently broken commit 86b6d986a66d23394f82fd08a5efa7d4a6fd67d9 Author: Cathie Date: Thu Mar 8 14:02:23 2018 -0800 relabel randomness to be more readable commit e3c3e4d37b5d8614297609efb5fe8350f9a409db Author: Cathie Date: Thu Mar 8 12:04:54 2018 -0800 change j indexing to start at 0 instead of 1 commit 5c0ac88adf8a88d111d6d0ad3a9498fcc502126e Author: Oleg Andreev Date: Thu Mar 8 11:47:06 2018 -0800 test fixes commit 56ab0786e41d7d9a0a9b3b36c6919918bcc4f898 Author: Oleg Andreev Date: Thu Mar 8 11:36:08 2018 -0800 fmt, more commits in RO commit 926927ace299c187657eac779275c958b4e0a219 Author: Cathie Date: Thu Mar 8 11:19:37 2018 -0800 use random oracle instead of commit-hash function commit caf38607e6f619926fb0d150b47e68ed695fe23d Author: Cathie Date: Tue Mar 6 13:50:49 2018 -0800 wip commit 84ab99cb17651312a79fbe05ba52ed7f9c89fa79 Author: Cathie Date: Mon Mar 5 16:19:10 2018 -0800 change to m=10 proof because it makes oleg happier commit c2c4a8d0dd8df60044fb19704e8cd867f46909fd Author: Cathie Date: Mon Mar 5 16:11:30 2018 -0800 generalize to work for arbitrary # of parties commit 477cbdd0756e9b6f3f1453683a1709980efd07e1 Author: Cathie Date: Mon Mar 5 15:49:24 2018 -0800 generalize create method for j parties instead of just 2 commit 3656e1b65ba7a3514982d8885cac33e42fad412e Author: Cathie Date: Mon Mar 5 15:16:42 2018 -0800 fix 2^n math, now it works for 2 parties commit bb97df8d6f8ebefc998c0024da050e640e351294 Author: Cathie Date: Mon Mar 5 14:42:29 2018 -0800 added new delta definition, now check_one doesnt work commit 3f7bc205024e81adfbe08440a2b14a471450c431 Author: Cathie Date: Mon Mar 5 14:25:15 2018 -0800 change combine function to create new proof commit 7bd3588e374b92546611a0cabefa7e54ee8539e3 Author: Cathie Date: Mon Mar 5 12:16:27 2018 -0800 newer better fresher rust that actually works commit d85bffe484130aeb01311fb3b4d731f34a03f20a Author: Oleg Andreev Date: Mon Mar 5 12:03:19 2018 -0800 wip commit 3b5a51f39dc7598d3a6f78180b559bf0603f3d74 Author: Oleg Andreev Date: Mon Mar 5 11:16:02 2018 -0800 explicit rust nightly via rust-toolchain, some api fixes in rp commit c76c10fa33fc1b25bd985352d4725fbe6724ab81 Author: Cathie Date: Fri Mar 2 17:46:38 2018 -0800 add combiner and prove_two functions, doesnt work yet commit 66f551d9229c3253a5f1d3254d9735a3dbe3d3a0 Author: Cathie Date: Fri Mar 2 16:57:18 2018 -0800 fix aggregation verifier logic commit 837bd22ec9c45962b159df71ce9d101ccd7d60eb Author: Cathie Date: Fri Mar 2 15:19:32 2018 -0800 add aggregation math, works for j=1 commit 2fdac5f2a44270860145fabeb5ecc9581d40f6d8 Author: Cathie Date: Fri Mar 2 14:52:39 2018 -0800 add clone trait, runs & passes tests now commit 
7f2bc6d35dbdec5b57f6e428d0f1fc959a61a037 Author: Cathie Date: Fri Mar 2 14:42:26 2018 -0800 restructure to make commitments, rename parts commit b706b601d8c1019f0bd3260d58dd23c8c3cedb63 Author: Cathie Date: Fri Mar 2 11:47:13 2018 -0800 verification for state machine range proofs commit ae6dbff3ed70c7339908d8ce5b3b3e98f5b3701c Author: Cathie Date: Thu Mar 1 14:44:51 2018 -0800 starting on state machine for rangeproof commit abe60a0a4c47b5fbf6823ea9ba369af96d8f34c8 Author: Cathie Date: Thu Mar 1 13:51:12 2018 -0800 add j, m to normal range proof --- src/aggregated_range_proof/dealer.rs | 29 +++---- src/aggregated_range_proof/messages.rs | 9 +- src/aggregated_range_proof/mod.rs | 113 ++++++++++++++++++++++++- src/aggregated_range_proof/party.rs | 30 +++---- src/aggregated_range_proof/tests.rs | 106 ----------------------- src/lib.rs | 3 +- 6 files changed, 144 insertions(+), 146 deletions(-) delete mode 100644 src/aggregated_range_proof/tests.rs diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 3028585e..e025770f 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -4,7 +4,6 @@ use curve25519_dalek::traits::Identity; use generators::GeneratorsView; use inner_product_proof; use proof_transcript::ProofTranscript; -use std::clone::Clone; use util; use super::messages::*; @@ -13,31 +12,31 @@ use super::messages::*; pub struct Dealer {} impl Dealer { - /// Creates a new dealer with the given parties and a number of bits + /// Creates a new dealer coordinating `m` parties proving `n`-bit ranges. pub fn new( transcript: &mut ProofTranscript, n: usize, m: usize, - ) -> DealerAwaitingValues { + ) -> DealerAwaitingValueCommitments { transcript.commit_u64(n as u64); transcript.commit_u64(m as u64); - DealerAwaitingValues { n, m } + DealerAwaitingValueCommitments { n, m } } } /// When the dealer is initialized, it only knows the size of the set. -pub struct DealerAwaitingValues { +pub struct DealerAwaitingValueCommitments { n: usize, m: usize, } -impl DealerAwaitingValues { +impl DealerAwaitingValueCommitments { /// Combines commitments and computes challenge variables. pub fn receive_value_commitments( self, transcript: &mut ProofTranscript, vc: &Vec, - ) -> (DealerAwaitingPoly, ValueChallenge) { + ) -> (DealerAwaitingPolyCommitments, ValueChallenge) { assert!(vc.len() == self.m); let mut A = RistrettoPoint::identity(); let mut S = RistrettoPoint::identity(); @@ -57,20 +56,20 @@ impl DealerAwaitingValues { let y = transcript.challenge_scalar(); let z = transcript.challenge_scalar(); - (DealerAwaitingPoly { n: self.n }, ValueChallenge { y, z }) + (DealerAwaitingPolyCommitments { n: self.n }, ValueChallenge { y, z }) } } -pub struct DealerAwaitingPoly { +pub struct DealerAwaitingPolyCommitments { n: usize, } -impl DealerAwaitingPoly { +impl DealerAwaitingPolyCommitments { pub fn receive_poly_commitments( self, transcript: &mut ProofTranscript, poly_commitments: &Vec, - ) -> (DealerAwaitingShares, PolyChallenge) { + ) -> (DealerAwaitingProofShares, PolyChallenge) { // Commit sums of T1s and T2s. 
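// Why summing is sound here, as a sketch resting on the additive homomorphism
// of Pedersen commitments (standard Bulletproofs aggregation reasoning, not
// stated in this diff; the per-party names below are illustrative): each
// party j contributes
//
//     T1_j = t1_j * B + t1_blinding_j * B_blinding,
//
// so sum_j T1_j = (sum_j t1_j) * B + (sum_j t1_blinding_j) * B_blinding is
// itself a commitment to the aggregated degree-1 coefficient, and likewise
// for the T2 points. The dealer can therefore hash only the two aggregate
// points into the transcript to derive the challenge x.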
let mut T1 = RistrettoPoint::identity(); let mut T2 = RistrettoPoint::identity(); @@ -83,15 +82,15 @@ impl DealerAwaitingPoly { let x = transcript.challenge_scalar(); - (DealerAwaitingShares { n: self.n }, PolyChallenge { x }) + (DealerAwaitingProofShares { n: self.n }, PolyChallenge { x }) } } -pub struct DealerAwaitingShares { +pub struct DealerAwaitingProofShares { n: usize, } -impl DealerAwaitingShares { +impl DealerAwaitingProofShares { pub fn receive_shares( self, transcript: &mut ProofTranscript, @@ -101,7 +100,7 @@ impl DealerAwaitingShares { ) -> Proof { let value_commitments = proof_shares .iter() - .map(|ps| ps.value_commitment.V.clone()) + .map(|ps| ps.value_commitment.V) .collect(); let A = proof_shares .iter() diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index d1d38b2c..6089d4fa 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -9,29 +9,31 @@ use rand::Rng; use std::iter; use util; -#[derive(Clone)] +#[derive(Serialize, Deserialize, Copy, Clone, Debug)] pub struct ValueCommitment { pub V: RistrettoPoint, pub A: RistrettoPoint, pub S: RistrettoPoint, } +#[derive(Serialize, Deserialize, Copy, Clone, Debug)] pub struct ValueChallenge { pub y: Scalar, pub z: Scalar, } -#[derive(Clone)] +#[derive(Serialize, Deserialize, Copy, Clone, Debug)] pub struct PolyCommitment { pub T_1: RistrettoPoint, pub T_2: RistrettoPoint, } +#[derive(Serialize, Deserialize, Copy, Clone, Debug)] pub struct PolyChallenge { pub x: Scalar, } -#[derive(Clone)] +#[derive(Serialize, Deserialize, Clone, Debug)] pub struct ProofShare { pub value_commitment: ValueCommitment, pub poly_commitment: PolyCommitment, @@ -44,6 +46,7 @@ pub struct ProofShare { pub r_vec: Vec, } +#[derive(Serialize, Deserialize, Clone, Debug)] pub struct Proof { pub n: usize, /// Commitment to the value diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index d42f265f..145332ff 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -1,6 +1,111 @@ #![allow(non_snake_case)] -mod dealer; -mod messages; -mod party; -mod tests; +pub mod dealer; +pub mod messages; +pub mod party; + +#[cfg(test)] +mod tests { + use std::iter; + + use rand::{Rng, OsRng}; + + use curve25519_dalek::scalar::Scalar; + use proof_transcript::ProofTranscript; + + use super::dealer::*; + use super::messages::*; + use super::party::*; + + fn create_multi(rng: &mut R, values: Vec, n: usize) -> Proof { + use generators::{Generators, PedersenGenerators}; + + let m = values.len(); + let generators = Generators::new(PedersenGenerators::default(), n, m); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let parties: Vec<_> = values + .iter() + .map(|&v| { + let v_blinding = Scalar::random(rng); + Party::new(v, v_blinding, n, &generators) + }) + .collect(); + + let dealer = Dealer::new(&mut transcript, n, m); + + let (parties, value_commitments): (Vec<_>, Vec<_>) = parties + .into_iter() + .enumerate() + .map(|(j, p)| p.assign_position(j, rng)) + .unzip(); + + let (dealer, value_challenge) = + dealer.receive_value_commitments(&mut transcript, &value_commitments); + + let (parties, poly_commitments): (Vec<_>, Vec<_>) = parties + .into_iter() + .map(|p| p.apply_challenge(&value_challenge, rng)) + .unzip(); + + let (dealer, poly_challenge) = + dealer.receive_poly_commitments(&mut transcript, &poly_commitments); + + let proof_shares: Vec = parties + .into_iter() + .map(|p| 
p.apply_challenge(&poly_challenge)) + .collect(); + + dealer.receive_shares( + &mut transcript, + &proof_shares, + &generators.all(), + value_challenge.y, + ) + } + + fn test_u32(m: usize) { + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let v: Vec = iter::repeat(()) + .map(|()| rng.next_u32() as u64) + .take(m) + .collect(); + let rp = create_multi(&mut rng, v, 32); + assert!(rp.verify(&mut rng, &mut transcript).is_ok()); + } + + fn test_u64(m: usize) { + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let v: Vec = iter::repeat(()).map(|()| rng.next_u64()).take(m).collect(); + let rp = create_multi(&mut rng, v, 64); + assert!(rp.verify(&mut rng, &mut transcript).is_ok()); + } + + #[test] + fn one_value() { + test_u32(1); + test_u64(1); + } + + #[test] + fn two_values() { + test_u32(2); + test_u64(2); + } + + #[test] + fn four_values() { + test_u32(4); + test_u64(4); + } + + #[test] + fn eight_values() { + test_u32(8); + test_u64(8); + } +} diff --git a/src/aggregated_range_proof/party.rs b/src/aggregated_range_proof/party.rs index 7d5cd95e..47202b83 100644 --- a/src/aggregated_range_proof/party.rs +++ b/src/aggregated_range_proof/party.rs @@ -3,7 +3,6 @@ use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::scalar::Scalar; use generators::Generators; use rand::Rng; -use std::clone::Clone; use std::iter; use util; @@ -56,14 +55,15 @@ impl<'a> PartyAwaitingPosition<'a> { let a_blinding = Scalar::random(&mut rng); // Compute A = + + a_blinding * B_blinding let mut A = gen_share.pedersen_generators.B_blinding * a_blinding; + + use subtle::{Choice, ConditionallyAssignable}; for i in 0..self.n { - let v_i = (self.v >> i) & 1; - // XXX replace this with a conditional move - if v_i == 1 { - A += gen_share.G[i]; // + bit*G_i - } else { - A -= gen_share.H[i]; // + (bit-1)*H_i - } + // If v_i = 0, we add a_L[i] * G[i] + a_R[i] * H[i] = - H[i] + // If v_i = 1, we add a_L[i] * G[i] + a_R[i] * H[i] = G[i] + let v_i = Choice::from(((self.v >> i) & 1) as u8); + let mut point = -gen_share.H[i]; + point.conditional_assign(&gen_share.G[i], v_i); + A += point; } let s_blinding = Scalar::random(&mut rng); @@ -84,10 +84,9 @@ impl<'a> PartyAwaitingPosition<'a> { n: self.n, v: self.v, v_blinding: self.v_blinding, - - j, generators: self.generators, - value_commitment: value_commitment.clone(), + j, + value_commitment, a_blinding, s_blinding, s_L, @@ -160,8 +159,8 @@ impl<'a> PartyAwaitingValueChallenge<'a> { let poly_commitment = PolyCommitment { T_1, T_2 }; let papc = PartyAwaitingPolyChallenge { - value_commitment: self.value_commitment.clone(), //TODO: remove clone - poly_commitment: poly_commitment.clone(), + value_commitment: self.value_commitment, + poly_commitment, z: vc.z, offset_z, l_poly, @@ -181,7 +180,6 @@ impl<'a> PartyAwaitingValueChallenge<'a> { pub struct PartyAwaitingPolyChallenge { value_commitment: ValueCommitment, poly_commitment: PolyCommitment, - z: Scalar, offset_z: Scalar, l_poly: util::VecPoly1, @@ -210,8 +208,8 @@ impl PartyAwaitingPolyChallenge { let r_vec = self.r_poly.eval(pc.x); ProofShare { - value_commitment: self.value_commitment.clone(), - poly_commitment: self.poly_commitment.clone(), // TODO: remove clone + value_commitment: self.value_commitment, + poly_commitment: self.poly_commitment, t_x_blinding, t_x, e_blinding, diff --git a/src/aggregated_range_proof/tests.rs b/src/aggregated_range_proof/tests.rs deleted file mode 
100644 index 9e69c757..00000000 --- a/src/aggregated_range_proof/tests.rs +++ /dev/null @@ -1,106 +0,0 @@ -use curve25519_dalek::scalar::Scalar; -use proof_transcript::ProofTranscript; -use rand::Rng; -use std::iter; - -use super::dealer::*; -use super::messages::*; -use super::party::*; - -#[cfg(test)] -mod tests { - use super::*; - use rand::OsRng; - - fn create_multi(rng: &mut R, values: Vec, n: usize) -> Proof { - use generators::{Generators, PedersenGenerators}; - - let m = values.len(); - let generators = Generators::new(PedersenGenerators::default(), n, m); - let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - let parties: Vec<_> = values - .iter() - .map(|&v| { - let v_blinding = Scalar::random(rng); - Party::new(v, v_blinding, n, &generators) - }) - .collect(); - - let dealer = Dealer::new(&mut transcript, n, m); - - let (parties, value_commitments): (Vec<_>, Vec<_>) = parties - .into_iter() - .enumerate() - .map(|(j, p)| p.assign_position(j, rng)) - .unzip(); - - let (dealer, value_challenge) = - dealer.receive_value_commitments(&mut transcript, &value_commitments); - - let (parties, poly_commitments): (Vec<_>, Vec<_>) = parties - .into_iter() - .map(|p| p.apply_challenge(&value_challenge, rng)) - .unzip(); - - let (dealer, poly_challenge) = - dealer.receive_poly_commitments(&mut transcript, &poly_commitments); - - let proof_shares: Vec = parties - .into_iter() - .map(|p| p.apply_challenge(&poly_challenge)) - .collect(); - - dealer.receive_shares( - &mut transcript, - &proof_shares, - &generators.all(), - value_challenge.y, - ) - } - - fn test_u32(m: usize) { - let mut rng = OsRng::new().unwrap(); - let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - let v: Vec = iter::repeat(()) - .map(|()| rng.next_u32() as u64) - .take(m) - .collect(); - let rp = create_multi(&mut rng, v, 32); - assert!(rp.verify(&mut rng, &mut transcript).is_ok()); - } - - fn test_u64(m: usize) { - let mut rng = OsRng::new().unwrap(); - let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - let v: Vec = iter::repeat(()).map(|()| rng.next_u64()).take(m).collect(); - let rp = create_multi(&mut rng, v, 64); - assert!(rp.verify(&mut rng, &mut transcript).is_ok()); - } - - #[test] - fn one_value() { - test_u32(1); - test_u64(1); - } - - #[test] - fn two_values() { - test_u32(2); - test_u64(2); - } - - #[test] - fn four_values() { - test_u32(4); - test_u64(4); - } - - #[test] - fn eight_values() { - test_u32(8); - test_u64(8); - } -} diff --git a/src/lib.rs b/src/lib.rs index 5540b333..3c3a387b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,12 +28,11 @@ mod util; #[doc(include = "../docs/notes.md")] mod notes {} - -mod aggregated_range_proof; mod generators; mod inner_product_proof; mod proof_transcript; mod range_proof; +pub mod aggregated_range_proof; pub use generators::{Generators, GeneratorsView, PedersenGenerators}; pub use proof_transcript::ProofTranscript; From aaa130bbd295b9b6d7e239c1a400a6c66e5c3539 Mon Sep 17 00:00:00 2001 From: Cathie Date: Thu, 19 Apr 2018 12:05:08 -0700 Subject: [PATCH 114/186] add error checks for m parties on dealer state --- src/aggregated_range_proof/dealer.rs | 64 +++++++++++++++++++++------- src/aggregated_range_proof/mod.rs | 28 +++++++----- 2 files changed, 66 insertions(+), 26 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index e025770f..99bea33f 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -14,17 
+14,18 @@ pub struct Dealer {} impl Dealer { /// Creates a new dealer coordinating `m` parties proving `n`-bit ranges. pub fn new( - transcript: &mut ProofTranscript, n: usize, m: usize, - ) -> DealerAwaitingValueCommitments { + transcript: &mut ProofTranscript, + ) -> Result { transcript.commit_u64(n as u64); transcript.commit_u64(m as u64); - DealerAwaitingValueCommitments { n, m } + Ok(DealerAwaitingValueCommitments { n, m }) } } /// When the dealer is initialized, it only knows the size of the set. +#[derive(Debug)] pub struct DealerAwaitingValueCommitments { n: usize, m: usize, @@ -34,14 +35,19 @@ impl DealerAwaitingValueCommitments { /// Combines commitments and computes challenge variables. pub fn receive_value_commitments( self, + value_commitments: &Vec, transcript: &mut ProofTranscript, - vc: &Vec, - ) -> (DealerAwaitingPolyCommitments, ValueChallenge) { - assert!(vc.len() == self.m); + ) -> Result<(DealerAwaitingPolyCommitments, ValueChallenge), &'static str> { + if self.m != value_commitments.len() { + return Err( + "Length of value commitments doesn't match expected length m", + ); + } + let mut A = RistrettoPoint::identity(); let mut S = RistrettoPoint::identity(); - for commitment in vc.iter() { + for commitment in value_commitments.iter() { // Commit each V individually transcript.commit(commitment.V.compress().as_bytes()); @@ -56,20 +62,34 @@ impl DealerAwaitingValueCommitments { let y = transcript.challenge_scalar(); let z = transcript.challenge_scalar(); - (DealerAwaitingPolyCommitments { n: self.n }, ValueChallenge { y, z }) + Ok(( + DealerAwaitingPolyCommitments { + n: self.n, + m: self.m, + }, + ValueChallenge { y, z }, + )) } } +#[derive(Debug)] pub struct DealerAwaitingPolyCommitments { n: usize, + m: usize, } impl DealerAwaitingPolyCommitments { pub fn receive_poly_commitments( self, - transcript: &mut ProofTranscript, poly_commitments: &Vec, - ) -> (DealerAwaitingProofShares, PolyChallenge) { + transcript: &mut ProofTranscript, + ) -> Result<(DealerAwaitingProofShares, PolyChallenge), &'static str> { + if self.m != poly_commitments.len() { + return Err( + "Length of poly commitments doesn't match expected length m", + ); + } + // Commit sums of T1s and T2s. 
let mut T1 = RistrettoPoint::identity(); let mut T2 = RistrettoPoint::identity(); @@ -82,22 +102,36 @@ impl DealerAwaitingPolyCommitments { let x = transcript.challenge_scalar(); - (DealerAwaitingProofShares { n: self.n }, PolyChallenge { x }) + Ok(( + DealerAwaitingProofShares { + n: self.n, + m: self.m, + }, + PolyChallenge { x }, + )) } } +#[derive(Debug)] pub struct DealerAwaitingProofShares { n: usize, + m: usize, } impl DealerAwaitingProofShares { pub fn receive_shares( self, - transcript: &mut ProofTranscript, proof_shares: &Vec, gen: &GeneratorsView, y: Scalar, - ) -> Proof { + transcript: &mut ProofTranscript, + ) -> Result { + if self.m != proof_shares.len() { + return Err( + "Length of proof shares doesn't match expected length m", + ); + } + let value_commitments = proof_shares .iter() .map(|ps| ps.value_commitment.V) @@ -157,7 +191,7 @@ impl DealerAwaitingProofShares { r_vec.clone(), ); - Proof { + Ok(Proof { n: self.n, value_commitments, A, @@ -168,6 +202,6 @@ impl DealerAwaitingProofShares { t_x_blinding, e_blinding, ipp_proof, - } + }) } } diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index 145332ff..2dbabd4c 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -32,7 +32,7 @@ mod tests { }) .collect(); - let dealer = Dealer::new(&mut transcript, n, m); + let dealer = Dealer::new(n, m, &mut transcript).unwrap(); let (parties, value_commitments): (Vec<_>, Vec<_>) = parties .into_iter() @@ -40,28 +40,34 @@ mod tests { .map(|(j, p)| p.assign_position(j, rng)) .unzip(); - let (dealer, value_challenge) = - dealer.receive_value_commitments(&mut transcript, &value_commitments); + // let (a, b) = dealer.receive_value_commitments(&mut transcript, &value_commitments).unwrap(); + + let (dealer, value_challenge) = dealer + .receive_value_commitments(&value_commitments, &mut transcript) + .unwrap(); let (parties, poly_commitments): (Vec<_>, Vec<_>) = parties .into_iter() .map(|p| p.apply_challenge(&value_challenge, rng)) .unzip(); - let (dealer, poly_challenge) = - dealer.receive_poly_commitments(&mut transcript, &poly_commitments); + let (dealer, poly_challenge) = dealer + .receive_poly_commitments(&poly_commitments, &mut transcript) + .unwrap(); let proof_shares: Vec = parties .into_iter() .map(|p| p.apply_challenge(&poly_challenge)) .collect(); - dealer.receive_shares( - &mut transcript, - &proof_shares, - &generators.all(), - value_challenge.y, - ) + dealer + .receive_shares( + &proof_shares, + &generators.all(), + value_challenge.y, + &mut transcript, + ) + .unwrap() } fn test_u32(m: usize) { From a8f7d48007601be12011348652a3e490aceb739b Mon Sep 17 00:00:00 2001 From: Cathie Date: Thu, 19 Apr 2018 12:42:21 -0700 Subject: [PATCH 115/186] setup for doing per-share validation on proof shares --- src/aggregated_range_proof/dealer.rs | 26 ++++++++++++++++++++++---- src/aggregated_range_proof/messages.rs | 11 ++++++++++- src/aggregated_range_proof/mod.rs | 1 - 3 files changed, 32 insertions(+), 6 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 99bea33f..806b9910 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -61,13 +61,15 @@ impl DealerAwaitingValueCommitments { let y = transcript.challenge_scalar(); let z = transcript.challenge_scalar(); + let value_challenge = ValueChallenge { y, z }; Ok(( DealerAwaitingPolyCommitments { n: self.n, m: self.m, + value_challenge: value_challenge.clone(), }, - ValueChallenge { 
y, z }, + value_challenge, )) } } @@ -76,6 +78,7 @@ impl DealerAwaitingValueCommitments { pub struct DealerAwaitingPolyCommitments { n: usize, m: usize, + value_challenge: ValueChallenge, } impl DealerAwaitingPolyCommitments { @@ -101,13 +104,16 @@ impl DealerAwaitingPolyCommitments { transcript.commit(T2.compress().as_bytes()); let x = transcript.challenge_scalar(); + let poly_challenge = PolyChallenge { x }; Ok(( DealerAwaitingProofShares { n: self.n, m: self.m, + value_challenge: self.value_challenge, + poly_challenge: poly_challenge.clone(), }, - PolyChallenge { x }, + poly_challenge, )) } } @@ -116,6 +122,8 @@ impl DealerAwaitingPolyCommitments { pub struct DealerAwaitingProofShares { n: usize, m: usize, + value_challenge: ValueChallenge, + poly_challenge: PolyChallenge, } impl DealerAwaitingProofShares { @@ -123,7 +131,6 @@ impl DealerAwaitingProofShares { self, proof_shares: &Vec, gen: &GeneratorsView, - y: Scalar, transcript: &mut ProofTranscript, ) -> Result { if self.m != proof_shares.len() { @@ -132,6 +139,17 @@ impl DealerAwaitingProofShares { ); } + for (_j, proof_share) in proof_shares.iter().enumerate() { + if proof_share + .verify_share(&self.value_challenge, &self.poly_challenge) + .is_err() + { + return Err( + "One of the proof shares is invalid", // TODO: print which one (j) is invalid + ); + } + } + let value_commitments = proof_shares .iter() .map(|ps| ps.value_commitment.V) @@ -184,7 +202,7 @@ impl DealerAwaitingProofShares { let ipp_proof = inner_product_proof::InnerProductProof::create( transcript, &Q, - util::exp_iter(y.invert()), + util::exp_iter(self.value_challenge.y.invert()), gen.G.to_vec(), gen.H.to_vec(), l_vec.clone(), diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 6089d4fa..78ef9f18 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -46,7 +46,16 @@ pub struct ProofShare { pub r_vec: Vec, } -#[derive(Serialize, Deserialize, Clone, Debug)] +impl ProofShare { + pub fn verify_share( + &self, + value_challenge: &ValueChallenge, + poly_challenge: &PolyChallenge, + ) -> Result<(), ()> { + Ok(()) + } +} + pub struct Proof { pub n: usize, /// Commitment to the value diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index 2dbabd4c..69b7702f 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -64,7 +64,6 @@ mod tests { .receive_shares( &proof_shares, &generators.all(), - value_challenge.y, &mut transcript, ) .unwrap() From d7bf5d591af85958c8036eadced95f4f23727d33 Mon Sep 17 00:00:00 2001 From: Cathie Date: Fri, 20 Apr 2018 14:50:23 -0700 Subject: [PATCH 116/186] Adding proof share validation math - only passes for the first proof share, WIP --- src/aggregated_range_proof/dealer.rs | 4 +- src/aggregated_range_proof/messages.rs | 94 +++++++++++++++++++++++++- src/lib.rs | 1 + 3 files changed, 96 insertions(+), 3 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 806b9910..39cf929b 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -139,9 +139,9 @@ impl DealerAwaitingProofShares { ); } - for (_j, proof_share) in proof_shares.iter().enumerate() { + for (j, proof_share) in proof_shares.iter().enumerate() { if proof_share - .verify_share(&self.value_challenge, &self.poly_challenge) + .verify_share(self.n, j, &self.value_challenge, &self.poly_challenge) .is_err() { return Err( diff --git 
a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 78ef9f18..382769a7 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -49,9 +49,101 @@ pub struct ProofShare { impl ProofShare { pub fn verify_share( &self, + n: usize, + j: usize, value_challenge: &ValueChallenge, poly_challenge: &PolyChallenge, - ) -> Result<(), ()> { + ) -> Result<(), &'static str> { + use generators::{Generators, PedersenGenerators}; + let generators = Generators::new(PedersenGenerators::default(), n, j+1); + let gen = generators.share(j); + + // renames for convenience + let y = value_challenge.y; + let y_inv = y.invert(); + let z = value_challenge.z; + let zz = z * z; + let minus_z = -z; + let x = poly_challenge.x; + let one = Scalar::one(); + let two = Scalar::from_u64(2); + + if self.t_x != inner_product_proof::inner_product(&self.l_vec, &self.r_vec) { + return Err("Inner product of l_vec and r_vec is not equal to t_x") + } + + // TODO: find a better way to calculate this :( + let mut y_j_inv = one; // y^(-j) when j=0 + let mut y_j = one; // y^j when j=0 + let mut z_j = one; // z^j when j=0 + for _ in 0..j { + y_j_inv = y_j_inv * y_inv; + y_j = y_j * y; + z_j = z_j * z; + } + // if j != 0 { + // y_neg_j = util::exp_iter(value_challenge.y.invert()).take(j).last().unwrap(); // y^(-j) + // z_j = util::exp_iter(value_challenge.z).take(j).last().unwrap(); // z^j + // } + + let g = self.l_vec.iter().map(|l_i| minus_z - l_i ); + let h = self.r_vec.iter() + .zip(util::exp_iter(two)) + .zip(util::exp_iter(y_inv)) + .map(|((r_i, exp_2), exp_y_inv)| + z + + exp_y_inv * y_j_inv * (- r_i) + + exp_y_inv * y_j_inv * (zz * z_j * exp_2) + ); + + let P_check = ristretto::vartime::multiscalar_mul( + iter::once(Scalar::one()) + .chain(iter::once(x)) + .chain(iter::once(- self.e_blinding)) + .chain(g) + .chain(h), + + iter::once(&self.value_commitment.A) + .chain(iter::once(&self.value_commitment.S)) + .chain(iter::once(&gen.pedersen_generators.B_blinding)) + .chain(gen.G.iter()) + .chain(gen.H.iter()) + ); + + if !P_check.is_identity() { + return Err("P check is not equal to zero") + } + + ///////// calculate delta + // XXX this could be more efficient, esp for powers of 2 + let sum_of_powers_of_y = util::exp_iter(y) + .take(n) + .fold(Scalar::zero(), |acc, x| acc + x); + + // XXX TODO: just calculate (2^n - 1) instead + let sum_of_powers_of_2 = util::exp_iter(two) + .take(n) + .fold(Scalar::zero(), |acc, x| acc + x); + + let delta = (z - zz) * sum_of_powers_of_y * y_j - z * zz * sum_of_powers_of_2 * z_j; + + let t_check = ristretto::vartime::multiscalar_mul( + iter::once(zz * z_j) + .chain(iter::once(x)) + .chain(iter::once(x * x)) + .chain(iter::once(delta - self.t_x)) + .chain(iter::once(-self.t_x_blinding)), + iter::once(&self.value_commitment.V) + .chain(iter::once(&self.poly_commitment.T_1)) + .chain(iter::once(&self.poly_commitment.T_2)) + .chain(iter::once(&gen.pedersen_generators.B)) + .chain(iter::once(&gen.pedersen_generators.B_blinding)) + ); + + if !t_check.is_identity() { + return Err("t check is not equal to zero") + } + Ok(()) } } diff --git a/src/lib.rs b/src/lib.rs index 3c3a387b..2579762d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -37,3 +37,4 @@ pub mod aggregated_range_proof; pub use generators::{Generators, GeneratorsView, PedersenGenerators}; pub use proof_transcript::ProofTranscript; pub use range_proof::RangeProof; +pub use aggregated_range_proof::*; From f5b7032f1522e7010f60b874c7f17412f5792172 Mon Sep 17 00:00:00 2001 From: 
Cathie Date: Fri, 20 Apr 2018 15:40:20 -0700 Subject: [PATCH 117/186] Fixed math, validity check works for all proof shares now --- src/aggregated_range_proof/messages.rs | 71 ++++++++------------------ 1 file changed, 22 insertions(+), 49 deletions(-) diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 382769a7..8ed2edde 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -58,42 +58,30 @@ impl ProofShare { let generators = Generators::new(PedersenGenerators::default(), n, j+1); let gen = generators.share(j); - // renames for convenience + // renaming and precomputation + let x = poly_challenge.x; let y = value_challenge.y; - let y_inv = y.invert(); let z = value_challenge.z; let zz = z * z; let minus_z = -z; - let x = poly_challenge.x; - let one = Scalar::one(); - let two = Scalar::from_u64(2); + let z_j = util::exp_iter(z).take(j+1).last().unwrap(); // z^j + let y_jn = util::exp_iter(y).take(j*n+1).last().unwrap(); // y^(j*n) + let y_jn_inv = y_jn.invert(); // y^(-j*n) + let y_inv = y.invert(); // y^(-1) if self.t_x != inner_product_proof::inner_product(&self.l_vec, &self.r_vec) { return Err("Inner product of l_vec and r_vec is not equal to t_x") } - // TODO: find a better way to calculate this :( - let mut y_j_inv = one; // y^(-j) when j=0 - let mut y_j = one; // y^j when j=0 - let mut z_j = one; // z^j when j=0 - for _ in 0..j { - y_j_inv = y_j_inv * y_inv; - y_j = y_j * y; - z_j = z_j * z; - } - // if j != 0 { - // y_neg_j = util::exp_iter(value_challenge.y.invert()).take(j).last().unwrap(); // y^(-j) - // z_j = util::exp_iter(value_challenge.z).take(j).last().unwrap(); // z^j - // } let g = self.l_vec.iter().map(|l_i| minus_z - l_i ); let h = self.r_vec.iter() - .zip(util::exp_iter(two)) + .zip(util::exp_iter(Scalar::from_u64(2))) .zip(util::exp_iter(y_inv)) .map(|((r_i, exp_2), exp_y_inv)| z + - exp_y_inv * y_j_inv * (- r_i) + - exp_y_inv * y_j_inv * (zz * z_j * exp_2) + exp_y_inv * y_jn_inv * (- r_i) + + exp_y_inv * y_jn_inv * (zz * z_j * exp_2) ); let P_check = ristretto::vartime::multiscalar_mul( @@ -114,18 +102,10 @@ impl ProofShare { return Err("P check is not equal to zero") } - ///////// calculate delta - // XXX this could be more efficient, esp for powers of 2 - let sum_of_powers_of_y = util::exp_iter(y) - .take(n) - .fold(Scalar::zero(), |acc, x| acc + x); + let sum_of_powers_of_y = sum_of_powers_of(&y, n); + let sum_of_powers_of_2 = sum_of_powers_of(&Scalar::from_u64(2), n); - // XXX TODO: just calculate (2^n - 1) instead - let sum_of_powers_of_2 = util::exp_iter(two) - .take(n) - .fold(Scalar::zero(), |acc, x| acc + x); - - let delta = (z - zz) * sum_of_powers_of_y * y_j - z * zz * sum_of_powers_of_2 * z_j; + let delta = (z - zz) * sum_of_powers_of_y * y_jn - z * zz * sum_of_powers_of_2 * z_j; let t_check = ristretto::vartime::multiscalar_mul( iter::once(zz * z_j) @@ -269,23 +249,16 @@ impl Proof { /// Compute delta(y,z) = (z - z^2)<1^n*m, y^n*m> + z^3 <1, 2^n*m> * \sum_j=0^(m-1) z^j fn delta(n: usize, m: usize, y: &Scalar, z: &Scalar) -> Scalar { - let two = Scalar::from_u64(2); - - // XXX this could be more efficient, esp for powers of 2 - let sum_of_powers_of_y = util::exp_iter(*y) - .take(n * m) - .fold(Scalar::zero(), |acc, x| acc + x); - - // XXX TODO: just calculate (2^n - 1) instead - let sum_of_powers_of_2 = util::exp_iter(two) - .take(n) - .fold(Scalar::zero(), |acc, x| acc + x); + let sum_y = sum_of_powers_of(y, n*m); + let sum_2 = 
sum_of_powers_of(&Scalar::from_u64(2), n); + let sum_z = sum_of_powers_of(z, m); - let sum_of_powers_of_z = util::exp_iter(*z) - .take(m) - .fold(Scalar::zero(), |acc, x| acc + x); - - let zz = z * z; + (z - z * z) * sum_y - z * z * z * sum_2 * sum_z +} - (z - zz) * sum_of_powers_of_y - z * zz * sum_of_powers_of_2 * sum_of_powers_of_z +// XXX this could be more efficient, esp for powers of 2 +fn sum_of_powers_of(a: &Scalar, to: usize) -> Scalar { + util::exp_iter(*a) + .take(to) + .fold(Scalar::zero(), |acc, x| acc + x) } From 7248e9da93d97a51f7a6ce7ecf8b02df018ad0bb Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 23 Apr 2018 17:24:49 -0700 Subject: [PATCH 118/186] add ProofBlame struct --- src/aggregated_range_proof/dealer.rs | 24 +++++++++++++----------- src/aggregated_range_proof/messages.rs | 21 +++++++++++++++------ src/aggregated_range_proof/mod.rs | 2 +- 3 files changed, 29 insertions(+), 18 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 39cf929b..1153284d 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -132,22 +132,22 @@ impl DealerAwaitingProofShares { proof_shares: &Vec, gen: &GeneratorsView, transcript: &mut ProofTranscript, - ) -> Result { + ) -> Result<(Proof, Vec), &'static str> { if self.m != proof_shares.len() { return Err( "Length of proof shares doesn't match expected length m", ); } + let mut proof_blame = Vec::new(); for (j, proof_share) in proof_shares.iter().enumerate() { - if proof_share - .verify_share(self.n, j, &self.value_challenge, &self.poly_challenge) - .is_err() - { - return Err( - "One of the proof shares is invalid", // TODO: print which one (j) is invalid - ); - } + proof_blame.push( ProofBlame{ + proof_share: proof_share.clone(), + n: self.n, + j: j, + value_challenge: self.value_challenge.clone(), + poly_challenge: self.poly_challenge.clone(), + }); } let value_commitments = proof_shares @@ -209,7 +209,7 @@ impl DealerAwaitingProofShares { r_vec.clone(), ); - Ok(Proof { + let aggregated_proof = Proof { n: self.n, value_commitments, A, @@ -220,6 +220,8 @@ impl DealerAwaitingProofShares { t_x_blinding, e_blinding, ipp_proof, - }) + }; + + Ok((aggregated_proof, proof_blame)) } } diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 8ed2edde..9ad9e58e 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -73,7 +73,6 @@ impl ProofShare { return Err("Inner product of l_vec and r_vec is not equal to t_x") } - let g = self.l_vec.iter().map(|l_i| minus_z - l_i ); let h = self.r_vec.iter() .zip(util::exp_iter(Scalar::from_u64(2))) @@ -83,7 +82,6 @@ impl ProofShare { exp_y_inv * y_jn_inv * (- r_i) + exp_y_inv * y_jn_inv * (zz * z_j * exp_2) ); - let P_check = ristretto::vartime::multiscalar_mul( iter::once(Scalar::one()) .chain(iter::once(x)) @@ -97,16 +95,13 @@ impl ProofShare { .chain(gen.G.iter()) .chain(gen.H.iter()) ); - if !P_check.is_identity() { return Err("P check is not equal to zero") } let sum_of_powers_of_y = sum_of_powers_of(&y, n); let sum_of_powers_of_2 = sum_of_powers_of(&Scalar::from_u64(2), n); - let delta = (z - zz) * sum_of_powers_of_y * y_jn - z * zz * sum_of_powers_of_2 * z_j; - let t_check = ristretto::vartime::multiscalar_mul( iter::once(zz * z_j) .chain(iter::once(x)) @@ -119,7 +114,6 @@ impl ProofShare { .chain(iter::once(&gen.pedersen_generators.B)) .chain(iter::once(&gen.pedersen_generators.B_blinding)) ); - if !t_check.is_identity() { 
return Err("t check is not equal to zero") } @@ -128,6 +122,21 @@ impl ProofShare { } } +// TODO: rename this to something that sounds less awkward +pub struct ProofBlame { + pub proof_share: ProofShare, + pub n: usize, + pub j: usize, + pub value_challenge: ValueChallenge, + pub poly_challenge: PolyChallenge, +} + +impl ProofBlame { + pub fn blame(&self) -> Result<(), &'static str> { + self.proof_share.verify_share(self.n, self.j, &self.value_challenge, &self.poly_challenge) + } +} + pub struct Proof { pub n: usize, /// Commitment to the value diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index 69b7702f..a1d3e149 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -66,7 +66,7 @@ mod tests { &generators.all(), &mut transcript, ) - .unwrap() + .unwrap().0 } fn test_u32(m: usize) { From 0e932976d384d993d5743566f02084e08aa74248 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 24 Apr 2018 11:22:23 -0700 Subject: [PATCH 119/186] rustfmt --- src/aggregated_range_proof/dealer.rs | 2 +- src/aggregated_range_proof/messages.rs | 131 ++++++++++++------------- src/aggregated_range_proof/mod.rs | 9 +- src/lib.rs | 2 +- 4 files changed, 70 insertions(+), 74 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 1153284d..8c7df889 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -141,7 +141,7 @@ impl DealerAwaitingProofShares { let mut proof_blame = Vec::new(); for (j, proof_share) in proof_shares.iter().enumerate() { - proof_blame.push( ProofBlame{ + proof_blame.push(ProofBlame { proof_share: proof_share.clone(), n: self.n, j: j, diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 9ad9e58e..45c51384 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -54,69 +54,67 @@ impl ProofShare { value_challenge: &ValueChallenge, poly_challenge: &PolyChallenge, ) -> Result<(), &'static str> { - use generators::{Generators, PedersenGenerators}; - let generators = Generators::new(PedersenGenerators::default(), n, j+1); - let gen = generators.share(j); - - // renaming and precomputation - let x = poly_challenge.x; - let y = value_challenge.y; - let z = value_challenge.z; + use generators::{Generators, PedersenGenerators}; + let generators = Generators::new(PedersenGenerators::default(), n, j + 1); + let gen = generators.share(j); + + // renaming and precomputation + let x = poly_challenge.x; + let y = value_challenge.y; + let z = value_challenge.z; let zz = z * z; let minus_z = -z; - let z_j = util::exp_iter(z).take(j+1).last().unwrap(); // z^j - let y_jn = util::exp_iter(y).take(j*n+1).last().unwrap(); // y^(j*n) - let y_jn_inv = y_jn.invert(); // y^(-j*n) - let y_inv = y.invert(); // y^(-1) - - if self.t_x != inner_product_proof::inner_product(&self.l_vec, &self.r_vec) { - return Err("Inner product of l_vec and r_vec is not equal to t_x") - } - - let g = self.l_vec.iter().map(|l_i| minus_z - l_i ); - let h = self.r_vec.iter() - .zip(util::exp_iter(Scalar::from_u64(2))) - .zip(util::exp_iter(y_inv)) - .map(|((r_i, exp_2), exp_y_inv)| - z + - exp_y_inv * y_jn_inv * (- r_i) + - exp_y_inv * y_jn_inv * (zz * z_j * exp_2) - ); - let P_check = ristretto::vartime::multiscalar_mul( - iter::once(Scalar::one()) - .chain(iter::once(x)) - .chain(iter::once(- self.e_blinding)) - .chain(g) - .chain(h), - - iter::once(&self.value_commitment.A) - 
.chain(iter::once(&self.value_commitment.S)) - .chain(iter::once(&gen.pedersen_generators.B_blinding)) - .chain(gen.G.iter()) - .chain(gen.H.iter()) - ); - if !P_check.is_identity() { - return Err("P check is not equal to zero") - } - - let sum_of_powers_of_y = sum_of_powers_of(&y, n); - let sum_of_powers_of_2 = sum_of_powers_of(&Scalar::from_u64(2), n); - let delta = (z - zz) * sum_of_powers_of_y * y_jn - z * zz * sum_of_powers_of_2 * z_j; - let t_check = ristretto::vartime::multiscalar_mul( - iter::once(zz * z_j) - .chain(iter::once(x)) - .chain(iter::once(x * x)) - .chain(iter::once(delta - self.t_x)) - .chain(iter::once(-self.t_x_blinding)), - iter::once(&self.value_commitment.V) - .chain(iter::once(&self.poly_commitment.T_1)) - .chain(iter::once(&self.poly_commitment.T_2)) - .chain(iter::once(&gen.pedersen_generators.B)) - .chain(iter::once(&gen.pedersen_generators.B_blinding)) - ); - if !t_check.is_identity() { - return Err("t check is not equal to zero") - } + let z_j = util::exp_iter(z).take(j + 1).last().unwrap(); // z^j + let y_jn = util::exp_iter(y).take(j * n + 1).last().unwrap(); // y^(j*n) + let y_jn_inv = y_jn.invert(); // y^(-j*n) + let y_inv = y.invert(); // y^(-1) + + if self.t_x != inner_product_proof::inner_product(&self.l_vec, &self.r_vec) { + return Err("Inner product of l_vec and r_vec is not equal to t_x"); + } + + let g = self.l_vec.iter().map(|l_i| minus_z - l_i); + let h = self.r_vec + .iter() + .zip(util::exp_iter(Scalar::from_u64(2))) + .zip(util::exp_iter(y_inv)) + .map(|((r_i, exp_2), exp_y_inv)| { + z + exp_y_inv * y_jn_inv * (-r_i) + exp_y_inv * y_jn_inv * (zz * z_j * exp_2) + }); + let P_check = ristretto::vartime::multiscalar_mul( + iter::once(Scalar::one()) + .chain(iter::once(x)) + .chain(iter::once(-self.e_blinding)) + .chain(g) + .chain(h), + iter::once(&self.value_commitment.A) + .chain(iter::once(&self.value_commitment.S)) + .chain(iter::once(&gen.pedersen_generators.B_blinding)) + .chain(gen.G.iter()) + .chain(gen.H.iter()), + ); + if !P_check.is_identity() { + return Err("P check is not equal to zero"); + } + + let sum_of_powers_of_y = sum_of_powers_of(&y, n); + let sum_of_powers_of_2 = sum_of_powers_of(&Scalar::from_u64(2), n); + let delta = (z - zz) * sum_of_powers_of_y * y_jn - z * zz * sum_of_powers_of_2 * z_j; + let t_check = ristretto::vartime::multiscalar_mul( + iter::once(zz * z_j) + .chain(iter::once(x)) + .chain(iter::once(x * x)) + .chain(iter::once(delta - self.t_x)) + .chain(iter::once(-self.t_x_blinding)), + iter::once(&self.value_commitment.V) + .chain(iter::once(&self.poly_commitment.T_1)) + .chain(iter::once(&self.poly_commitment.T_2)) + .chain(iter::once(&gen.pedersen_generators.B)) + .chain(iter::once(&gen.pedersen_generators.B_blinding)), + ); + if !t_check.is_identity() { + return Err("t check is not equal to zero"); + } Ok(()) } @@ -133,7 +131,8 @@ pub struct ProofBlame { impl ProofBlame { pub fn blame(&self) -> Result<(), &'static str> { - self.proof_share.verify_share(self.n, self.j, &self.value_challenge, &self.poly_challenge) + self.proof_share + .verify_share(self.n, self.j, &self.value_challenge, &self.poly_challenge) } } @@ -258,7 +257,7 @@ impl Proof { /// Compute delta(y,z) = (z - z^2)<1^n*m, y^n*m> + z^3 <1, 2^n*m> * \sum_j=0^(m-1) z^j fn delta(n: usize, m: usize, y: &Scalar, z: &Scalar) -> Scalar { - let sum_y = sum_of_powers_of(y, n*m); + let sum_y = sum_of_powers_of(y, n * m); let sum_2 = sum_of_powers_of(&Scalar::from_u64(2), n); let sum_z = sum_of_powers_of(z, m); @@ -267,7 +266,7 @@ fn delta(n: usize, m: 
usize, y: &Scalar, z: &Scalar) -> Scalar { // XXX this could be more efficient, esp for powers of 2 fn sum_of_powers_of(a: &Scalar, to: usize) -> Scalar { - util::exp_iter(*a) - .take(to) - .fold(Scalar::zero(), |acc, x| acc + x) + util::exp_iter(*a) + .take(to) + .fold(Scalar::zero(), |acc, x| acc + x) } diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index a1d3e149..d6227840 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -61,12 +61,9 @@ mod tests { .collect(); dealer - .receive_shares( - &proof_shares, - &generators.all(), - &mut transcript, - ) - .unwrap().0 + .receive_shares(&proof_shares, &generators.all(), &mut transcript) + .unwrap() + .0 } fn test_u32(m: usize) { diff --git a/src/lib.rs b/src/lib.rs index 2579762d..7ce98bac 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -34,7 +34,7 @@ mod proof_transcript; mod range_proof; pub mod aggregated_range_proof; +pub use aggregated_range_proof::*; pub use generators::{Generators, GeneratorsView, PedersenGenerators}; pub use proof_transcript::ProofTranscript; pub use range_proof::RangeProof; -pub use aggregated_range_proof::*; From 38c2f04009b219100759df0fd322f645c613724b Mon Sep 17 00:00:00 2001 From: Cathie Date: Tue, 24 Apr 2018 11:24:44 -0700 Subject: [PATCH 120/186] add error checking for size of m, n --- src/aggregated_range_proof/dealer.rs | 6 ++++++ src/aggregated_range_proof/mod.rs | 6 ++---- src/aggregated_range_proof/party.rs | 12 +++++++----- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 8c7df889..59422e90 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -18,6 +18,12 @@ impl Dealer { m: usize, transcript: &mut ProofTranscript, ) -> Result { + if !n.is_power_of_two() { + return Err("n is not a power of two") + } + if !m.is_power_of_two() { + return Err("m is not a power of two") + } transcript.commit_u64(n as u64); transcript.commit_u64(m as u64); Ok(DealerAwaitingValueCommitments { n, m }) diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index d6227840..94fb8c22 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -28,7 +28,7 @@ mod tests { .iter() .map(|&v| { let v_blinding = Scalar::random(rng); - Party::new(v, v_blinding, n, &generators) + Party::new(v, v_blinding, n, &generators).unwrap() }) .collect(); @@ -39,9 +39,7 @@ mod tests { .enumerate() .map(|(j, p)| p.assign_position(j, rng)) .unzip(); - - // let (a, b) = dealer.receive_value_commitments(&mut transcript, &value_commitments).unwrap(); - + let (dealer, value_challenge) = dealer .receive_value_commitments(&value_commitments, &mut transcript) .unwrap(); diff --git a/src/aggregated_range_proof/party.rs b/src/aggregated_range_proof/party.rs index 47202b83..24427706 100644 --- a/src/aggregated_range_proof/party.rs +++ b/src/aggregated_range_proof/party.rs @@ -17,19 +17,22 @@ impl Party { v_blinding: Scalar, n: usize, generators: &Generators, - ) -> PartyAwaitingPosition { + ) -> Result { + if !n.is_power_of_two() { + return Err("n is not a power of two") + } let V = generators .share(0) .pedersen_generators .commit(Scalar::from_u64(v), v_blinding); - PartyAwaitingPosition { + Ok(PartyAwaitingPosition { generators: generators, n, v, v_blinding, V, - } + }) } } @@ -103,7 +106,7 @@ pub struct PartyAwaitingValueChallenge<'a> { v: u64, v_blinding: Scalar, - j: usize, // index of the party, 
1..m as in original paper + j: usize, generators: &'a Generators, value_commitment: ValueCommitment, a_blinding: Scalar, @@ -194,7 +197,6 @@ pub struct PartyAwaitingPolyChallenge { impl PartyAwaitingPolyChallenge { pub fn apply_challenge(self, pc: &PolyChallenge) -> ProofShare { - // Generate final values for proof (line 55-60) let t_blinding_poly = util::Poly2( self.z * self.z * self.offset_z * self.v_blinding, self.t_1_blinding, From 0b7e3b5dd0dfefdb59c5885dfd0e450d354add4c Mon Sep 17 00:00:00 2001 From: Cathie Date: Tue, 24 Apr 2018 11:26:34 -0700 Subject: [PATCH 121/186] fmt --- src/aggregated_range_proof/dealer.rs | 4 ++-- src/aggregated_range_proof/mod.rs | 2 +- src/aggregated_range_proof/party.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 59422e90..f40df620 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -19,10 +19,10 @@ impl Dealer { transcript: &mut ProofTranscript, ) -> Result { if !n.is_power_of_two() { - return Err("n is not a power of two") + return Err("n is not a power of two"); } if !m.is_power_of_two() { - return Err("m is not a power of two") + return Err("m is not a power of two"); } transcript.commit_u64(n as u64); transcript.commit_u64(m as u64); diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index 94fb8c22..f752443e 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -39,7 +39,7 @@ mod tests { .enumerate() .map(|(j, p)| p.assign_position(j, rng)) .unzip(); - + let (dealer, value_challenge) = dealer .receive_value_commitments(&value_commitments, &mut transcript) .unwrap(); diff --git a/src/aggregated_range_proof/party.rs b/src/aggregated_range_proof/party.rs index 24427706..64e4760b 100644 --- a/src/aggregated_range_proof/party.rs +++ b/src/aggregated_range_proof/party.rs @@ -19,7 +19,7 @@ impl Party { generators: &Generators, ) -> Result { if !n.is_power_of_two() { - return Err("n is not a power of two") + return Err("n is not a power of two"); } let V = generators .share(0) From 5b336660ed1330e535eba47386a1db56609a61fe Mon Sep 17 00:00:00 2001 From: Cathie Date: Tue, 24 Apr 2018 13:05:56 -0700 Subject: [PATCH 122/186] add n>64 checking --- src/aggregated_range_proof/dealer.rs | 6 +++--- src/aggregated_range_proof/party.rs | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index f40df620..94e4f06b 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -18,11 +18,11 @@ impl Dealer { m: usize, transcript: &mut ProofTranscript, ) -> Result { - if !n.is_power_of_two() { - return Err("n is not a power of two"); + if !n.is_power_of_two() || n > 64 { + return Err("n is not valid: must be a power of 2, and less than or equal to 64"); } if !m.is_power_of_two() { - return Err("m is not a power of two"); + return Err("m is not valid: must be a power of 2"); } transcript.commit_u64(n as u64); transcript.commit_u64(m as u64); diff --git a/src/aggregated_range_proof/party.rs b/src/aggregated_range_proof/party.rs index 64e4760b..55f8a637 100644 --- a/src/aggregated_range_proof/party.rs +++ b/src/aggregated_range_proof/party.rs @@ -18,8 +18,8 @@ impl Party { n: usize, generators: &Generators, ) -> Result { - if !n.is_power_of_two() { - return Err("n is not a power of two"); + if !n.is_power_of_two() || n > 64 { + return 
Err("n is not valid: must be a power of 2, and less than or equal to 64"); } let V = generators .share(0) From a86268fe43c9e641e796e3e1be11cafbbb8d3c26 Mon Sep 17 00:00:00 2001 From: Cathie Date: Tue, 24 Apr 2018 13:53:14 -0700 Subject: [PATCH 123/186] lib fixup --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 7ce98bac..76bc653c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,13 +28,13 @@ mod util; #[doc(include = "../docs/notes.md")] mod notes {} + mod generators; mod inner_product_proof; mod proof_transcript; mod range_proof; pub mod aggregated_range_proof; -pub use aggregated_range_proof::*; pub use generators::{Generators, GeneratorsView, PedersenGenerators}; pub use proof_transcript::ProofTranscript; pub use range_proof::RangeProof; From fcf40ddc2686cdf3d897023b1a494cc3fa42281b Mon Sep 17 00:00:00 2001 From: Cathie Date: Tue, 24 Apr 2018 17:45:12 -0700 Subject: [PATCH 124/186] rustfmt --- src/aggregated_range_proof/dealer.rs | 12 +++--------- src/aggregated_range_proof/mod.rs | 2 +- src/lib.rs | 2 +- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 94e4f06b..6dbd1ab0 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -45,9 +45,7 @@ impl DealerAwaitingValueCommitments { transcript: &mut ProofTranscript, ) -> Result<(DealerAwaitingPolyCommitments, ValueChallenge), &'static str> { if self.m != value_commitments.len() { - return Err( - "Length of value commitments doesn't match expected length m", - ); + return Err("Length of value commitments doesn't match expected length m"); } let mut A = RistrettoPoint::identity(); @@ -94,9 +92,7 @@ impl DealerAwaitingPolyCommitments { transcript: &mut ProofTranscript, ) -> Result<(DealerAwaitingProofShares, PolyChallenge), &'static str> { if self.m != poly_commitments.len() { - return Err( - "Length of poly commitments doesn't match expected length m", - ); + return Err("Length of poly commitments doesn't match expected length m"); } // Commit sums of T1s and T2s. 
@@ -140,9 +136,7 @@ impl DealerAwaitingProofShares { transcript: &mut ProofTranscript, ) -> Result<(Proof, Vec), &'static str> { if self.m != proof_shares.len() { - return Err( - "Length of proof shares doesn't match expected length m", - ); + return Err("Length of proof shares doesn't match expected length m"); } let mut proof_blame = Vec::new(); diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index f752443e..60fd50c8 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -8,7 +8,7 @@ pub mod party; mod tests { use std::iter; - use rand::{Rng, OsRng}; + use rand::{OsRng, Rng}; use curve25519_dalek::scalar::Scalar; use proof_transcript::ProofTranscript; diff --git a/src/lib.rs b/src/lib.rs index 76bc653c..c1c066bb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -29,11 +29,11 @@ mod util; #[doc(include = "../docs/notes.md")] mod notes {} +pub mod aggregated_range_proof; mod generators; mod inner_product_proof; mod proof_transcript; mod range_proof; -pub mod aggregated_range_proof; pub use generators::{Generators, GeneratorsView, PedersenGenerators}; pub use proof_transcript::ProofTranscript; From fc9df98c13329ac7a7523bf47666d05498d16b8d Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 25 Apr 2018 11:38:30 -0700 Subject: [PATCH 125/186] Add tests for ProofBlame --- src/aggregated_range_proof/messages.rs | 2 +- src/aggregated_range_proof/mod.rs | 19 +++++++++++++------ src/lib.rs | 1 - 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 45c51384..83f92940 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -120,7 +120,6 @@ impl ProofShare { } } -// TODO: rename this to something that sounds less awkward pub struct ProofBlame { pub proof_share: ProofShare, pub n: usize, @@ -130,6 +129,7 @@ pub struct ProofBlame { } impl ProofBlame { + /// Returns whether the proof share is valid (Ok) or invalid (Err) pub fn blame(&self) -> Result<(), &'static str> { self.proof_share .verify_share(self.n, self.j, &self.value_challenge, &self.poly_challenge) diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index 60fd50c8..b9f48ccb 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -17,7 +17,7 @@ mod tests { use super::messages::*; use super::party::*; - fn create_multi(rng: &mut R, values: Vec, n: usize) -> Proof { + fn create_multi(rng: &mut R, values: Vec, n: usize) -> (Proof, Vec) { use generators::{Generators, PedersenGenerators}; let m = values.len(); @@ -61,7 +61,6 @@ mod tests { dealer .receive_shares(&proof_shares, &generators.all(), &mut transcript) .unwrap() - .0 } fn test_u32(m: usize) { @@ -72,8 +71,12 @@ mod tests { .map(|()| rng.next_u32() as u64) .take(m) .collect(); - let rp = create_multi(&mut rng, v, 32); - assert!(rp.verify(&mut rng, &mut transcript).is_ok()); + let (proof, proof_blames) = create_multi(&mut rng, v, 32); + assert!(proof.verify(&mut rng, &mut transcript).is_ok()); + proof_blames + .iter() + .map(|pb| assert!(pb.blame().is_ok())) + .last(); } fn test_u64(m: usize) { @@ -81,8 +84,12 @@ mod tests { let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); let v: Vec = iter::repeat(()).map(|()| rng.next_u64()).take(m).collect(); - let rp = create_multi(&mut rng, v, 64); - assert!(rp.verify(&mut rng, &mut transcript).is_ok()); + let (proof, proof_blames) = create_multi(&mut rng, v, 64); + 
assert!(proof.verify(&mut rng, &mut transcript).is_ok()); + proof_blames + .iter() + .map(|pb| assert!(pb.blame().is_ok())) + .last(); } #[test] diff --git a/src/lib.rs b/src/lib.rs index c1c066bb..2269f42b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,7 +28,6 @@ mod util; #[doc(include = "../docs/notes.md")] mod notes {} - pub mod aggregated_range_proof; mod generators; mod inner_product_proof; From 81f6760cfa7011c761319d2610ee17c8ad6ab4e9 Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 25 Apr 2018 12:00:54 -0700 Subject: [PATCH 126/186] rename ProofBlame to ProofShareVerifier --- src/aggregated_range_proof/dealer.rs | 8 ++++---- src/aggregated_range_proof/messages.rs | 6 +++--- src/aggregated_range_proof/mod.rs | 18 +++++++++++------- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 6dbd1ab0..25e997aa 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -134,14 +134,14 @@ impl DealerAwaitingProofShares { proof_shares: &Vec, gen: &GeneratorsView, transcript: &mut ProofTranscript, - ) -> Result<(Proof, Vec), &'static str> { + ) -> Result<(Proof, Vec), &'static str> { if self.m != proof_shares.len() { return Err("Length of proof shares doesn't match expected length m"); } - let mut proof_blame = Vec::new(); + let mut share_verifiers = Vec::new(); for (j, proof_share) in proof_shares.iter().enumerate() { - proof_blame.push(ProofBlame { + share_verifiers.push(ProofShareVerifier { proof_share: proof_share.clone(), n: self.n, j: j, @@ -222,6 +222,6 @@ impl DealerAwaitingProofShares { ipp_proof, }; - Ok((aggregated_proof, proof_blame)) + Ok((aggregated_proof, share_verifiers)) } } diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 83f92940..0200eb29 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -120,7 +120,7 @@ impl ProofShare { } } -pub struct ProofBlame { +pub struct ProofShareVerifier { pub proof_share: ProofShare, pub n: usize, pub j: usize, @@ -128,9 +128,9 @@ pub struct ProofBlame { pub poly_challenge: PolyChallenge, } -impl ProofBlame { +impl ProofShareVerifier { /// Returns whether the proof share is valid (Ok) or invalid (Err) - pub fn blame(&self) -> Result<(), &'static str> { + pub fn verify_share(&self) -> Result<(), &'static str> { self.proof_share .verify_share(self.n, self.j, &self.value_challenge, &self.poly_challenge) } diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index b9f48ccb..2eb32b31 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -17,7 +17,11 @@ mod tests { use super::messages::*; use super::party::*; - fn create_multi(rng: &mut R, values: Vec, n: usize) -> (Proof, Vec) { + fn create_multi( + rng: &mut R, + values: Vec, + n: usize, + ) -> (Proof, Vec) { use generators::{Generators, PedersenGenerators}; let m = values.len(); @@ -71,11 +75,11 @@ mod tests { .map(|()| rng.next_u32() as u64) .take(m) .collect(); - let (proof, proof_blames) = create_multi(&mut rng, v, 32); + let (proof, share_verifiers) = create_multi(&mut rng, v, 32); assert!(proof.verify(&mut rng, &mut transcript).is_ok()); - proof_blames + share_verifiers .iter() - .map(|pb| assert!(pb.blame().is_ok())) + .map(|sv| assert!(sv.verify_share().is_ok())) .last(); } @@ -84,11 +88,11 @@ mod tests { let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); let v: Vec = 
iter::repeat(()).map(|()| rng.next_u64()).take(m).collect(); - let (proof, proof_blames) = create_multi(&mut rng, v, 64); + let (proof, share_verifiers) = create_multi(&mut rng, v, 64); assert!(proof.verify(&mut rng, &mut transcript).is_ok()); - proof_blames + share_verifiers .iter() - .map(|pb| assert!(pb.blame().is_ok())) + .map(|sv| assert!(sv.verify_share().is_ok())) .last(); } From 82988a02d06994937dc0c64f987ef39c9dbce1ab Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 25 Apr 2018 13:43:46 -0700 Subject: [PATCH 127/186] add fast sum_of_powers func to utils --- src/aggregated_range_proof/messages.rs | 19 +++------ src/util.rs | 56 +++++++++++++++++++++++++- 2 files changed, 60 insertions(+), 15 deletions(-) diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 0200eb29..ebc122f9 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -97,9 +97,9 @@ impl ProofShare { return Err("P check is not equal to zero"); } - let sum_of_powers_of_y = sum_of_powers_of(&y, n); - let sum_of_powers_of_2 = sum_of_powers_of(&Scalar::from_u64(2), n); - let delta = (z - zz) * sum_of_powers_of_y * y_jn - z * zz * sum_of_powers_of_2 * z_j; + let sum_of_powers_y = util::sum_of_powers(&y, n); + let sum_of_powers_2 = util::sum_of_powers(&Scalar::from_u64(2), n); + let delta = (z - zz) * sum_of_powers_y * y_jn - z * zz * sum_of_powers_2 * z_j; let t_check = ristretto::vartime::multiscalar_mul( iter::once(zz * z_j) .chain(iter::once(x)) @@ -257,16 +257,9 @@ impl Proof { /// Compute delta(y,z) = (z - z^2)<1^n*m, y^n*m> + z^3 <1, 2^n*m> * \sum_j=0^(m-1) z^j fn delta(n: usize, m: usize, y: &Scalar, z: &Scalar) -> Scalar { - let sum_y = sum_of_powers_of(y, n * m); - let sum_2 = sum_of_powers_of(&Scalar::from_u64(2), n); - let sum_z = sum_of_powers_of(z, m); + let sum_y = util::sum_of_powers(y, n * m); + let sum_2 = util::sum_of_powers(&Scalar::from_u64(2), n); + let sum_z = util::sum_of_powers(z, m); (z - z * z) * sum_y - z * z * z * sum_2 * sum_z } - -// XXX this could be more efficient, esp for powers of 2 -fn sum_of_powers_of(a: &Scalar, to: usize) -> Scalar { - util::exp_iter(*a) - .take(to) - .fold(Scalar::zero(), |acc, x| acc + x) -} diff --git a/src/util.rs b/src/util.rs index 6207b49c..007b725f 100644 --- a/src/util.rs +++ b/src/util.rs @@ -100,6 +100,33 @@ pub fn scalar_exp_vartime(x: &Scalar, mut n: u64) -> Scalar { result } +/// Takes the sum of all the powers of `x`, up to `n` +/// If `n` is a power of 2, it uses the efficient algorithm with `2*lg n` multiplcations and additions. +/// If `n` is not a power of 2, it uses the slow algorithm with `n` multiplications and additions. +/// In the Bulletproofs case, all calls to `sum_of_powers` should have `n` as a power of 2. +pub fn sum_of_powers(x: &Scalar, n: usize) -> Scalar { + if !n.is_power_of_two() { + return sum_of_powers_slow(x, n); + } + if n == 0 || n == 1 { + return Scalar::from_u64(n as u64); + } + let mut m = n; + let mut result = Scalar::one() + x; + let mut factor = *x; + while m > 2 { + factor = factor * factor; + result = result + factor * result; + m = m / 2; + } + result +} + +// takes the sum of all of the powers of x, up to n +fn sum_of_powers_slow(x: &Scalar, n: usize) -> Scalar { + exp_iter(*x).take(n).fold(Scalar::zero(), |acc, x| acc + x) +} + #[cfg(test)] mod tests { use super::*; @@ -132,7 +159,7 @@ mod tests { } /// Raises `x` to the power `n`. 
- pub fn scalar_exp_vartime_slow(x: &Scalar, n: u64) -> Scalar { + fn scalar_exp_vartime_slow(x: &Scalar, n: u64) -> Scalar { let mut result = Scalar::one(); for _ in 0..n { result = result * x; @@ -141,7 +168,7 @@ mod tests { } #[test] - fn scalar_exp() { + fn test_scalar_exp() { let x = Scalar::from_bits( *b"\x84\xfc\xbcOx\x12\xa0\x06\xd7\x91\xd9z:'\xdd\x1e!CE\xf7\xb1\xb9Vz\x810sD\x96\x85\xb5\x07", ); @@ -157,4 +184,29 @@ mod tests { scalar_exp_vartime_slow(&x, 0b11001010) ); } + + #[test] + fn test_sum_of_powers() { + let x = Scalar::from_u64(10); + assert_eq!(sum_of_powers_slow(&x, 0), sum_of_powers(&x, 0)); + assert_eq!(sum_of_powers_slow(&x, 1), sum_of_powers(&x, 1)); + assert_eq!(sum_of_powers_slow(&x, 2), sum_of_powers(&x, 2)); + assert_eq!(sum_of_powers_slow(&x, 4), sum_of_powers(&x, 4)); + assert_eq!(sum_of_powers_slow(&x, 8), sum_of_powers(&x, 8)); + assert_eq!(sum_of_powers_slow(&x, 16), sum_of_powers(&x, 16)); + assert_eq!(sum_of_powers_slow(&x, 32), sum_of_powers(&x, 32)); + assert_eq!(sum_of_powers_slow(&x, 64), sum_of_powers(&x, 64)); + } + + #[test] + fn test_sum_of_powers_slow() { + let x = Scalar::from_u64(10); + assert_eq!(sum_of_powers_slow(&x, 0), Scalar::zero()); + assert_eq!(sum_of_powers_slow(&x, 1), Scalar::one()); + assert_eq!(sum_of_powers_slow(&x, 2), Scalar::from_u64(11)); + assert_eq!(sum_of_powers_slow(&x, 3), Scalar::from_u64(111)); + assert_eq!(sum_of_powers_slow(&x, 4), Scalar::from_u64(1111)); + assert_eq!(sum_of_powers_slow(&x, 5), Scalar::from_u64(11111)); + assert_eq!(sum_of_powers_slow(&x, 6), Scalar::from_u64(111111)); + } } From 737d52c3efd8b88b6e3f665ae095331b636566dd Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Wed, 25 Apr 2018 17:05:16 -0700 Subject: [PATCH 128/186] Rename Proof to AggregatedProof --- src/aggregated_range_proof/dealer.rs | 4 ++-- src/aggregated_range_proof/messages.rs | 4 ++-- src/aggregated_range_proof/mod.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 25e997aa..297610b4 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -134,7 +134,7 @@ impl DealerAwaitingProofShares { proof_shares: &Vec, gen: &GeneratorsView, transcript: &mut ProofTranscript, - ) -> Result<(Proof, Vec), &'static str> { + ) -> Result<(AggregatedProof, Vec), &'static str> { if self.m != proof_shares.len() { return Err("Length of proof shares doesn't match expected length m"); } @@ -209,7 +209,7 @@ impl DealerAwaitingProofShares { r_vec.clone(), ); - let aggregated_proof = Proof { + let aggregated_proof = AggregatedProof { n: self.n, value_commitments, A, diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index ebc122f9..786aed81 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -136,7 +136,7 @@ impl ProofShareVerifier { } } -pub struct Proof { +pub struct AggregatedProof { pub n: usize, /// Commitment to the value // XXX this should not be included, so that we can prove about existing commitments @@ -160,7 +160,7 @@ pub struct Proof { pub ipp_proof: inner_product_proof::InnerProductProof, } -impl Proof { +impl AggregatedProof { pub fn verify(&self, rng: &mut R, transcript: &mut ProofTranscript) -> Result<(), ()> { use generators::{Generators, PedersenGenerators}; diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index 2eb32b31..adf8e82f 100644 --- 
a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -21,7 +21,7 @@ mod tests { rng: &mut R, values: Vec, n: usize, - ) -> (Proof, Vec) { + ) -> (AggregatedProof, Vec) { use generators::{Generators, PedersenGenerators}; let m = values.len(); From c370ba517678097c99a6114a683179355578bc9a Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Wed, 25 Apr 2018 17:51:16 -0700 Subject: [PATCH 129/186] create a SinglePartyAggregator --- src/aggregated_range_proof/mod.rs | 70 +++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index adf8e82f..d9b28f1a 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -1,9 +1,79 @@ #![allow(non_snake_case)] +use rand::Rng; + +use curve25519_dalek::scalar::Scalar; + +use generators::Generators; +use proof_transcript::ProofTranscript; + pub mod dealer; pub mod messages; pub mod party; +pub use self::messages::AggregatedProof; + +struct SinglePartyAggregator {} + +impl SinglePartyAggregator { + /// Create an aggregated rangeproof of multiple values. + /// + /// This performs the same proof aggregation MPC protocol, but + /// with one party playing all roles. + /// + /// The length of `values` must be a power of 2. + /// + /// XXX this should allow proving about existing commitments. + fn generate_proof( + generators: &Generators, + transcript: &mut ProofTranscript, + rng: &mut R, + values: &[u64], + n: usize, + ) -> Result { + use self::dealer::*; + use self::messages::*; + use self::party::*; + + let dealer = Dealer::new(n, values.len(), transcript)?; + + let parties: Vec<_> = values + .iter() + .map(|&v| { + let v_blinding = Scalar::random(rng); + Party::new(v, v_blinding, n, &generators) + }) + // Collect the iterator of Results into a Result, then unwrap it + .collect::,_>>()?; + + let (parties, value_commitments): (Vec<_>, Vec<_>) = parties + .into_iter() + .enumerate() + .map(|(j, p)| p.assign_position(j, rng)) + .unzip(); + + let (dealer, value_challenge) = + dealer.receive_value_commitments(&value_commitments, transcript)?; + + let (parties, poly_commitments): (Vec<_>, Vec<_>) = parties + .into_iter() + .map(|p| p.apply_challenge(&value_challenge, rng)) + .unzip(); + + let (dealer, poly_challenge) = + dealer.receive_poly_commitments(&poly_commitments, transcript)?; + + let proof_shares: Vec<_> = parties + .into_iter() + .map(|p| p.apply_challenge(&poly_challenge)) + .collect(); + + let (proof, _) = dealer.receive_shares(&proof_shares, &generators.all(), transcript)?; + + Ok(proof) + } +} + #[cfg(test)] mod tests { use std::iter; From e77b368d1abef9f7d1160121ce2ea3d275e4ec49 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Tue, 24 Apr 2018 13:17:16 -0700 Subject: [PATCH 130/186] use standard tiny-keccak with duplex construction --- Cargo.toml | 5 +- src/proof_transcript.rs | 236 +++++++++++++++++++++++++++------------- 2 files changed, 160 insertions(+), 81 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index abe67906..104e38d3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,10 +19,7 @@ rand = "^0.4" byteorder = "1.2.1" serde = "1" serde_derive = "1" - -[dependencies.tiny-keccak] -git = 'https://github.com/chain/tiny-keccak.git' -rev = '5925f81b3c351440283c3328e2345d982aac0f6e' +tiny-keccak = "1.4.1" [dev-dependencies] hex = "^0.3" diff --git a/src/proof_transcript.rs b/src/proof_transcript.rs index aa15c0cb..f847aa81 100644 --- a/src/proof_transcript.rs +++ b/src/proof_transcript.rs @@ -6,10 +6,6 
@@ use curve25519_dalek::scalar::Scalar; -// XXX This uses experiment fork of tiny_keccak with half-duplex -// support that we require in this implementation. -// Review this after this PR is merged or updated: -// https://github.com/debris/tiny-keccak/pull/24 use tiny_keccak::Keccak; use byteorder::{ByteOrder, LittleEndian}; @@ -63,41 +59,51 @@ use byteorder::{ByteOrder, LittleEndian}; #[derive(Clone)] pub struct ProofTranscript { hash: Keccak, + rate: usize, + write_offset: usize, // index within a block where the message must be absorbed } impl ProofTranscript { + // Implementation notes + // + // The implementation has 3 layers: + // 1. commit/challenge - take input/output buffers <64K, responsible for disambiguation (length prefixing) + // 2. write/read - take arbitrary buffers, responsible for splitting data over Keccak-f invocations and padding + // 3. absorb/squeeze - actual sponge operations, outer layers ensure that absorb/squeeze do not perform unnecessary permutation + // + /// Begin a new, empty proof transcript, using the given `label` /// for domain separation. pub fn new(label: &[u8]) -> Self { let mut ro = ProofTranscript { + // NOTE: if you change the security parameter, also change the rate below hash: Keccak::new_shake128(), + rate: 1600/8 - (2*128/8), // 168 bytes + write_offset: 0, }; + // We will bump the version prefix each time we + // make a breaking change in order to disambiguate + // from the previous versions of this implementation. + ro.commit(b"ProofTranscript v2"); ro.commit(label); - // makes sure the label is disambiguated from the rest of the messages. - ro.pad(); + let mut empty = [0;0]; + ro.challenge_bytes(&mut empty[..]); ro } - /// Commit a `message` to the proof transcript. + /// Commit a `input` to the proof transcript. /// /// # Note /// - /// Each message must be shorter than 64Kb (65536 bytes). - pub fn commit(&mut self, message: &[u8]) { - let len = message.len(); + /// Each input must be ≤ than the number of bytes + /// returned by `max_commit_size()`. + pub fn commit(&mut self, input: &[u8]) { + let len = input.len(); if len > (u16::max_value() as usize) { - panic!("Committed message must be less than 64Kb!"); + panic!("Committed input must be less than 64Kb!"); } - - let mut len_prefix = [0u8; 2]; - LittleEndian::write_u16(&mut len_prefix, len as u16); - - // XXX we rely on tiny_keccak experimental support for half-duplex mode and - // correct switching from absorbing to squeezing and back. - // Review this after this PR is merged or updated: - // https://github.com/debris/tiny-keccak/pull/24 - self.hash.absorb(&len_prefix); - self.hash.absorb(message); + self.write_u16(len as u16); + self.write(input); } /// Commit a `u64` to the proof transcript. @@ -112,12 +118,18 @@ impl ProofTranscript { } /// Extracts an arbitrary-sized challenge byte slice. - pub fn challenge_bytes(&mut self, mut output: &mut [u8]) { - // XXX we rely on tiny_keccak experimental support for half-duplex mode and - // correct switching from absorbing to squeezing and back. 
- // Review this after this PR is merged or updated: - // https://github.com/debris/tiny-keccak/pull/24 - self.hash.squeeze(&mut output); + pub fn challenge_bytes(&mut self, output: &mut [u8]) { + let len = output.len(); + if output.len() > (u16::max_value() as usize) { + panic!("Challenge output must be less than 64Kb!"); + } + // Note: when reading, length prefix N is followed by keccak padding 10*1 + // as if empty message was written; when writing, length prefix N is followed + // by N bytes followed by keccak padding 10*1. + // This creates ambiguity only for case N=0 (empty write or empty read), + // which is safe as no information is actually transmitted in either direction. + self.write_u16(len as u16); + self.read(output); } /// Extracts a challenge scalar. @@ -130,12 +142,60 @@ impl ProofTranscript { Scalar::from_bytes_mod_order_wide(&buf) } - /// Pad separates the prior operations by padding - /// the rest of the block with zeroes and applying a permutation. - /// Each incoming message is length-prefixed anyway, but padding - /// enables pre-computing and re-using the oracle state. - fn pad(&mut self) { + /// Extracts a pair of challenge scalars. + /// + /// This is a convenience method that extracts 128 bytes and + /// reduces each 64-byte half modulo the group order. + pub fn challenge_scalars_pair(&mut self) -> (Scalar, Scalar) { + let mut buf128 = [0u8; 128]; + let mut buf64 = [0u8; 64]; + self.challenge_bytes(&mut buf128); + buf64.copy_from_slice(&buf128[..64]); + let a = Scalar::from_bytes_mod_order_wide(&buf64); + buf64.copy_from_slice(&buf128[64..]); + let b = Scalar::from_bytes_mod_order_wide(&buf64); + (a,b) + } + + /// Internal API: writes 2-byte length prefix. + fn write_u16(&mut self, integer: u16) { + let mut intbuf = [0u8; 2]; + LittleEndian::write_u16(&mut intbuf, integer); + self.write(&intbuf); + } + + /// Internal API: writes arbitrary byte slice + /// splitting it over multiple duplex calls if needed. + fn write(&mut self, mut input: &[u8]) { + // `write` can be called multiple times. + // If we overflow the available room (`rate-1` at most) + // we absorb what we can, add Keccak padding, permute and continue. + let mut room = self.rate - 1 - self.write_offset; // 1 byte is reserved for keccak padding 10*1. + while input.len() > room { + self.hash.absorb(&input[..room]); + self.hash.pad(); + self.hash.fill_block(); + self.write_offset = 0; + room = self.rate - 1; + input = &input[room..]; + } + self.hash.absorb(input); + self.write_offset += input.len(); // could end up == (rate-1) + } + + /// Internal API: reads arbitrary byte slice + /// splitting it over multiple duplex calls if needed. + fn read(&mut self, output: &mut [u8]) { + // Note 1: `read` is called only once after `write`, so we do + // not need to support multiple reads from some offset. + // We only need to complete the pending duplex call by padding and permuting. + // Note 2: Since we hash in the total output buffer length, + // we can use default squeeze behaviour w/o simulating blank inputs: + // the resulting byte-stream will be disambiguated by that length prefix and keccak padding. 
+ self.hash.pad(); self.hash.fill_block(); + self.write_offset = 0; + self.hash.squeeze(output); } } @@ -149,61 +209,56 @@ mod tests { { let mut ro = ProofTranscript::new(b"TestProtocol"); ro.commit(b"test"); - { - let mut ch = [0u8; 32]; - ro.challenge_bytes(&mut ch); - assert_eq!( - hex::encode(ch), - "9ba30a0e71e8632b55fbae92495440b6afb5d2646ba6b1bb419933d97e06b810" - ); - ro.challenge_bytes(&mut ch); - assert_eq!( - hex::encode(ch), - "add523844517c2320fc23ca72423b0ee072c6d076b05a6a7b6f46d8d2e322f94" - ); - ro.challenge_bytes(&mut ch); - assert_eq!( - hex::encode(ch), - "ac279a11cac0b1271d210592c552d719d82d67c82d7f86772ed7bc6618b0927c" - ); - } - - let mut ro = ProofTranscript::new(b"TestProtocol"); - ro.commit(b"test"); - { - let mut ch = [0u8; 16]; - ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "9ba30a0e71e8632b55fbae92495440b6"); - ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "afb5d2646ba6b1bb419933d97e06b810"); - ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "add523844517c2320fc23ca72423b0ee"); - } + let mut ch = [0u8; 32]; + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "956ea2a6919ea9d83523fcf31e96b78d10070d25e2c74e9b9fbab6e39f75c587" + ); + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "86533e8e5dad89cbea10d4a6c05a126713d6672005ab3e6737665f25cbad37b7" + ); + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "25b9a2ef2ee8a8e5f2a2397c13cd2ddb28f88c7aef9860d0c9e405383fa0a072" + ); + } + { let mut ro = ProofTranscript::new(b"TestProtocol"); ro.commit(b"test"); - { - let mut ch = [0u8; 16]; - ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "9ba30a0e71e8632b55fbae92495440b6"); - ro.commit(b"extra commitment"); - ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "11536e09cedbb6b302d8c7cd96471be5"); - ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "058c383da5f2e193a381aaa420b505b2"); - } + let mut ch = [0u8; 32]; + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "956ea2a6919ea9d83523fcf31e96b78d10070d25e2c74e9b9fbab6e39f75c587" + ); + ro.commit(b"extra commitment"); + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "9322b9a5b29adb4a2f50649a7827cfd8e6e385ec02b29c75375720d8dcb18e02" + ); + ro.challenge_bytes(&mut ch); + assert_eq!( + hex::encode(ch), + "ea6f00d4158902aff1e4336c8774f0ad753baec8e90df3485240dbc9e4244813" + ); } } #[test] - fn messages_are_disambiguated_by_length_prefix() { + fn inputs_are_disambiguated_by_length_prefix() { { let mut ro = ProofTranscript::new(b"TestProtocol"); ro.commit(b"msg1msg2"); { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "1ad843ea2bf7f8b6"); + assert_eq!(hex::encode(ch), "b66f3c6296c4e048"); } } { @@ -213,7 +268,7 @@ mod tests { { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "79abbe29d8c33bb0"); + assert_eq!(hex::encode(ch), "d4633732e4ab0ebb"); } } { @@ -223,7 +278,7 @@ mod tests { { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "f88d0f790cde50d5"); + assert_eq!(hex::encode(ch), "3a8811dd01672b37"); } } { @@ -234,8 +289,35 @@ mod tests { { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "90ca22b443fb78a1"); + assert_eq!(hex::encode(ch), "49800ad89aedfd44"); } } } + + + #[test] + fn outputs_are_disambiguated_by_length_prefix() { + let mut ro = ProofTranscript::new(b"TestProtocol"); + { + let mut ch = [0u8; 16]; + ro.challenge_bytes(&mut ch); + 
assert_eq!(hex::encode(ch), "2c56459cdec02be511b7f97a41a54eba"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "dfa982f9ad6167f3eb5eea78815d062b"); + } + + let mut ro = ProofTranscript::new(b"TestProtocol"); + { + let mut ch = [0u8; 8]; + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "c5103a6cfa35c699"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "d471afab3b0a2ff1"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "4697bc53108977b2"); + ro.challenge_bytes(&mut ch); + assert_eq!(hex::encode(ch), "67e8d3923df50dd0"); + } + } + } From 3dc6595a948d7eceda0430e8c76f00e0fee52d01 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Tue, 24 Apr 2018 13:27:56 -0700 Subject: [PATCH 131/186] test vectors updated --- src/proof_transcript.rs | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/src/proof_transcript.rs b/src/proof_transcript.rs index f847aa81..e5541bcf 100644 --- a/src/proof_transcript.rs +++ b/src/proof_transcript.rs @@ -86,8 +86,6 @@ impl ProofTranscript { // from the previous versions of this implementation. ro.commit(b"ProofTranscript v2"); ro.commit(label); - let mut empty = [0;0]; - ro.challenge_bytes(&mut empty[..]); ro } @@ -213,17 +211,17 @@ mod tests { ro.challenge_bytes(&mut ch); assert_eq!( hex::encode(ch), - "956ea2a6919ea9d83523fcf31e96b78d10070d25e2c74e9b9fbab6e39f75c587" + "dec44a90f423c15874f7c0afaf62cc6cc0987bf428202cb3508fc7d7c9b5b30a" ); ro.challenge_bytes(&mut ch); assert_eq!( hex::encode(ch), - "86533e8e5dad89cbea10d4a6c05a126713d6672005ab3e6737665f25cbad37b7" + "f83256ef4964d71ec6f2dd2f79db70820c781bd8c3d1fceec7cbfa4965d4e530" ); ro.challenge_bytes(&mut ch); assert_eq!( hex::encode(ch), - "25b9a2ef2ee8a8e5f2a2397c13cd2ddb28f88c7aef9860d0c9e405383fa0a072" + "962f9ef161604c5dcbe3387773b293a0e27a6e6ee14ec5d9f6c78a45c36fc0e1" ); } @@ -234,18 +232,18 @@ mod tests { ro.challenge_bytes(&mut ch); assert_eq!( hex::encode(ch), - "956ea2a6919ea9d83523fcf31e96b78d10070d25e2c74e9b9fbab6e39f75c587" + "dec44a90f423c15874f7c0afaf62cc6cc0987bf428202cb3508fc7d7c9b5b30a" ); ro.commit(b"extra commitment"); ro.challenge_bytes(&mut ch); assert_eq!( hex::encode(ch), - "9322b9a5b29adb4a2f50649a7827cfd8e6e385ec02b29c75375720d8dcb18e02" + "edf99afca6c21e4240f33826d60cb1b7c5d59d3dd363d2928bab7b8f94d24eaa" ); ro.challenge_bytes(&mut ch); assert_eq!( hex::encode(ch), - "ea6f00d4158902aff1e4336c8774f0ad753baec8e90df3485240dbc9e4244813" + "a42eabb9d1c9c73dc8c33c0933cee8d5fabd48fcab686d9fcb8f1680841e4369" ); } } @@ -258,7 +256,7 @@ mod tests { { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "b66f3c6296c4e048"); + assert_eq!(hex::encode(ch), "3a941266af4275d5"); } } { @@ -268,7 +266,7 @@ mod tests { { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "d4633732e4ab0ebb"); + assert_eq!(hex::encode(ch), "644d94299bcc5590"); } } { @@ -278,7 +276,7 @@ mod tests { { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "3a8811dd01672b37"); + assert_eq!(hex::encode(ch), "14f18d260e679f9a"); } } { @@ -289,7 +287,7 @@ mod tests { { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "49800ad89aedfd44"); + assert_eq!(hex::encode(ch), "09dccc9d7dfa6f37"); } } } @@ -301,22 +299,22 @@ mod tests { { let mut ch = [0u8; 16]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "2c56459cdec02be511b7f97a41a54eba"); + assert_eq!(hex::encode(ch), "60890c8d774932db1aba587941cbffca"); 
ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "dfa982f9ad6167f3eb5eea78815d062b"); + assert_eq!(hex::encode(ch), "bb9308c7d34769ae2a3c040394efb2ab"); } let mut ro = ProofTranscript::new(b"TestProtocol"); { let mut ch = [0u8; 8]; ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "c5103a6cfa35c699"); + assert_eq!(hex::encode(ch), "cc76fac64922bc58"); ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "d471afab3b0a2ff1"); + assert_eq!(hex::encode(ch), "d259804aae5c3246"); ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "4697bc53108977b2"); + assert_eq!(hex::encode(ch), "6d3a732156286895"); ro.challenge_bytes(&mut ch); - assert_eq!(hex::encode(ch), "67e8d3923df50dd0"); + assert_eq!(hex::encode(ch), "2165dcd38764b5ae"); } } From b4e21ff625271b5eba445990837e8f963bafe9bf Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Wed, 25 Apr 2018 15:20:49 -0700 Subject: [PATCH 132/186] improved documentation --- src/proof_transcript.rs | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/src/proof_transcript.rs b/src/proof_transcript.rs index e5541bcf..65419470 100644 --- a/src/proof_transcript.rs +++ b/src/proof_transcript.rs @@ -27,12 +27,13 @@ use byteorder::{ByteOrder, LittleEndian}; /// ensure that their challenge values are bound to the *entire* proof /// transcript, not just the sub-protocol. /// -/// Internally, the `ProofTranscript` is supposed to use Keccak to -/// absorb incoming messages and to squeeze challenges. The -/// construction currently used is ad-hoc, has no security analysis, -/// and is **only suitable for testing**. +/// ## Warning /// -/// # Example +/// Internally, the `ProofTranscript` uses ad-hoc duplex construction +/// using Keccak that absorbs incoming messages and squeezes challenges. +/// There is no security analysis yet, so it is **only suitable for testing**. +/// +/// ## Example /// /// ``` /// # extern crate curve25519_dalek; @@ -116,6 +117,10 @@ impl ProofTranscript { } /// Extracts an arbitrary-sized challenge byte slice. + /// + /// Note: each call performs at least one Keccak permutation, + /// so if you need to read multiple logical challenges at once, + /// you should read a bigger slice in one call for minimal overhead. pub fn challenge_bytes(&mut self, output: &mut [u8]) { let len = output.len(); if output.len() > (u16::max_value() as usize) { @@ -134,27 +139,18 @@ impl ProofTranscript { /// /// This is a convenience method that extracts 64 bytes and /// reduces modulo the group order. + /// + /// Note: each call performs at least one Keccak permutation, + /// so if you need to read multiple challenge scalars, + /// for the minimal overhead you should read `n*64` bytes + /// using the `challenge_bytes` method and reduce each + /// 64-byte window into a scalar yourself. pub fn challenge_scalar(&mut self) -> Scalar { let mut buf = [0u8; 64]; self.challenge_bytes(&mut buf); Scalar::from_bytes_mod_order_wide(&buf) } - /// Extracts a pair of challenge scalars. - /// - /// This is a convenience method that extracts 128 bytes and - /// reduces each 64-byte half modulo the group order. 
- pub fn challenge_scalars_pair(&mut self) -> (Scalar, Scalar) { - let mut buf128 = [0u8; 128]; - let mut buf64 = [0u8; 64]; - self.challenge_bytes(&mut buf128); - buf64.copy_from_slice(&buf128[..64]); - let a = Scalar::from_bytes_mod_order_wide(&buf64); - buf64.copy_from_slice(&buf128[64..]); - let b = Scalar::from_bytes_mod_order_wide(&buf64); - (a,b) - } - /// Internal API: writes 2-byte length prefix. fn write_u16(&mut self, integer: u16) { let mut intbuf = [0u8; 2]; From 8bdb1c85b4fb9335b9a84a5c651ae3283d2cb998 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 10:47:37 -0700 Subject: [PATCH 133/186] Fix missing Serialize impl on AggregatedProof --- src/aggregated_range_proof/messages.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 786aed81..50c2b5fa 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -136,6 +136,7 @@ impl ProofShareVerifier { } } +#[derive(Serialize, Deserialize, Clone, Debug)] pub struct AggregatedProof { pub n: usize, /// Commitment to the value From 0909ddbe195b4dbf58af4a17f88c877625bb0123 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 10:48:00 -0700 Subject: [PATCH 134/186] Refactor aggregated proof testing code to do a full round trip. Use the same method as in the single RP code to do: 1. Generate proof 2. Serialize 3. Deserialize 4. Verify --- src/aggregated_range_proof/mod.rs | 173 +++++++++++++++--------------- 1 file changed, 84 insertions(+), 89 deletions(-) diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index d9b28f1a..ecea9526 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -76,117 +76,112 @@ impl SinglePartyAggregator { #[cfg(test)] mod tests { - use std::iter; + use rand::OsRng; - use rand::{OsRng, Rng}; + use super::*; - use curve25519_dalek::scalar::Scalar; - use proof_transcript::ProofTranscript; + use generators::PedersenGenerators; - use super::dealer::*; - use super::messages::*; - use super::party::*; - - fn create_multi( - rng: &mut R, - values: Vec, - n: usize, - ) -> (AggregatedProof, Vec) { - use generators::{Generators, PedersenGenerators}; - - let m = values.len(); + /// Given a bitsize `n`, test the following: + /// + /// 1. Generate `m` random values and create a proof they are all in range; + /// 2. Serialize to wire format; + /// 3. Deserialize from wire format; + /// 4. Verify the proof. + fn singleparty_create_and_verify_helper(n: usize, m: usize) { + // Split the test into two scopes, so that it's explicit what + // data is shared between the prover and the verifier. 
+ + // Use bincode for serialization + use bincode; + + // Both prover and verifier have access to the generators and the proof let generators = Generators::new(PedersenGenerators::default(), n, m); - let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - let parties: Vec<_> = values - .iter() - .map(|&v| { - let v_blinding = Scalar::random(rng); - Party::new(v, v_blinding, n, &generators).unwrap() - }) - .collect(); - - let dealer = Dealer::new(n, m, &mut transcript).unwrap(); - - let (parties, value_commitments): (Vec<_>, Vec<_>) = parties - .into_iter() - .enumerate() - .map(|(j, p)| p.assign_position(j, rng)) - .unzip(); - - let (dealer, value_challenge) = dealer - .receive_value_commitments(&value_commitments, &mut transcript) - .unwrap(); - let (parties, poly_commitments): (Vec<_>, Vec<_>) = parties - .into_iter() - .map(|p| p.apply_challenge(&value_challenge, rng)) - .unzip(); - - let (dealer, poly_challenge) = dealer - .receive_poly_commitments(&poly_commitments, &mut transcript) - .unwrap(); - - let proof_shares: Vec = parties - .into_iter() - .map(|p| p.apply_challenge(&poly_challenge)) - .collect(); - - dealer - .receive_shares(&proof_shares, &generators.all(), &mut transcript) - .unwrap() + // Serialized proof data + let proof_bytes: Vec; + + // Prover's scope + { + // 1. Generate the proof + + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + // XXX this takes max = 2^{n-1} to avoid problems at n = 64 + // would be better to use max = 2^n - 1 + let (min, max) = (0u64, 1 << (n - 1)); + let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); + + let proof = SinglePartyAggregator::generate_proof( + &generators, + &mut transcript, + &mut rng, + &values, + n, + ).unwrap(); + + // 2. Serialize + proof_bytes = bincode::serialize(&proof).unwrap(); + } + + println!( + "Aggregated rangeproof of m={} proofs of n={} bits has size {} bytes", + m, + n, + proof_bytes.len(), + ); + + // Verifier's scope + { + // 3. Deserialize + let proof: AggregatedProof = bincode::deserialize(&proof_bytes).unwrap(); + + // 4. 
Verify with the same customization label as above + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + assert!(proof.verify(&mut rng, &mut transcript).is_ok()); + } } - fn test_u32(m: usize) { - let mut rng = OsRng::new().unwrap(); - let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + #[test] + fn create_and_verify_n_32_m_1() { + singleparty_create_and_verify_helper(32, 1); + } - let v: Vec = iter::repeat(()) - .map(|()| rng.next_u32() as u64) - .take(m) - .collect(); - let (proof, share_verifiers) = create_multi(&mut rng, v, 32); - assert!(proof.verify(&mut rng, &mut transcript).is_ok()); - share_verifiers - .iter() - .map(|sv| assert!(sv.verify_share().is_ok())) - .last(); + #[test] + fn create_and_verify_n_32_m_2() { + singleparty_create_and_verify_helper(32, 2); } - fn test_u64(m: usize) { - let mut rng = OsRng::new().unwrap(); - let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + #[test] + fn create_and_verify_n_32_m_4() { + singleparty_create_and_verify_helper(32, 4); + } - let v: Vec = iter::repeat(()).map(|()| rng.next_u64()).take(m).collect(); - let (proof, share_verifiers) = create_multi(&mut rng, v, 64); - assert!(proof.verify(&mut rng, &mut transcript).is_ok()); - share_verifiers - .iter() - .map(|sv| assert!(sv.verify_share().is_ok())) - .last(); + #[test] + fn create_and_verify_n_32_m_8() { + singleparty_create_and_verify_helper(32, 8); } #[test] - fn one_value() { - test_u32(1); - test_u64(1); + fn create_and_verify_n_64_m_1() { + singleparty_create_and_verify_helper(64, 1); } #[test] - fn two_values() { - test_u32(2); - test_u64(2); + fn create_and_verify_n_64_m_2() { + singleparty_create_and_verify_helper(64, 2); } #[test] - fn four_values() { - test_u32(4); - test_u64(4); + fn create_and_verify_n_64_m_4() { + singleparty_create_and_verify_helper(64, 4); } #[test] - fn eight_values() { - test_u32(8); - test_u64(8); + fn create_and_verify_n_64_m_8() { + singleparty_create_and_verify_helper(64, 8); } } From f92406f65b4da41d4028091f0dd0c03ad5c49129 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 11:07:24 -0700 Subject: [PATCH 135/186] Use a tmp u128 for computing an upper bound in test code --- src/aggregated_range_proof/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index ecea9526..d70b9e4c 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -108,9 +108,7 @@ mod tests { let mut rng = OsRng::new().unwrap(); let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - // XXX this takes max = 2^{n-1} to avoid problems at n = 64 - // would be better to use max = 2^n - 1 - let (min, max) = (0u64, 1 << (n - 1)); + let (min, max) = (0u64, ((1u128 << n) - 1) as u64); let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); let proof = SinglePartyAggregator::generate_proof( From de83342d5a3dd31dbdcc73e925efbc548b0cfdcc Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 12:08:44 -0700 Subject: [PATCH 136/186] Move transcript state inside of the Dealer state Previously, the transcript state was maintained by the protocol orchestrator, and passed in as a parameter to each step of the dealer's work. This changes the code so that the dealer holds on to the transcript reference until the protocol is complete. 
This means a) it's one less parameter b) it would disallow any other code from touching the transcript state while the dealer is active. b) is kind of a fringe benefit, but previously, it's conceivably possible to write code that fed an extra message into the transcript between dealer steps, which would give an invalid proof (since the challenges would not replay). --- src/aggregated_range_proof/dealer.rs | 62 ++++++++++++++-------------- src/aggregated_range_proof/mod.rs | 6 +-- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 297610b4..05f1ce83 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -13,11 +13,11 @@ pub struct Dealer {} impl Dealer { /// Creates a new dealer coordinating `m` parties proving `n`-bit ranges. - pub fn new( + pub fn new<'a>( n: usize, m: usize, - transcript: &mut ProofTranscript, - ) -> Result { + transcript: &'a mut ProofTranscript, + ) -> Result, &'static str> { if !n.is_power_of_two() || n > 64 { return Err("n is not valid: must be a power of 2, and less than or equal to 64"); } @@ -26,24 +26,23 @@ impl Dealer { } transcript.commit_u64(n as u64); transcript.commit_u64(m as u64); - Ok(DealerAwaitingValueCommitments { n, m }) + Ok(DealerAwaitingValueCommitments { n, m, transcript }) } } /// When the dealer is initialized, it only knows the size of the set. -#[derive(Debug)] -pub struct DealerAwaitingValueCommitments { +pub struct DealerAwaitingValueCommitments<'a> { n: usize, m: usize, + transcript: &'a mut ProofTranscript, } -impl DealerAwaitingValueCommitments { +impl<'a> DealerAwaitingValueCommitments<'a> { /// Combines commitments and computes challenge variables. pub fn receive_value_commitments( self, value_commitments: &Vec, - transcript: &mut ProofTranscript, - ) -> Result<(DealerAwaitingPolyCommitments, ValueChallenge), &'static str> { + ) -> Result<(DealerAwaitingPolyCommitments<'a>, ValueChallenge), &'static str> { if self.m != value_commitments.len() { return Err("Length of value commitments doesn't match expected length m"); } @@ -53,24 +52,25 @@ impl DealerAwaitingValueCommitments { for commitment in value_commitments.iter() { // Commit each V individually - transcript.commit(commitment.V.compress().as_bytes()); + self.transcript.commit(commitment.V.compress().as_bytes()); // Commit sums of As and Ss. 
A += commitment.A; S += commitment.S; } - transcript.commit(A.compress().as_bytes()); - transcript.commit(S.compress().as_bytes()); + self.transcript.commit(A.compress().as_bytes()); + self.transcript.commit(S.compress().as_bytes()); - let y = transcript.challenge_scalar(); - let z = transcript.challenge_scalar(); + let y = self.transcript.challenge_scalar(); + let z = self.transcript.challenge_scalar(); let value_challenge = ValueChallenge { y, z }; Ok(( DealerAwaitingPolyCommitments { n: self.n, m: self.m, + transcript: self.transcript, value_challenge: value_challenge.clone(), }, value_challenge, @@ -78,19 +78,18 @@ impl DealerAwaitingValueCommitments { } } -#[derive(Debug)] -pub struct DealerAwaitingPolyCommitments { +pub struct DealerAwaitingPolyCommitments<'a> { n: usize, m: usize, + transcript: &'a mut ProofTranscript, value_challenge: ValueChallenge, } -impl DealerAwaitingPolyCommitments { +impl<'a> DealerAwaitingPolyCommitments<'a> { pub fn receive_poly_commitments( self, poly_commitments: &Vec, - transcript: &mut ProofTranscript, - ) -> Result<(DealerAwaitingProofShares, PolyChallenge), &'static str> { + ) -> Result<(DealerAwaitingProofShares<'a>, PolyChallenge), &'static str> { if self.m != poly_commitments.len() { return Err("Length of poly commitments doesn't match expected length m"); } @@ -102,16 +101,17 @@ impl DealerAwaitingPolyCommitments { T1 += commitment.T_1; T2 += commitment.T_2; } - transcript.commit(T1.compress().as_bytes()); - transcript.commit(T2.compress().as_bytes()); + self.transcript.commit(T1.compress().as_bytes()); + self.transcript.commit(T2.compress().as_bytes()); - let x = transcript.challenge_scalar(); + let x = self.transcript.challenge_scalar(); let poly_challenge = PolyChallenge { x }; Ok(( DealerAwaitingProofShares { n: self.n, m: self.m, + transcript: self.transcript, value_challenge: self.value_challenge, poly_challenge: poly_challenge.clone(), }, @@ -120,20 +120,19 @@ impl DealerAwaitingPolyCommitments { } } -#[derive(Debug)] -pub struct DealerAwaitingProofShares { +pub struct DealerAwaitingProofShares<'a> { n: usize, m: usize, + transcript: &'a mut ProofTranscript, value_challenge: ValueChallenge, poly_challenge: PolyChallenge, } -impl DealerAwaitingProofShares { +impl<'a> DealerAwaitingProofShares<'a> { pub fn receive_shares( self, proof_shares: &Vec, gen: &GeneratorsView, - transcript: &mut ProofTranscript, ) -> Result<(AggregatedProof, Vec), &'static str> { if self.m != proof_shares.len() { return Err("Length of proof shares doesn't match expected length m"); @@ -183,12 +182,13 @@ impl DealerAwaitingProofShares { let e_blinding = proof_shares .iter() .fold(Scalar::zero(), |acc, ps| acc + ps.e_blinding); - transcript.commit(t.as_bytes()); - transcript.commit(t_x_blinding.as_bytes()); - transcript.commit(e_blinding.as_bytes()); + + self.transcript.commit(t.as_bytes()); + self.transcript.commit(t_x_blinding.as_bytes()); + self.transcript.commit(e_blinding.as_bytes()); // Get a challenge value to combine statements for the IPP - let w = transcript.challenge_scalar(); + let w = self.transcript.challenge_scalar(); let Q = w * gen.pedersen_generators.B; let l_vec: Vec = proof_shares @@ -200,7 +200,7 @@ impl DealerAwaitingProofShares { .flat_map(|ps| ps.r_vec.clone().into_iter()) .collect(); let ipp_proof = inner_product_proof::InnerProductProof::create( - transcript, + self.transcript, &Q, util::exp_iter(self.value_challenge.y.invert()), gen.G.to_vec(), diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index 
d70b9e4c..d52b95f6 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -53,7 +53,7 @@ impl SinglePartyAggregator { .unzip(); let (dealer, value_challenge) = - dealer.receive_value_commitments(&value_commitments, transcript)?; + dealer.receive_value_commitments(&value_commitments)?; let (parties, poly_commitments): (Vec<_>, Vec<_>) = parties .into_iter() @@ -61,14 +61,14 @@ impl SinglePartyAggregator { .unzip(); let (dealer, poly_challenge) = - dealer.receive_poly_commitments(&poly_commitments, transcript)?; + dealer.receive_poly_commitments(&poly_commitments)?; let proof_shares: Vec<_> = parties .into_iter() .map(|p| p.apply_challenge(&poly_challenge)) .collect(); - let (proof, _) = dealer.receive_shares(&proof_shares, &generators.all(), transcript)?; + let (proof, _) = dealer.receive_shares(&proof_shares, &generators.all())?; Ok(proof) } From 4484ba39768fa5ce75c6873df951d5a56d176990 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 12:30:22 -0700 Subject: [PATCH 137/186] Move generator view into the dealer state This means that all of the dealer protocol steps only take the messages at that stage. --- src/aggregated_range_proof/dealer.rs | 44 ++++++++++++++++++---------- src/aggregated_range_proof/mod.rs | 10 +++---- 2 files changed, 32 insertions(+), 22 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 05f1ce83..75d6123e 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -13,11 +13,12 @@ pub struct Dealer {} impl Dealer { /// Creates a new dealer coordinating `m` parties proving `n`-bit ranges. - pub fn new<'a>( + pub fn new<'a, 'b>( + gens: GeneratorsView<'b>, n: usize, m: usize, transcript: &'a mut ProofTranscript, - ) -> Result, &'static str> { + ) -> Result, &'static str> { if !n.is_power_of_two() || n > 64 { return Err("n is not valid: must be a power of 2, and less than or equal to 64"); } @@ -26,23 +27,30 @@ impl Dealer { } transcript.commit_u64(n as u64); transcript.commit_u64(m as u64); - Ok(DealerAwaitingValueCommitments { n, m, transcript }) + Ok(DealerAwaitingValueCommitments { + n, + m, + transcript, + gens, + }) } } -/// When the dealer is initialized, it only knows the size of the set. -pub struct DealerAwaitingValueCommitments<'a> { +/// The initial dealer state, waiting for the parties to send value +/// commitments. +pub struct DealerAwaitingValueCommitments<'a, 'b> { n: usize, m: usize, transcript: &'a mut ProofTranscript, + gens: GeneratorsView<'b>, } -impl<'a> DealerAwaitingValueCommitments<'a> { +impl<'a, 'b> DealerAwaitingValueCommitments<'a, 'b> { /// Combines commitments and computes challenge variables. 
pub fn receive_value_commitments( self, value_commitments: &Vec, - ) -> Result<(DealerAwaitingPolyCommitments<'a>, ValueChallenge), &'static str> { + ) -> Result<(DealerAwaitingPolyCommitments<'a, 'b>, ValueChallenge), &'static str> { if self.m != value_commitments.len() { return Err("Length of value commitments doesn't match expected length m"); } @@ -71,6 +79,7 @@ impl<'a> DealerAwaitingValueCommitments<'a> { n: self.n, m: self.m, transcript: self.transcript, + gens: self.gens, value_challenge: value_challenge.clone(), }, value_challenge, @@ -78,18 +87,19 @@ impl<'a> DealerAwaitingValueCommitments<'a> { } } -pub struct DealerAwaitingPolyCommitments<'a> { +pub struct DealerAwaitingPolyCommitments<'a, 'b> { n: usize, m: usize, transcript: &'a mut ProofTranscript, + gens: GeneratorsView<'b>, value_challenge: ValueChallenge, } -impl<'a> DealerAwaitingPolyCommitments<'a> { +impl<'a, 'b> DealerAwaitingPolyCommitments<'a, 'b> { pub fn receive_poly_commitments( self, poly_commitments: &Vec, - ) -> Result<(DealerAwaitingProofShares<'a>, PolyChallenge), &'static str> { + ) -> Result<(DealerAwaitingProofShares<'a, 'b>, PolyChallenge), &'static str> { if self.m != poly_commitments.len() { return Err("Length of poly commitments doesn't match expected length m"); } @@ -112,6 +122,7 @@ impl<'a> DealerAwaitingPolyCommitments<'a> { n: self.n, m: self.m, transcript: self.transcript, + gens: self.gens, value_challenge: self.value_challenge, poly_challenge: poly_challenge.clone(), }, @@ -120,19 +131,19 @@ impl<'a> DealerAwaitingPolyCommitments<'a> { } } -pub struct DealerAwaitingProofShares<'a> { +pub struct DealerAwaitingProofShares<'a, 'b> { n: usize, m: usize, transcript: &'a mut ProofTranscript, + gens: GeneratorsView<'b>, value_challenge: ValueChallenge, poly_challenge: PolyChallenge, } -impl<'a> DealerAwaitingProofShares<'a> { +impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { pub fn receive_shares( self, proof_shares: &Vec, - gen: &GeneratorsView, ) -> Result<(AggregatedProof, Vec), &'static str> { if self.m != proof_shares.len() { return Err("Length of proof shares doesn't match expected length m"); @@ -189,7 +200,7 @@ impl<'a> DealerAwaitingProofShares<'a> { // Get a challenge value to combine statements for the IPP let w = self.transcript.challenge_scalar(); - let Q = w * gen.pedersen_generators.B; + let Q = w * self.gens.pedersen_generators.B; let l_vec: Vec = proof_shares .iter() @@ -199,12 +210,13 @@ impl<'a> DealerAwaitingProofShares<'a> { .iter() .flat_map(|ps| ps.r_vec.clone().into_iter()) .collect(); + let ipp_proof = inner_product_proof::InnerProductProof::create( self.transcript, &Q, util::exp_iter(self.value_challenge.y.invert()), - gen.G.to_vec(), - gen.H.to_vec(), + self.gens.G.to_vec(), + self.gens.H.to_vec(), l_vec.clone(), r_vec.clone(), ); diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index d52b95f6..ac7fe61a 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -35,7 +35,7 @@ impl SinglePartyAggregator { use self::messages::*; use self::party::*; - let dealer = Dealer::new(n, values.len(), transcript)?; + let dealer = Dealer::new(generators.all(), n, values.len(), transcript)?; let parties: Vec<_> = values .iter() @@ -52,23 +52,21 @@ impl SinglePartyAggregator { .map(|(j, p)| p.assign_position(j, rng)) .unzip(); - let (dealer, value_challenge) = - dealer.receive_value_commitments(&value_commitments)?; + let (dealer, value_challenge) = dealer.receive_value_commitments(&value_commitments)?; let (parties, 
poly_commitments): (Vec<_>, Vec<_>) = parties .into_iter() .map(|p| p.apply_challenge(&value_challenge, rng)) .unzip(); - let (dealer, poly_challenge) = - dealer.receive_poly_commitments(&poly_commitments)?; + let (dealer, poly_challenge) = dealer.receive_poly_commitments(&poly_commitments)?; let proof_shares: Vec<_> = parties .into_iter() .map(|p| p.apply_challenge(&poly_challenge)) .collect(); - let (proof, _) = dealer.receive_shares(&proof_shares, &generators.all())?; + let (proof, _) = dealer.receive_shares(&proof_shares)?; Ok(proof) } From 7638b2f8df4888617a348c85e918301d5ea76b8c Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 12:38:20 -0700 Subject: [PATCH 138/186] tweak: take &[T] instead of &Vec --- src/aggregated_range_proof/dealer.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 75d6123e..c50f6fbd 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -49,7 +49,7 @@ impl<'a, 'b> DealerAwaitingValueCommitments<'a, 'b> { /// Combines commitments and computes challenge variables. pub fn receive_value_commitments( self, - value_commitments: &Vec, + value_commitments: &[ValueCommitment], ) -> Result<(DealerAwaitingPolyCommitments<'a, 'b>, ValueChallenge), &'static str> { if self.m != value_commitments.len() { return Err("Length of value commitments doesn't match expected length m"); @@ -98,7 +98,7 @@ pub struct DealerAwaitingPolyCommitments<'a, 'b> { impl<'a, 'b> DealerAwaitingPolyCommitments<'a, 'b> { pub fn receive_poly_commitments( self, - poly_commitments: &Vec, + poly_commitments: &[PolyCommitment], ) -> Result<(DealerAwaitingProofShares<'a, 'b>, PolyChallenge), &'static str> { if self.m != poly_commitments.len() { return Err("Length of poly commitments doesn't match expected length m"); @@ -143,7 +143,7 @@ pub struct DealerAwaitingProofShares<'a, 'b> { impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { pub fn receive_shares( self, - proof_shares: &Vec, + proof_shares: &[ProofShare], ) -> Result<(AggregatedProof, Vec), &'static str> { if self.m != proof_shares.len() { return Err("Length of proof shares doesn't match expected length m"); From c2c9b56ed592fa843e9a1c8e7ee37e34a91cb70f Mon Sep 17 00:00:00 2001 From: Cathie Date: Thu, 26 Apr 2018 13:52:04 -0700 Subject: [PATCH 139/186] add aggregated range proof notes to notes doc --- docs/notes.md | 242 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 240 insertions(+), 2 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index e3398e3f..611c2de1 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -196,7 +196,7 @@ that polynomial, making the probability that the prover cheated negligible. This trick allows implementing logical `AND` with any number of terms. -Combining inner-products +Combining inner products ------------------------ Finally, we want to combine these terms into a single inner product. Our @@ -603,4 +603,242 @@ sends a pair \\((L\_j,R\_j)\\) of points at each step \\(j = k\dots1\\). An additional and final step involves sending a pair of scalars \\((a^{(0)}\_0,b^{(0)}\_0)\\) and checking the final relation directly. -[bulletproofs_paper]: https://eprint.iacr.org/2017/1066.pdf +Aggregated Range Proof +====================== + +We want to create an aggregated range proof for `m` values (`m` parties) that is more efficient to create and verify than `m` individual range proofs. 
+ +The aggregated range proof has the same form as the individual range proof, but differs in that different parties are seperated by different powers of the challenge scalars `y` and `z`. + +We will explain how one piece of the aggregated proof is generated for party `j`, and then will show how all of the pieces for all of the `m` parties can be combined into one aggregated proof. + +Party `j` begins with a secret value \\(v_j\\), and wishes to convince the verifier that \\(v_j \in [0, 2^n)\\) without revealing \\(v_j\\). + +Proving range statements with bit vectors +----------------------------------------- + +Let \\({\mathbf{a}}\_{Lj}\\) be the vector of bits of \\(v_j\\). +Then \\(v_j\\) can be represented as: +\\[ +\begin{aligned} + v_j &= {\langle {\mathbf{a}}\_{Lj}, {\mathbf{2}}^{n} \rangle} +\end{aligned} +\\] +We need \\({\mathbf{a}}\_{Lj}\\) to be a vector of integers \\(\\{0,1\\}\\). +This can be expressed with additional conditions: +\\[ +\begin{aligned} + {\mathbf{a}}\_{Lj} \circ {\mathbf{a}}\_{Rj} &= {\mathbf{0}} \\\\ + ({\mathbf{a}}\_{Lj} - {\mathbf{1}}) - {\mathbf{a}}\_{Rj} &= {\mathbf{0}} +\end{aligned} +\\] + +Proving vectors of statements with a single statement +----------------------------------------------------- + +We want to combine the above three statements into a single statement for party `j`. We will also introduce challenge values \\(y_j\\) and \\(z_j\\) that are unique to each party `j`, and use them to help combine the statements. Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all `m` parties. + +First, we will combine each of the two vector-statements into a single statement using the verifier's choice of challenge value \\(y\\) that is shared across all parties, and offset by \\(y_j = y^{j \cdot n}\\) that is unique to each party `j`: + +\\[ +\begin{aligned} + {\langle {\mathbf{a}}\_{Lj}, {\mathbf{2}}^{n} \rangle} &= v \\\\ + {\langle {\mathbf{a}}\_{Lj} - {\mathbf{1}} - {\mathbf{a}}\_{Rj}, {\mathbf{y}}^{n} \cdot y_j \rangle} &= 0 \\\\ + {\langle {\mathbf{a}}\_{Lj}, {\mathbf{a}}\_{Rj} \circ {\mathbf{y}}^{n} \cdot y_j \rangle} &= 0 +\end{aligned} +\\] + +The three resulting statements can then be combined in the same way, +using the verifier’s choice of challenge value \\(z\\) that is shared across all parties, and offset by \\(z_j = z^j\\) that is unique to each party `j`: +\\[ +\begin{aligned} +z^{2} z_j \cdot v +&= + z^{2} z_j \cdot {\langle {\mathbf{a}}\_{Lj}, {\mathbf{2}}^{n} \rangle} \\\\ + &+ z \cdot {\langle {\mathbf{a}}\_{Lj} - {\mathbf{1}} - {\mathbf{a}}\_{Rj}, {\mathbf{y}}^{n} \cdot y_j \rangle} \\\\ + &+ {\langle {\mathbf{a}}\_{Lj}, {\mathbf{a}}\_{Rj} \circ {\mathbf{y}}^{n} \cdot y_j \rangle} +\end{aligned} +\\] + +Combining inner products +------------------------ + +We combine the terms in the last statement into a single inner product, using the same technique as in the single-value range proof. 
We will not reproduce the math here, only the end result: + +\\[ +\begin{aligned} + \delta_j(y,z) &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \cdot y_j \rangle} - z^{3} z_j \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}\\\\ + z^{2}z_j \cdot v + \delta_j(y,z) &= {\langle {\mathbf{a}}\_{Lj} - z {\mathbf{1}}, {\mathbf{y}}^{n} \cdot y_j \circ ({\mathbf{a}}\_{Rj} + z {\mathbf{1}}) + z^{2} z_j \cdot {\mathbf{2}}^{n} \rangle} +\end{aligned} +\\] + +Blinding the inner product +-------------------------- + +The prover chooses vectors of blinding factors +\\[ +{\mathbf{s}}\_{Lj}, {\mathbf{s}}\_{Rj} \\;{\xleftarrow{\\$}}\\; {\mathbb Z\_p}^{n} +\\] +and uses them to construct blinded vector polynomials: +\\[ +\begin{aligned} + {\mathbf{l}}\_j(x) &= ({\mathbf{a}}\_{Lj} + {\mathbf{s}}\_{Lj} x) - z {\mathbf{1}} & \in {\mathbb Z\_p}[x]^{n} \\\\ + {\mathbf{r}}\_j(x) &= {\mathbf{y}}^{n} \cdot y_j \circ \left( ({\mathbf{a}}\_{Rj} + {\mathbf{s}}\_{Rj} x\right) + z {\mathbf{1}}) + z^{2} z_j {\mathbf{2}}^{n} &\in {\mathbb Z\_p}[x]^{n} +\end{aligned} +\\] +The vector polynomials \\({\mathbf{l}}\_j(x)\\) and \\({\mathbf{r}}\_j(x)\\) are related to \\(t(x)\\) by the following equations: +\\[ +\begin{aligned} + t_j(x) &= {\langle {\mathbf{l}}\_j(x), {\mathbf{r}}\_j(x) \rangle} \\\\ + &= t\_{j0} + t\_{j1} x + t\_{j2} x^{2} \\\\ + t\_{j0} &= {\langle {\mathbf{l}}\_{j0}, {\mathbf{r}}\_{j0} \rangle} \\\\ + &= {\langle {\mathbf{a}}\_{Lj} - z {\mathbf{1}}, {\mathbf{y}}^{n} \cdot y_j \circ ({\mathbf{a}}\_{Rj} + z {\mathbf{1}}) + z^{2} z_j \cdot {\mathbf{2}}^{n} \rangle} \\\\ + t\_{j1} &= {\langle {\mathbf{l}}\_{j1}, {\mathbf{r}}\_{j0} \rangle} + {\langle {\mathbf{l}}\_{j0}, {\mathbf{r}}\_{j1} \rangle} \\\\ + t\_{j2} &= {\langle {\mathbf{l}}\_{j1}, {\mathbf{r}}\_{j1} \rangle} \\\\ +\end{aligned} +\\] + +Proving that \\(t(x)\\) is correct +---------------------------------- + +Proving that \\(t\_j(x)\\) is correct means proving that +\\({\mathbf{l}}\_j(x)\\), \\({\mathbf{r}}\_j(x)\\) are correctly formed, and that +\\(t_j(x) = {\langle {\mathbf{l}}\_j(x), {\mathbf{r}}\_j(x) \rangle}\\). + +We can combine the statements about \\(t_j(x)\\), \\({\mathbf{l}}\_j(x)\\), and \\({\mathbf{r}}\_j(x)\\) from all `m` parties in the following manner: + +\\[ +\begin{aligned} + t(x) &= \sum_{j=0}^{m-1} t_j(x)\\\\ + {\mathbf{l}}(x) &= {\mathbf{l}}\_{j=0}(x) || {\mathbf{l}}\_{j=1}(x) || \dots || {\mathbf{l}}\_{j=m-1}(x) \\\\ + {\mathbf{r}}(x) &= {\mathbf{r}}\_{j=0}(x) || {\mathbf{r}}\_{j=1}(x) || \dots || {\mathbf{r}}\_{j=m-1}(x) \\\\ +\end{aligned} +\\] + +We can add the \\(t_j(x)\\) values together to create \\(t(x)\\) because each \\(t_j(x)\\) is calculated using the \\(y_j\\) and \\(z_j\\) challenge variables that are unique to each party `j`, so all of the \\(t_j(x)\\) values will be offset from one another. + +Now instead of having to do `m` individual checks to prove that \\(t_j(x)\\), \\({\mathbf{l}}\_j(x)\\), and \\({\mathbf{r}}\_j(x)\\) for all parties `j` are correct, we can do the verification with one check: + +\\[ +\begin{aligned} + t(x) \stackrel{?}{=} {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle} +\end{aligned} +\\] + +We can do this check using the inner product proof, in the same way the single-value range proof uses the inner product proof. 
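+
+To see why a single check suffices, note that the inner product of two concatenated vectors splits into the sum of the inner products of their pieces. As a small worked example, with \\(m = 2\\) parties:
+
+\\[
+\begin{aligned}
+    {\langle {\mathbf{l}}(x), {\mathbf{r}}(x) \rangle} &= {\langle {\mathbf{l}}\_{j=0}(x) || {\mathbf{l}}\_{j=1}(x), {\mathbf{r}}\_{j=0}(x) || {\mathbf{r}}\_{j=1}(x) \rangle} \\\\
+    &= {\langle {\mathbf{l}}\_{j=0}(x), {\mathbf{r}}\_{j=0}(x) \rangle} + {\langle {\mathbf{l}}\_{j=1}(x), {\mathbf{r}}\_{j=1}(x) \rangle} \\\\
+    &= t\_{j=0}(x) + t\_{j=1}(x) = t(x),
+\end{aligned}
+\\]
+
+which is exactly the sum that defines \\(t(x)\\).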
+
+Proving that \\(t_0\\) is correct
+---------------------------------
+
+Proving that \\(t\_{j0}\\) is correct requires first creating commitments to the variables, and then proving the following relation (for an explanation of how the commitments are created and how the relation is derived, see the single-value range proof notes):
+
+\\[
+\begin{aligned}
+    t_j(x) B + {\tilde{t}}\_j(x) {\widetilde{B}} \stackrel{?}{=} z^2 z_j V_j + \delta_j(y,z) B + x T\_{j1} + x^{2} T\_{j2}\\\\
+    \delta_j(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \cdot y_j \rangle} - z^{3} z_j \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}
+\end{aligned}
+\\]
+
+If we combine all of the statements about \\(t\_{j0}\\) from all of the `j` parties by adding them together, then we get:
+
+\\[
+\begin{aligned}
+    \sum_{j=0}^{m-1}t_j(x) B + \sum_{j=0}^{m-1}{\tilde{t}}\_j(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z_j V_j + \sum_{j=0}^{m-1} \delta_j(y,z) B + x \sum_{j=0}^{m-1} T\_{j1} + x^{2} \sum_{j=0}^{m-1} T\_{j2}
+\end{aligned}
+\\]
+
+We can combine the party-specific values in the following manner:
+
+\\[
+\begin{aligned}
+    t(x) &= \sum_{j=0}^{m-1} t_j(x)\\\\
+    {\tilde{t}}(x) &= \sum_{j=0}^{m-1}{\tilde{t}}\_j(x)\\\\
+    T_1 &= \sum_{j=0}^{m-1} T_{j1}\\\\
+    T_2 &= \sum_{j=0}^{m-1} T_{j2}\\\\
+    \delta(y,z) &= \sum_{j=0}^{m-1} \delta_j(y,z)\\\\
+    &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n \cdot m} \rangle} - z^{3} \sum_{j=0}^{m-1} z_j \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\
+\end{aligned}
+\\]
+
+Now instead of having to do `m` individual checks to prove that \\(t\_{j0}\\) for all parties `j` are correct, we can do the verification with one check using the combined values:
+
+\\[
+\begin{aligned}
+    t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z_j V_j + \delta(y,z) B + x T\_{1} + x^{2} T\_{2},\\\\
+    \delta(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n \cdot m} \rangle} - z^{3} \sum_{j=0}^{m-1} z_j \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\
+\end{aligned}
+\\]
+
+Since we know that \\(z_j = z^j\\), we can rewrite the equation as follows:
+
+\\[
+\begin{aligned}
+    t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} \sum_{j=0}^{m-1} z^{j+2} V_j + \delta(y,z) B + x T\_{1} + x^{2} T\_{2},\\\\
+    \delta(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n \cdot m} \rangle} - \sum_{j=0}^{m-1} z^{j+3} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\
+\end{aligned}
+\\]
+
+Proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct
+---------------------------------------------------------------------
+
+Proving that \\({\mathbf{l}}\_j(x)\\), \\({\mathbf{r}}\_j(x)\\) are correct requires first creating commitments to the variables, and then proving the following relation (for an explanation of how the commitments are created and how the relation is derived, see the single-value range proof notes):
+
+\\[
+\begin{aligned}
+    {\langle {\mathbf{l}}\_j(x), {\mathbf{G}\_j} \rangle} + {\langle {\mathbf{r}}\_j(x) \circ {\mathbf{y}^{-n}} \cdot y_j^{-1}, {\mathbf{H}}\_j \rangle} \stackrel{?}{=} -{\widetilde{e}\_j} {\widetilde{B}} + A_j + x S_j - z{\langle {\mathbf{1}}, {\mathbf{G}\_j} \rangle} + {\langle z {\mathbf{1}} + z^2 z_j \cdot {\mathbf{y}^{-n}} y_j^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}\_j} \rangle}
+\end{aligned}
+\\]
+
+Where \\({\mathbf{G}\_j}\\) is party `j`'s share of the generators \\({\mathbf{G}}\\), or \\({\mathbf{G}\_{[j\cdot n : 
(j+1)n - 1]}}\\), and \\({\mathbf{H}\_j}\\) is party `j`'s share of the generators \\({\mathbf{H}}\\), or \\({\mathbf{H}\_{[j\cdot n : (j+1)n - 1]}}\\). + +If we combine all the statements about \\({\mathbf{l}}\_j(x)\\), \\({\mathbf{r}}\_j(x)\\) from all the `m` parties by adding them together, then we get: + +\\[ +\begin{aligned} + \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_j(x), {\mathbf{G}\_j} \rangle} + \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_j(x) \circ {\mathbf{y}^{-n}} \cdot y_j^{-1}, {\mathbf{H}}\_j \rangle} \stackrel{?}{=} -\sum_{j=0}^{m-1}{\widetilde{e}\_j} {\widetilde{B}} + \sum_{j=0}^{m-1}A_j + x \sum_{j=0}^{m-1}S_j - z \sum_{j=0}^{m-1}{\langle {\mathbf{1}}, {\mathbf{G}\_j} \rangle} + \sum_{j=0}^{m-1}{\langle z {\mathbf{1}} + z^2 z_j \cdot {\mathbf{y}^{-n}} y_j^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}\_j} \rangle} +\end{aligned} +\\] + +We can simplify this expression by making a few observations. We know that: + +\\[ +\begin{aligned} + {\mathbf{l}}(x) &= {\mathbf{l}}\_{j=0}(x) || {\mathbf{l}}\_{j=1}(x) || \dots || {\mathbf{l}}\_{j=m-1}(x) \\\\ + {\mathbf{G}} &= {\mathbf{G}}\_{0} || {\mathbf{G}}\_{1} || \dots || {\mathbf{G}}\_{m-1} \\\\ + y_j &= y^{j \cdot n} \\\\ + z_j &= z^j +\end{aligned} +\\] + +Therefore, we can simplify the following statements: + +\\[ +\begin{aligned} + \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_j(x), {\mathbf{G}\_j} \rangle} &= {\langle {\mathbf{l}}\_{j=0}(x) || {\mathbf{l}}\_{j=1}(x) || \dots || {\mathbf{l}}\_{j=m-1}(x), {\mathbf{G}}\_{0} || {\mathbf{G}}\_{1} || \dots || {\mathbf{G}}\_{m-1} \rangle} \\\\ + &= {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} \\\\ + \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_j(x) \circ {\mathbf{y}^{-n}} \cdot y_j^{-1}, {\mathbf{H}}\_j \rangle} &= {\langle {\mathbf{r}}\_{j=0}(x) \circ {\mathbf{y}^{-n}} y_0^{-1} || {\mathbf{r}}\_{j=1}(x) \circ {\mathbf{y}^{-n}} y_1^{-1} || \dots || {\mathbf{r}}\_{j=m-1}(x) \circ {\mathbf{y}^{-n}} y_{m-1}^{-1}, {\mathbf{H}}\_{0} || {\mathbf{H}}\_{1} || \dots || {\mathbf{H}}\_{m-1} \rangle}\\\\ + &= {\langle {\mathbf{r}}\_{j=0}(x) \circ {\mathbf{y}^{-n}} y^{0 \cdot n} || {\mathbf{r}}\_{j=1}(x) \circ {\mathbf{y}^{-n}} y^{-1 \cdot n} || \dots || {\mathbf{r}}\_{j=m-1}(x) \circ {\mathbf{y}^{-n}} y^{-(m-1)\cdot n}, {\mathbf{H}}\_{0} || {\mathbf{H}}\_{1} || \dots || {\mathbf{H}}\_{m-1} \rangle}\\\\ + &= {\langle {\mathbf{r}}(x) \circ {\mathbf{y}^{-n \cdot m}}, {\mathbf{H}} \rangle} +\end{aligned} +\\] + +We can also combine some party-specific values in the following manner: + +\\[ +\begin{aligned} + {\widetilde{e}} &= \sum_{j=0}^{m-1} {\widetilde{e}\_j} \\\\ + A &= \sum_{j=0}^{m-1} A_j \\\\ + S &= \sum_{j=0}^{m-1} S_j \\\\ +\end{aligned} +\\] + +With these observations, we can simplify the combined `m`-party statement about \\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\) into: + +\\[ +\begin{aligned} + {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x) \circ {\mathbf{y}^{-n \cdot m}} , {\mathbf{H}} \rangle} \stackrel{?}{=} -{\widetilde{e}} {\widetilde{B}} + A + x S - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} + z{\langle {\mathbf{1}}, {\mathbf{H}} \rangle} + \sum_{j=0}^{m-1} {\langle z^{j+2} \cdot {\mathbf{y}^{-n}} y^{-j\cdot n} \circ {\mathbf{2}}^n, {\mathbf{H}\_j} \rangle} +\end{aligned} +\\] + + +[bulletproofs_paper]: https://eprint.iacr.org/2017/1066.pdf \ No newline at end of file From cd76c270a4fcd0a94fcba9a87a2c8c093fec934f Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 26 Apr 2018 15:43:23 -0700 Subject: [PATCH 140/186] fixup! 
use standard tiny-keccak with duplex construction --- src/proof_transcript.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/proof_transcript.rs b/src/proof_transcript.rs index 65419470..9c3ad6b5 100644 --- a/src/proof_transcript.rs +++ b/src/proof_transcript.rs @@ -170,8 +170,8 @@ impl ProofTranscript { self.hash.pad(); self.hash.fill_block(); self.write_offset = 0; - room = self.rate - 1; input = &input[room..]; + room = self.rate - 1; } self.hash.absorb(input); self.write_offset += input.len(); // could end up == (rate-1) From b56d2bfed40d9fdac3a8c933a38213cd51e3d1cd Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 15:24:04 -0700 Subject: [PATCH 141/186] first try at travis --- .travis.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..715f1b7a --- /dev/null +++ b/.travis.yml @@ -0,0 +1,17 @@ +language: rust + +rust: + - nightly + +env: + - TEST_COMMAND=test EXTRA_FLAGS='' FEATURES='' + - TEST_COMMAND=test EXTRA_FLAGS='' FEATURES='yolocrypto' + +script: + - cargo $TEST_COMMAND --features="$FEATURES" $EXTRA_FLAGS + +# enable this integration if we upstream the repo +#notifications: +# slack: +# rooms: +# - dalek-cryptography:Xxv9WotKYWdSoKlgKNqXiHoD#dalek-bots From 62854e687d9413fcdc66ceb30b5a2f75095b8d21 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 15:00:09 -0700 Subject: [PATCH 142/186] Refactor aggregation logic to allow choice in validation The aggregation logic is moved to a private `assemble_shares` function. This is called by two external entry points: - `receive_trusted_shares` :: does not validate - `receive_shares` :: does validate & blame (just not yet) --- src/aggregated_range_proof/dealer.rs | 98 +++++++++++++++++++++++----- src/aggregated_range_proof/mod.rs | 4 +- 2 files changed, 82 insertions(+), 20 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index c50f6fbd..2e9c3fa1 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -1,9 +1,13 @@ +use rand::Rng; + use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::scalar::Scalar; use curve25519_dalek::traits::Identity; + use generators::GeneratorsView; use inner_product_proof; use proof_transcript::ProofTranscript; + use util; use super::messages::*; @@ -25,12 +29,30 @@ impl Dealer { if !m.is_power_of_two() { return Err("m is not valid: must be a power of 2"); } + + // At the end of the protocol, the dealer will attempt to + // verify the proof, and if it fails, determine which party's + // shares were invalid. + // + // However, verifying the proof requires either knowledge of + // all of the challenges, or a copy of the initial transcript + // state. + // + // The dealer has all of the challenges, but using them for + // verification would require duplicating the verification + // logic. Instead, we keep a copy of the initial transcript + // state. 
+ let initial_transcript = transcript.clone(); + + // Commit to aggregation parameters transcript.commit_u64(n as u64); transcript.commit_u64(m as u64); + Ok(DealerAwaitingValueCommitments { n, m, transcript, + initial_transcript, gens, }) } @@ -42,6 +64,9 @@ pub struct DealerAwaitingValueCommitments<'a, 'b> { n: usize, m: usize, transcript: &'a mut ProofTranscript, + /// The dealer keeps a copy of the initial transcript state, so + /// that it can attempt to verify the aggregated proof at the end. + initial_transcript: ProofTranscript, gens: GeneratorsView<'b>, } @@ -79,6 +104,7 @@ impl<'a, 'b> DealerAwaitingValueCommitments<'a, 'b> { n: self.n, m: self.m, transcript: self.transcript, + initial_transcript: self.initial_transcript, gens: self.gens, value_challenge: value_challenge.clone(), }, @@ -91,6 +117,7 @@ pub struct DealerAwaitingPolyCommitments<'a, 'b> { n: usize, m: usize, transcript: &'a mut ProofTranscript, + initial_transcript: ProofTranscript, gens: GeneratorsView<'b>, value_challenge: ValueChallenge, } @@ -122,6 +149,7 @@ impl<'a, 'b> DealerAwaitingPolyCommitments<'a, 'b> { n: self.n, m: self.m, transcript: self.transcript, + initial_transcript: self.initial_transcript, gens: self.gens, value_challenge: self.value_challenge, poly_challenge: poly_challenge.clone(), @@ -135,31 +163,26 @@ pub struct DealerAwaitingProofShares<'a, 'b> { n: usize, m: usize, transcript: &'a mut ProofTranscript, + initial_transcript: ProofTranscript, gens: GeneratorsView<'b>, value_challenge: ValueChallenge, poly_challenge: PolyChallenge, } impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { - pub fn receive_shares( - self, + /// Assembles proof shares into an `AggregatedProof`. + /// + /// Used as a helper function by `receive_trusted_shares` (which + /// just hands back the result) and `receive_shares` (which + /// validates the proof shares. + fn assemble_shares( + &mut self, proof_shares: &[ProofShare], - ) -> Result<(AggregatedProof, Vec), &'static str> { + ) -> Result { if self.m != proof_shares.len() { return Err("Length of proof shares doesn't match expected length m"); } - let mut share_verifiers = Vec::new(); - for (j, proof_share) in proof_shares.iter().enumerate() { - share_verifiers.push(ProofShareVerifier { - proof_share: proof_share.clone(), - n: self.n, - j: j, - value_challenge: self.value_challenge.clone(), - poly_challenge: self.poly_challenge.clone(), - }); - } - let value_commitments = proof_shares .iter() .map(|ps| ps.value_commitment.V) @@ -221,7 +244,7 @@ impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { r_vec.clone(), ); - let aggregated_proof = AggregatedProof { + Ok(AggregatedProof { n: self.n, value_commitments, A, @@ -232,8 +255,49 @@ impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { t_x_blinding, e_blinding, ipp_proof, - }; + }) + } + + /// Assemble the final aggregated proof from the given + /// `proof_shares`, and validate that all input shares and the + /// aggregated proof are well-formed. If the aggregated proof is + /// not well-formed, this function detects which party submitted a + /// malformed share and returns that information as part of the + /// error. 
+ /// + /// XXX define error types so we can surface the blame info + pub fn receive_shares( + mut self, + rng: &mut R, + proof_shares: &[ProofShare], + ) -> Result { + let proof = self.assemble_shares(proof_shares)?; - Ok((aggregated_proof, share_verifiers)) + if proof.verify(rng, &mut self.initial_transcript).is_ok() { + return Ok(proof); + } + + // XXX check shares + return Err("proof failed to verify"); + } + + /// Assemble the final aggregated proof from the given + /// `proof_shares`, but does not validate that they are well-formed. + /// + /// ## WARNING + /// + /// This function does **NOT** validate the proof shares. It is + /// suitable for creating aggregated proofs when all parties are + /// known by the dealer to be honest (for instance, when there's + /// only one party playing all roles). + /// + /// Otherwise, use `receive_shares`, which validates that all + /// shares are well-formed, or else detects which party(ies) + /// submitted malformed shares. + pub fn receive_trusted_shares( + mut self, + proof_shares: &[ProofShare], + ) -> Result { + self.assemble_shares(proof_shares) } } diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index ac7fe61a..9d5337e9 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -66,9 +66,7 @@ impl SinglePartyAggregator { .map(|p| p.apply_challenge(&poly_challenge)) .collect(); - let (proof, _) = dealer.receive_shares(&proof_shares)?; - - Ok(proof) + dealer.receive_trusted_shares(&proof_shares) } } From 8a7d6a9c1065507c7b8def48aa43caf85c7ae822 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 16:48:38 -0700 Subject: [PATCH 143/186] Add a test that dishonest aggregation fails --- src/aggregated_range_proof/mod.rs | 55 +++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index 9d5337e9..d4f98b23 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -178,4 +178,59 @@ mod tests { fn create_and_verify_n_64_m_8() { singleparty_create_and_verify_helper(64, 8); } + + #[test] + fn detect_dishonest_party_during_aggregation() { + use self::dealer::*; + use self::messages::*; + use self::party::*; + + // Simulate two parties, one of which will be dishonest and use a 64-bit value. 
+ let m = 2; + let n = 32; + + let generators = Generators::new(PedersenGenerators::default(), n, m); + + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + // Party 0 is honest and uses a 32-bit value + let v0 = rng.next_u32() as u64; + let v0_blinding = Scalar::random(&mut rng); + let party0 = Party::new(v0, v0_blinding, n, &generators).unwrap(); + + // Party 1 is dishonest and uses a 64-bit value + let v1 = rng.next_u64(); + let v1_blinding = Scalar::random(&mut rng); + let party1 = Party::new(v1, v1_blinding, n, &generators).unwrap(); + + let dealer = Dealer::new(generators.all(), n, m, &mut transcript).unwrap(); + + let (party0, value_com0) = party0.assign_position(0, &mut rng); + let (party1, value_com1) = party1.assign_position(1, &mut rng); + + let (dealer, value_challenge) = dealer + .receive_value_commitments(&[value_com0, value_com1]) + .unwrap(); + + let (party0, poly_com0) = party0.apply_challenge(&value_challenge, &mut rng); + let (party1, poly_com1) = party1.apply_challenge(&value_challenge, &mut rng); + + let (dealer, poly_challenge) = dealer + .receive_poly_commitments(&[poly_com0, poly_com1]) + .unwrap(); + + let share0 = party0.apply_challenge(&poly_challenge); + let share1 = party1.apply_challenge(&poly_challenge); + + match dealer.receive_shares(&mut rng, &[share0, share1]) { + Ok(_proof) => { + panic!("The proof was malformed, but it was not detected"); + } + Err(e) => { + // XXX when we have error types, check that it was party 1 that did it + assert_eq!(e, "proof failed to verify"); + } + } + } } From da9b59b3ba50ce637e4d94e8860a397aae0a56bb Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 17:30:32 -0700 Subject: [PATCH 144/186] Change the dishonest test to four parties so we can test multidetection --- src/aggregated_range_proof/mod.rs | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index d4f98b23..45686b61 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -185,8 +185,8 @@ mod tests { use self::messages::*; use self::party::*; - // Simulate two parties, one of which will be dishonest and use a 64-bit value. - let m = 2; + // Simulate four parties, two of which will be dishonest and use a 64-bit value. 
+ let m = 4; let n = 32; let generators = Generators::new(PedersenGenerators::default(), n, m); @@ -194,36 +194,50 @@ mod tests { let mut rng = OsRng::new().unwrap(); let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - // Party 0 is honest and uses a 32-bit value + // Parties 0, 2 are honest and use a 32-bit value let v0 = rng.next_u32() as u64; let v0_blinding = Scalar::random(&mut rng); let party0 = Party::new(v0, v0_blinding, n, &generators).unwrap(); - // Party 1 is dishonest and uses a 64-bit value + let v2 = rng.next_u32() as u64; + let v2_blinding = Scalar::random(&mut rng); + let party2 = Party::new(v2, v2_blinding, n, &generators).unwrap(); + + // Parties 1, 3 are dishonest and use a 64-bit value let v1 = rng.next_u64(); let v1_blinding = Scalar::random(&mut rng); let party1 = Party::new(v1, v1_blinding, n, &generators).unwrap(); + let v3 = rng.next_u64(); + let v3_blinding = Scalar::random(&mut rng); + let party3 = Party::new(v3, v3_blinding, n, &generators).unwrap(); + let dealer = Dealer::new(generators.all(), n, m, &mut transcript).unwrap(); let (party0, value_com0) = party0.assign_position(0, &mut rng); let (party1, value_com1) = party1.assign_position(1, &mut rng); + let (party2, value_com2) = party2.assign_position(2, &mut rng); + let (party3, value_com3) = party3.assign_position(3, &mut rng); let (dealer, value_challenge) = dealer - .receive_value_commitments(&[value_com0, value_com1]) + .receive_value_commitments(&[value_com0, value_com1, value_com2, value_com3]) .unwrap(); let (party0, poly_com0) = party0.apply_challenge(&value_challenge, &mut rng); let (party1, poly_com1) = party1.apply_challenge(&value_challenge, &mut rng); + let (party2, poly_com2) = party2.apply_challenge(&value_challenge, &mut rng); + let (party3, poly_com3) = party3.apply_challenge(&value_challenge, &mut rng); let (dealer, poly_challenge) = dealer - .receive_poly_commitments(&[poly_com0, poly_com1]) + .receive_poly_commitments(&[poly_com0, poly_com1, poly_com2, poly_com3]) .unwrap(); let share0 = party0.apply_challenge(&poly_challenge); let share1 = party1.apply_challenge(&poly_challenge); + let share2 = party2.apply_challenge(&poly_challenge); + let share3 = party3.apply_challenge(&poly_challenge); - match dealer.receive_shares(&mut rng, &[share0, share1]) { + match dealer.receive_shares(&mut rng, &[share0, share1, share2, share3]) { Ok(_proof) => { panic!("The proof was malformed, but it was not detected"); } From 4d922a237df7678fd586afeb7faae19a86e060e1 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 17:33:25 -0700 Subject: [PATCH 145/186] Pinpoint which shares were invalid. This isn't currently propagated upwards, since we haven't defined custom error types yet. 
--- src/aggregated_range_proof/dealer.rs | 18 ++++++++++++++---- src/aggregated_range_proof/messages.rs | 16 ---------------- 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 2e9c3fa1..01aa8570 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -273,12 +273,22 @@ impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { ) -> Result { let proof = self.assemble_shares(proof_shares)?; + // See comment in `Dealer::new` for why we use `initial_transcript` if proof.verify(rng, &mut self.initial_transcript).is_ok() { - return Ok(proof); + Ok(proof) + } else { + // Create a list of bad shares + let mut bad_shares = Vec::new(); + for (j, share) in proof_shares.iter().enumerate() { + match share.verify_share(self.n, j, &self.value_challenge, &self.poly_challenge) { + Ok(_) => {} + Err(_) => bad_shares.push(j), + } + } + // XXX pass this upwards + println!("bad shares: {:?}", bad_shares); + Err("proof failed to verify") } - - // XXX check shares - return Err("proof failed to verify"); } /// Assemble the final aggregated proof from the given diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 50c2b5fa..9069c122 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -120,22 +120,6 @@ impl ProofShare { } } -pub struct ProofShareVerifier { - pub proof_share: ProofShare, - pub n: usize, - pub j: usize, - pub value_challenge: ValueChallenge, - pub poly_challenge: PolyChallenge, -} - -impl ProofShareVerifier { - /// Returns whether the proof share is valid (Ok) or invalid (Err) - pub fn verify_share(&self) -> Result<(), &'static str> { - self.proof_share - .verify_share(self.n, self.j, &self.value_challenge, &self.poly_challenge) - } -} - #[derive(Serialize, Deserialize, Clone, Debug)] pub struct AggregatedProof { pub n: usize, From ff5aee894ae4e2a88af6b6e31da34b9cce672206 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Thu, 26 Apr 2018 17:34:11 -0700 Subject: [PATCH 146/186] Fix warnings --- src/aggregated_range_proof/mod.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index 45686b61..9d436774 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -13,7 +13,7 @@ pub mod party; pub use self::messages::AggregatedProof; -struct SinglePartyAggregator {} +pub struct SinglePartyAggregator {} impl SinglePartyAggregator { /// Create an aggregated rangeproof of multiple values. @@ -24,7 +24,7 @@ impl SinglePartyAggregator { /// The length of `values` must be a power of 2. /// /// XXX this should allow proving about existing commitments. - fn generate_proof( + pub fn generate_proof( generators: &Generators, transcript: &mut ProofTranscript, rng: &mut R, @@ -32,7 +32,6 @@ impl SinglePartyAggregator { n: usize, ) -> Result { use self::dealer::*; - use self::messages::*; use self::party::*; let dealer = Dealer::new(generators.all(), n, values.len(), transcript)?; @@ -182,7 +181,6 @@ mod tests { #[test] fn detect_dishonest_party_during_aggregation() { use self::dealer::*; - use self::messages::*; use self::party::*; // Simulate four parties, two of which will be dishonest and use a 64-bit value. 
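The patch above still reports blame with a `println!` and a `&'static str` error ("XXX pass this upwards"). A minimal sketch of what a custom error type for this could look like is below; the `MPCError` name and its variants are assumptions made only for illustration and are not defined anywhere in this patch series.

// Sketch only: an error type that `receive_shares` could return so that the
// indices of malformed shares reach the caller instead of being printed.
// `MPCError` and its variants are illustrative names, not part of the codebase.
#[derive(Clone, Debug, PartialEq)]
pub enum MPCError {
    /// The aggregated proof failed to verify; `bad_shares` holds the
    /// positions `j` of the proof shares that failed `verify_share`.
    MalformedProofShares { bad_shares: Vec<usize> },
    /// The dealer received a different number of shares than it expected.
    WrongNumberOfShares { expected: usize, actual: usize },
}

With a type like this, the `// XXX pass this upwards` branch could end with `return Err(MPCError::MalformedProofShares { bad_shares });`, and the dishonest-party test could assert on the offending indices (parties 1 and 3) rather than matching an error string.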
From bcb944a9f1d60c4a290b1185ed6b32d256f49b65 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 27 Apr 2018 15:22:03 -0700 Subject: [PATCH 147/186] Add aggregate proof benchmarks --- benches/bulletproofs.rs | 95 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 94 insertions(+), 1 deletion(-) diff --git a/benches/bulletproofs.rs b/benches/bulletproofs.rs index 25bc639e..c1597e0b 100644 --- a/benches/bulletproofs.rs +++ b/benches/bulletproofs.rs @@ -112,4 +112,97 @@ criterion_group!{ targets = verify_rp_8, verify_rp_16, verify_rp_32, verify_rp_64 } -criterion_main!(create_rp, verify_rp); +static AGGREGATION_SIZES: [usize; 6] = [1, 2, 4, 8, 16, 32]; + +fn create_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { + use ristretto_bulletproofs::aggregated_range_proof::SinglePartyAggregator; + + let label = format!("Aggregated {}-bit rangeproof creation", n); + + c.bench_function_over_inputs( + &label, + move |b, &&m| { + let generators = Generators::new(PedersenGenerators::default(), n, m); + let mut rng = OsRng::new().unwrap(); + + let (min, max) = (0u64, ((1u128 << n) - 1) as u64); + let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); + + b.iter(|| { + // Each proof creation requires a clean transcript. + let mut transcript = ProofTranscript::new(b"AggregateRangeProofBenchmark"); + + SinglePartyAggregator::generate_proof( + &generators, + &mut transcript, + &mut rng, + &values, + n, + ) + }) + }, + &AGGREGATION_SIZES, + ); +} + +fn create_aggregated_rangeproof_n_32(c: &mut Criterion) { + create_aggregated_rangeproof_helper(32, c); +} + +fn create_aggregated_rangeproof_n_64(c: &mut Criterion) { + create_aggregated_rangeproof_helper(64, c); +} + +fn verify_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { + use ristretto_bulletproofs::aggregated_range_proof::SinglePartyAggregator; + + let label = format!("Aggregated {}-bit rangeproof verification", n); + + c.bench_function_over_inputs( + &label, + move |b, &&m| { + let generators = Generators::new(PedersenGenerators::default(), n, m); + let mut rng = OsRng::new().unwrap(); + + let (min, max) = (0u64, ((1u128 << n) - 1) as u64); + let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); + + let mut transcript = ProofTranscript::new(b"AggregateRangeProofBenchmark"); + let proof = SinglePartyAggregator::generate_proof( + &generators, + &mut transcript, + &mut rng, + &values, + n, + ).unwrap(); + + b.iter(|| { + // Each proof creation requires a clean transcript. 
+ let mut transcript = ProofTranscript::new(b"AggregateRangeProofBenchmark"); + + proof.verify(&mut rng, &mut transcript) + }); + }, + &AGGREGATION_SIZES, + ); +} + +fn verify_aggregated_rangeproof_n_32(c: &mut Criterion) { + verify_aggregated_rangeproof_helper(32, c); +} + +fn verify_aggregated_rangeproof_n_64(c: &mut Criterion) { + verify_aggregated_rangeproof_helper(64, c); +} + +criterion_group!{ + name = aggregate_rp; + config = Criterion::default(); + targets = + create_aggregated_rangeproof_n_32, + create_aggregated_rangeproof_n_64, + verify_aggregated_rangeproof_n_32, + verify_aggregated_rangeproof_n_64 +} + +criterion_main!(create_rp, verify_rp, aggregate_rp); From a65e4220a5171929c6259cd928e8f795d65f43d2 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 27 Apr 2018 17:27:57 -0700 Subject: [PATCH 148/186] Pass generators into the aggregated proof code and shorten proofs This saves a ton of work building the generators, and makes the proofs smaller by dropping the value commitments and the bitsize/number of parties. Actually using the `SinglePartyAggregator` is a bit awkward, since it requires creating all of the value commitments by hand. I'm not sure what the best solution to that is yet. Progress towards #74 --- src/aggregated_range_proof/dealer.rs | 25 ++++++++---- src/aggregated_range_proof/messages.rs | 54 +++++++++++++------------- src/aggregated_range_proof/mod.rs | 40 ++++++++++++++++--- 3 files changed, 79 insertions(+), 40 deletions(-) diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 01aa8570..16ef4bed 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -183,10 +183,6 @@ impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { return Err("Length of proof shares doesn't match expected length m"); } - let value_commitments = proof_shares - .iter() - .map(|ps| ps.value_commitment.V) - .collect(); let A = proof_shares .iter() .fold(RistrettoPoint::identity(), |A, ps| { @@ -245,8 +241,6 @@ impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { ); Ok(AggregatedProof { - n: self.n, - value_commitments, A, S, T_1, @@ -273,8 +267,25 @@ impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { ) -> Result { let proof = self.assemble_shares(proof_shares)?; + // XXX if we change the proof verification API to use + // iterators we can do it with ZeRo-CoSt-AbStRaCtIonS + let value_commitments: Vec<_> = proof_shares + .iter() + .map(|ps| ps.value_commitment.V) + .collect(); + // See comment in `Dealer::new` for why we use `initial_transcript` - if proof.verify(rng, &mut self.initial_transcript).is_ok() { + if proof + .verify( + &value_commitments, + self.gens, + &mut self.initial_transcript, + rng, + self.n, + self.m, + ) + .is_ok() + { Ok(proof) } else { // Create a list of bad shares diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 9069c122..225d68d0 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -1,16 +1,21 @@ -use curve25519_dalek::ristretto::RistrettoPoint; -use curve25519_dalek::scalar::Scalar; -use inner_product_proof; +use std::iter; -use curve25519_dalek::ristretto; +use rand::Rng; + +use curve25519_dalek::ristretto::{self, RistrettoPoint}; +use curve25519_dalek::scalar::Scalar; use curve25519_dalek::traits::IsIdentity; + +use inner_product_proof::{self, InnerProductProof}; use proof_transcript::ProofTranscript; -use rand::Rng; -use std::iter; +use generators::GeneratorsView; use util; 
#[derive(Serialize, Deserialize, Copy, Clone, Debug)] pub struct ValueCommitment { + /// XXX when we change the aggregation API to allow proving about + /// preexisting commitments, this should go away (and just be an + /// input to the dealer), but until then it should be here. pub V: RistrettoPoint, pub A: RistrettoPoint, pub S: RistrettoPoint, @@ -122,11 +127,6 @@ impl ProofShare { #[derive(Serialize, Deserialize, Clone, Debug)] pub struct AggregatedProof { - pub n: usize, - /// Commitment to the value - // XXX this should not be included, so that we can prove about existing commitments - // included for now so that it's easier to test - pub value_commitments: Vec, /// Commitment to the bits of the value pub A: RistrettoPoint, /// Commitment to the blinding factors @@ -142,23 +142,23 @@ pub struct AggregatedProof { /// Blinding factor for the synthetic commitment to the inner-product arguments pub e_blinding: Scalar, /// Proof data for the inner-product argument. - pub ipp_proof: inner_product_proof::InnerProductProof, + pub ipp_proof: InnerProductProof, } impl AggregatedProof { - pub fn verify(&self, rng: &mut R, transcript: &mut ProofTranscript) -> Result<(), ()> { - use generators::{Generators, PedersenGenerators}; - - let n = self.n; - let m = self.value_commitments.len(); - - let generators = Generators::new(PedersenGenerators::default(), n, m); - let gen = generators.all(); - + pub fn verify( + &self, + value_commitments: &[RistrettoPoint], + gens: GeneratorsView, + transcript: &mut ProofTranscript, + rng: &mut R, + n: usize, + m: usize, + ) -> Result<(), ()> { transcript.commit_u64(n as u64); transcript.commit_u64(m as u64); - for V in self.value_commitments.iter() { + for V in value_commitments.iter() { transcript.commit(V.compress().as_bytes()); } transcript.commit(self.A.compress().as_bytes()); @@ -221,13 +221,13 @@ impl AggregatedProof { .chain(x_inv_sq.iter().cloned()), iter::once(&self.A) .chain(iter::once(&self.S)) - .chain(self.value_commitments.iter()) + .chain(value_commitments.iter()) .chain(iter::once(&self.T_1)) .chain(iter::once(&self.T_2)) - .chain(iter::once(&gen.pedersen_generators.B_blinding)) - .chain(iter::once(&gen.pedersen_generators.B)) - .chain(gen.G.iter()) - .chain(gen.H.iter()) + .chain(iter::once(&gens.pedersen_generators.B_blinding)) + .chain(iter::once(&gens.pedersen_generators.B)) + .chain(gens.G.iter()) + .chain(gens.H.iter()) .chain(self.ipp_proof.L_vec.iter()) .chain(self.ipp_proof.R_vec.iter()), ); diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index 9d436774..7febad82 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -22,24 +22,27 @@ impl SinglePartyAggregator { /// with one party playing all roles. /// /// The length of `values` must be a power of 2. - /// - /// XXX this should allow proving about existing commitments. 
pub fn generate_proof( generators: &Generators, transcript: &mut ProofTranscript, rng: &mut R, values: &[u64], + blindings: &[Scalar], n: usize, ) -> Result { use self::dealer::*; use self::party::*; + if values.len() != blindings.len() { + return Err("mismatched values and blindings len"); + } + let dealer = Dealer::new(generators.all(), n, values.len(), transcript)?; let parties: Vec<_> = values .iter() - .map(|&v| { - let v_blinding = Scalar::random(rng); + .zip(blindings.iter()) + .map(|(&v, &v_blinding)| { Party::new(v, v_blinding, n, &generators) }) // Collect the iterator of Results into a Result, then unwrap it @@ -73,10 +76,12 @@ impl SinglePartyAggregator { mod tests { use rand::OsRng; - use super::*; + use curve25519_dalek::ristretto::RistrettoPoint; use generators::PedersenGenerators; + use super::*; + /// Given a bitsize `n`, test the following: /// /// 1. Generate `m` random values and create a proof they are all in range; @@ -95,6 +100,7 @@ mod tests { // Serialized proof data let proof_bytes: Vec; + let value_commitments: Vec; // Prover's scope { @@ -105,17 +111,28 @@ mod tests { let (min, max) = (0u64, ((1u128 << n) - 1) as u64); let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); + let blindings: Vec = (0..m).map(|_| Scalar::random(&mut rng)).collect(); let proof = SinglePartyAggregator::generate_proof( &generators, &mut transcript, &mut rng, &values, + &blindings, n, ).unwrap(); // 2. Serialize proof_bytes = bincode::serialize(&proof).unwrap(); + + let pg = &generators.all().pedersen_generators; + + // XXX would be nice to have some convenience API for this + value_commitments = values + .iter() + .zip(blindings.iter()) + .map(|(&v, &v_blinding)| pg.commit(Scalar::from_u64(v), v_blinding)) + .collect(); } println!( @@ -134,7 +151,18 @@ mod tests { let mut rng = OsRng::new().unwrap(); let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - assert!(proof.verify(&mut rng, &mut transcript).is_ok()); + assert!( + proof + .verify( + &value_commitments, + generators.all(), + &mut transcript, + &mut rng, + n, + m + ) + .is_ok() + ); } } From 2f47dfc443158f7c7ce2b9f6ae1b18d6d0796048 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 30 Apr 2018 11:32:37 -0700 Subject: [PATCH 149/186] update Rust nightly --- rust-toolchain | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain b/rust-toolchain index 012938f6..3fbf18b7 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2018-04-03 +nightly-2018-04-29 From ffdd2a5f436538f71ae71898a191d20d7776374d Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 30 Apr 2018 11:55:49 -0700 Subject: [PATCH 150/186] try using travis's rust cargo cache --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 715f1b7a..061cda82 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,5 @@ language: rust +cache: cargo rust: - nightly From affa6b748873571ad9c6603587fd77a3d3624bac Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 30 Apr 2018 14:06:58 -0700 Subject: [PATCH 151/186] Fix the build and add benches to CI --- .travis.yml | 3 +++ benches/bulletproofs.rs | 21 ++++++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 715f1b7a..a7fef851 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,9 @@ rust: env: - TEST_COMMAND=test EXTRA_FLAGS='' FEATURES='' - TEST_COMMAND=test EXTRA_FLAGS='' FEATURES='yolocrypto' + # run cargo bench with a filter that matches 
no benchmarks. + # this ensures the benchmarks build but doesn't run them on the CI server. + - TEST_COMMAND=bench EXTRA_FLAGS='"DONTRUNBENCHMARKS"' FEATURES='yolocrypto' script: - cargo $TEST_COMMAND --features="$FEATURES" $EXTRA_FLAGS diff --git a/benches/bulletproofs.rs b/benches/bulletproofs.rs index c1597e0b..77dfb779 100644 --- a/benches/bulletproofs.rs +++ b/benches/bulletproofs.rs @@ -127,6 +127,7 @@ fn create_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { let (min, max) = (0u64, ((1u128 << n) - 1) as u64); let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); + let blindings: Vec = (0..m).map(|_| Scalar::random(&mut rng)).collect(); b.iter(|| { // Each proof creation requires a clean transcript. @@ -137,6 +138,7 @@ fn create_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { &mut transcript, &mut rng, &values, + &blindings, n, ) }) @@ -166,6 +168,7 @@ fn verify_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { let (min, max) = (0u64, ((1u128 << n) - 1) as u64); let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); + let blindings: Vec = (0..m).map(|_| Scalar::random(&mut rng)).collect(); let mut transcript = ProofTranscript::new(b"AggregateRangeProofBenchmark"); let proof = SinglePartyAggregator::generate_proof( @@ -173,14 +176,30 @@ fn verify_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { &mut transcript, &mut rng, &values, + &blindings, n, ).unwrap(); + // XXX would be nice to have some convenience API for this + let pg = &generators.all().pedersen_generators; + let value_commitments: Vec<_> = values + .iter() + .zip(blindings.iter()) + .map(|(&v, &v_blinding)| pg.commit(Scalar::from_u64(v), v_blinding)) + .collect(); + b.iter(|| { // Each proof creation requires a clean transcript. let mut transcript = ProofTranscript::new(b"AggregateRangeProofBenchmark"); - proof.verify(&mut rng, &mut transcript) + proof.verify( + &value_commitments, + generators.all(), + &mut transcript, + &mut rng, + n, + m, + ) }); }, &AGGREGATION_SIZES, From 30f22fb53d6be389142edbf84eacf50522879955 Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 30 Apr 2018 14:57:54 -0700 Subject: [PATCH 152/186] convert _j to _(j) notation, and de-duplicate writing between single-party and aggregated proof notes --- docs/notes.md | 64 ++++++++++++++++++++++++++++----------------------- 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 611c2de1..f39f257f 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -608,67 +608,73 @@ Aggregated Range Proof We want to create an aggregated range proof for `m` values (`m` parties) that is more efficient to create and verify than `m` individual range proofs. -The aggregated range proof has the same form as the individual range proof, but differs in that different parties are seperated by different powers of the challenge scalars `y` and `z`. +The aggregation protocol is a multi-party computation protocol, involving `m` parties and one dealer, where the parties don't reveal their secrets to each other. The parties share their commitments with the dealer, and the dealer generates and returns challenge variables. The parties then share their proof shares with the dealer, and the dealer combines their shares to create an aggregated proof. 
+
+The Bulletproofs paper outlines two versions of multi-party computation aggregation: one with a constant number of rounds but communication that is linear in both `m` and the binary encoding of the range, and one with a logarithmic number of rounds and communication that is only linear in `m`. We chose to implement the first version because the two versions don't differ significantly in proof size, and the first version is less complicated.
+
+For more information on how the aggregation protocol works and is implemented, see the [protocol notes](../aggregated_range_proof/index.html).
+
+The aggregated range proof has the same form as the individual range proof, in that the provers (the parties) still perform the same calculations to prove that \\(t_0, \mathbf{l}(x), \mathbf{r}(x)\\) are correct and that \\(t(x) = \langle \mathbf{l}(x), \mathbf{r}(x) \rangle \\). The difference is that the challenge values are obtained from the dealer, and that the calculations of different parties are separated by different powers of the challenge scalars `y` and `z`.

We will explain how one piece of the aggregated proof is generated for party `j`, and then will show how all of the pieces for all of the `m` parties can be combined into one aggregated proof.

-Party `j` begins with a secret value \\(v_j\\), and wishes to convince the verifier that \\(v_j \in [0, 2^n)\\) without revealing \\(v_j\\).
+Party `j` begins with a secret value \\(v_{(j)}\\), and wishes to convince the verifier that \\(v_{(j)} \in [0, 2^n)\\) without revealing \\(v_{(j)}\\). We use the notation that the subscript in \\(v_{(j)}\\) denotes the `j`th party's value.

Proving range statements with bit vectors
-----------------------------------------

-Let \\({\mathbf{a}}\_{Lj}\\) be the vector of bits of \\(v_j\\).
-Then \\(v_j\\) can be represented as:
+Let \\({\mathbf{a}}\_{(j)L}\\) be the vector of bits of \\(v_{(j)}\\).
+Then \\(v_{(j)}\\) can be represented as:
\\[
\begin{aligned}
-    v_j &= {\langle {\mathbf{a}}\_{Lj}, {\mathbf{2}}^{n} \rangle}
+    v_{(j)} &= {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{2}}^{n} \rangle}
\end{aligned}
\\]
-We need \\({\mathbf{a}}\_{Lj}\\) to be a vector of integers \\(\\{0,1\\}\\).
+We need \\({\mathbf{a}}\_{(j)L}\\) to be a vector of integers \\(\\{0,1\\}\\).
This can be expressed with additional conditions:
\\[
\begin{aligned}
-  {\mathbf{a}}\_{Lj} \circ {\mathbf{a}}\_{Rj} &= {\mathbf{0}} \\\\
-  ({\mathbf{a}}\_{Lj} - {\mathbf{1}}) - {\mathbf{a}}\_{Rj} &= {\mathbf{0}}
+  {\mathbf{a}}\_{(j)L} \circ {\mathbf{a}}\_{(j)R} &= {\mathbf{0}} \\\\
+  ({\mathbf{a}}\_{(j)L} - {\mathbf{1}}) - {\mathbf{a}}\_{(j)R} &= {\mathbf{0}}
\end{aligned}
\\]

Proving vectors of statements with a single statement
-----------------------------------------------------

-We want to combine the above three statements into a single statement for party `j`. We will also introduce challenge values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to each party `j`, and use them to help combine the statements. 
Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all `m` parties. -First, we will combine each of the two vector-statements into a single statement using the verifier's choice of challenge value \\(y\\) that is shared across all parties, and offset by \\(y_j = y^{j \cdot n}\\) that is unique to each party `j`: +First, we will combine each of the two vector-statements into a single statement using the verifier's choice of challenge value \\(y\\) that is shared across all parties, and offset by \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} = \mathbf{y}^n \cdot y^{j \cdot n} \\) that is unique to each party `j`: \\[ \begin{aligned} - {\langle {\mathbf{a}}\_{Lj}, {\mathbf{2}}^{n} \rangle} &= v \\\\ - {\langle {\mathbf{a}}\_{Lj} - {\mathbf{1}} - {\mathbf{a}}\_{Rj}, {\mathbf{y}}^{n} \cdot y_j \rangle} &= 0 \\\\ - {\langle {\mathbf{a}}\_{Lj}, {\mathbf{a}}\_{Rj} \circ {\mathbf{y}}^{n} \cdot y_j \rangle} &= 0 + {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{2}}^{n} \rangle} &= v \\\\ + {\langle {\mathbf{a}}\_{(j)L} - {\mathbf{1}} - {\mathbf{a}}\_{(j)R}, {\mathbf{y}}^{n}\_{(j)} \rangle} &= 0 \\\\ + {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{a}}\_{(j)R} \circ {\mathbf{y}}^{n}\_{(j)} \rangle} &= 0 \end{aligned} \\] The three resulting statements can then be combined in the same way, -using the verifier’s choice of challenge value \\(z\\) that is shared across all parties, and offset by \\(z_j = z^j\\) that is unique to each party `j`: +using the verifier’s choice of challenge value \\(z\\) that is shared across all parties, and offset by \\(z\_{(j)} = z^j\\) that is unique to each party `j`: \\[ \begin{aligned} -z^{2} z_j \cdot v +z^{2} z\_{(j)} \cdot v &= - z^{2} z_j \cdot {\langle {\mathbf{a}}\_{Lj}, {\mathbf{2}}^{n} \rangle} \\\\ - &+ z \cdot {\langle {\mathbf{a}}\_{Lj} - {\mathbf{1}} - {\mathbf{a}}\_{Rj}, {\mathbf{y}}^{n} \cdot y_j \rangle} \\\\ - &+ {\langle {\mathbf{a}}\_{Lj}, {\mathbf{a}}\_{Rj} \circ {\mathbf{y}}^{n} \cdot y_j \rangle} + z^{2} z\_{(j)} \cdot {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{2}}^{n} \rangle} \\\\ + &+ z \cdot {\langle {\mathbf{a}}\_{(j)L} - {\mathbf{1}} - {\mathbf{a}}\_{(j)R}, {\mathbf{y}}^{n}\_{(j)} \rangle} \\\\ + &+ {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{a}}\_{(j)R} \circ {\mathbf{y}}^{n}\_{(j)} \rangle} \end{aligned} \\] Combining inner products ------------------------ -We combine the terms in the last statement into a single inner product, using the same technique as in the single-value range proof. We will not reproduce the math here, only the end result: +We combine the terms in the last statement into a single inner product, using the same technique as in the single-value range proof. We will not reproduce the math here since it is the same as in the [combining inner products](index.html#combining-inner-products) step of the single-value proof. 
Here is the end result: \\[ \begin{aligned} - \delta_j(y,z) &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \cdot y_j \rangle} - z^{3} z_j \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}\\\\ - z^{2}z_j \cdot v + \delta_j(y,z) &= {\langle {\mathbf{a}}\_{Lj} - z {\mathbf{1}}, {\mathbf{y}}^{n} \cdot y_j \circ ({\mathbf{a}}\_{Rj} + z {\mathbf{1}}) + z^{2} z_j \cdot {\mathbf{2}}^{n} \rangle} + \delta_{(j)}(y,z) &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} z_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}\\\\ + z^{2}z_{(j)} \cdot v + \delta_{(j)}(y,z) &= {\langle {\mathbf{a}}\_{(j)L} - z {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \circ ({\mathbf{a}}\_{(j)R} + z {\mathbf{1}}) + z^{2} z_{(j)} \cdot {\mathbf{2}}^{n} \rangle} \end{aligned} \\] @@ -677,13 +683,13 @@ Blinding the inner product The prover chooses vectors of blinding factors \\[ -{\mathbf{s}}\_{Lj}, {\mathbf{s}}\_{Rj} \\;{\xleftarrow{\\$}}\\; {\mathbb Z\_p}^{n} +{\mathbf{s}}\_{(j)L}, {\mathbf{s}}\_{(j)R} \\;{\xleftarrow{\\$}}\\; {\mathbb Z\_p}^{n} \\] and uses them to construct blinded vector polynomials: \\[ \begin{aligned} - {\mathbf{l}}\_j(x) &= ({\mathbf{a}}\_{Lj} + {\mathbf{s}}\_{Lj} x) - z {\mathbf{1}} & \in {\mathbb Z\_p}[x]^{n} \\\\ - {\mathbf{r}}\_j(x) &= {\mathbf{y}}^{n} \cdot y_j \circ \left( ({\mathbf{a}}\_{Rj} + {\mathbf{s}}\_{Rj} x\right) + z {\mathbf{1}}) + z^{2} z_j {\mathbf{2}}^{n} &\in {\mathbb Z\_p}[x]^{n} + {\mathbf{l}}\_j(x) &= ({\mathbf{a}}\_{(j)L} + {\mathbf{s}}\_{Lj} x) - z {\mathbf{1}} & \in {\mathbb Z\_p}[x]^{n} \\\\ + {\mathbf{r}}\_j(x) &= {\mathbf{y}}^{n} \cdot y_j \circ \left( ({\mathbf{a}}\_{(j)R} + {\mathbf{s}}\_{Rj} x\right) + z {\mathbf{1}}) + z^{2} z_j {\mathbf{2}}^{n} &\in {\mathbb Z\_p}[x]^{n} \end{aligned} \\] The vector polynomials \\({\mathbf{l}}\_j(x)\\) and \\({\mathbf{r}}\_j(x)\\) are related to \\(t(x)\\) by the following equations: @@ -692,7 +698,7 @@ The vector polynomials \\({\mathbf{l}}\_j(x)\\) and \\({\mathbf{r}}\_j(x)\\) are t_j(x) &= {\langle {\mathbf{l}}\_j(x), {\mathbf{r}}\_j(x) \rangle} \\\\ &= t\_{j0} + t\_{j1} x + t\_{j2} x^{2} \\\\ t\_{j0} &= {\langle {\mathbf{l}}\_{j0}, {\mathbf{r}}\_{j0} \rangle} \\\\ - &= {\langle {\mathbf{a}}\_{Lj} - z {\mathbf{1}}, {\mathbf{y}}^{n} \cdot y_j \circ ({\mathbf{a}}\_{Rj} + z {\mathbf{1}}) + z^{2} z_j \cdot {\mathbf{2}}^{n} \rangle} \\\\ + &= {\langle {\mathbf{a}}\_{(j)L} - z {\mathbf{1}}, {\mathbf{y}}^{n} \cdot y_j \circ ({\mathbf{a}}\_{(j)R} + z {\mathbf{1}}) + z^{2} z_j \cdot {\mathbf{2}}^{n} \rangle} \\\\ t\_{j1} &= {\langle {\mathbf{l}}\_{j1}, {\mathbf{r}}\_{j0} \rangle} + {\langle {\mathbf{l}}\_{j0}, {\mathbf{r}}\_{j1} \rangle} \\\\ t\_{j2} &= {\langle {\mathbf{l}}\_{j1}, {\mathbf{r}}\_{j1} \rangle} \\\\ \end{aligned} @@ -734,7 +740,7 @@ Proving that \\(t\_{j0}\\) is correct requires first creating commitments to the \\[ \begin{aligned} - t_j(x) B + {\tilde{t}}\_j(x) {\widetilde{B}} \stackrel{?}{=} z^2 z_j V_j + \delta_j(y,z) B + x T\_{j1} + x^{2} T\_{j2}\\\\ + t_j(x) B + {\tilde{t}}\_j(x) {\widetilde{B}} \stackrel{?}{=} z^2 z_j V_{(j)} + \delta_j(y,z) B + x T\_{j1} + x^{2} T\_{j2}\\\\ \delta_j(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \cdot y_j \rangle} - z^{3} z_j \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle} \end{aligned} \\] @@ -743,7 +749,7 @@ If we combine all of the statements about \\(t\_{j0}\\) from all of the `j` part \\[ \begin{aligned} - \sum_{j=0}^{m-1}t_j(x) B + \sum_{j=0}^{m-1}{\tilde{t}}\_j(x) {\widetilde{B}} \stackrel{?}{=} 
z^2 \sum_{j=0}^{m-1} z_j V_j + \sum_{j=0}^{m-1} \delta_j(y,z) B + x \sum_{j=0}^{m-1} T\_{j1} + x^{2} \sum_{j=0}^{m-1} T\_{j2} + \sum_{j=0}^{m-1}t_j(x) B + \sum_{j=0}^{m-1}{\tilde{t}}\_j(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z_j V_{(j)} + \sum_{j=0}^{m-1} \delta_j(y,z) B + x \sum_{j=0}^{m-1} T\_{j1} + x^{2} \sum_{j=0}^{m-1} T\_{j2} \end{aligned} \\] @@ -764,7 +770,7 @@ Now instead of having to do `m` individual checks to prove that \\(t\_{j0}\\) fo \\[ \begin{aligned} - t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z_j V_j + \delta(y,z) B + x T\_{1} + x^{2} T\_{2},\\\\ + t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z_j V_{(j)} + \delta(y,z) B + x T\_{1} + x^{2} T\_{2},\\\\ \delta(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n \cdot m} \rangle} - z^{3} \sum_{j=0}^{m-1} z_j \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\ \end{aligned} \\] @@ -773,7 +779,7 @@ Since we know that \\(z_j = z^j\\), we can rewrite the equation as follows: \\[ \begin{aligned} - t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} \sum_{j=0}^{m-1} z^{j+2} V_j + \delta(y,z) B + x T\_{1} + x^{2} T\_{2},\\\\ + t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} \sum_{j=0}^{m-1} z^{j+2} V_{(j)} + \delta(y,z) B + x T\_{1} + x^{2} T\_{2},\\\\ \delta(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n \cdot m} \rangle} - \sum_{j=0}^{m-1} z^{j+3} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\ \end{aligned} \\] From 1d8d43f5e15c6920ab5c3372431b0240ce14d7cb Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 30 Apr 2018 16:04:29 -0700 Subject: [PATCH 153/186] fixing up to l(x), r(x) --- docs/notes.md | 91 ++++++++++++++++++++++++++------------------------- 1 file changed, 47 insertions(+), 44 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index f39f257f..08c22bee 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -642,9 +642,10 @@ This can be expressed with additional conditions: Proving vectors of statements with a single statement ----------------------------------------------------- -We want to combine the above three statements into a single statement for party `j`. We will also introduce challenge values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to each party `j`, and use them to help combine the statements. Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all `m` parties. +We want to combine the above three statements into a single statement for party `j`, as we do in the [proving vectors of statements](index.html#proving-vectors-of-statements-with-a-single-statement) step of the single-value range proof. We will additionally introduce challenge values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to each party `j`. Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all `m` parties. 
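As a quick sanity check on these per-party offsets, here is a small standalone Rust sketch. It uses `u64` arithmetic modulo a small prime as a stand-in for field scalars and none of the crate's types, so it is purely illustrative: it checks that the slice \\(\mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]}\\) is just \\(\mathbf{y}^{n}\\) scaled by the scalar \\(y^{j \cdot n}\\), and that the scalar offset \\(z\_{(j)}\\) is \\(z^{j}\\).

```rust
// Toy sanity check of the per-party challenge offsets. `u64` arithmetic
// modulo a small prime stands in for field scalars; illustrative only.
const P: u64 = 1_000_000_007;

/// Returns (1, b, b^2, ..., b^(count-1)) mod P.
fn powers(b: u64, count: usize) -> Vec<u64> {
    let mut out = Vec::with_capacity(count);
    let mut acc = 1u64;
    for _ in 0..count {
        out.push(acc);
        acc = acc * b % P;
    }
    out
}

fn main() {
    let (n, m) = (4usize, 3usize);
    let (y, z) = (7u64, 11u64); // stand-ins for the shared challenges

    let y_nm = powers(y, n * m); // the shared vector y^(n*m)
    let y_n = powers(y, n); // the single-party vector y^n
    let z_m = powers(z, m); // so z_(j) = z_m[j] = z^j

    for j in 0..m {
        // Party j's slice y^n_(j) = y^(n*m)[j*n .. (j+1)*n] ...
        let slice = &y_nm[j * n..(j + 1) * n];
        // ... equals y^n scaled by the party's offset y^(j*n):
        let offset = y_nm[j * n];
        let offset_y_n: Vec<u64> = y_n.iter().map(|p| p * offset % P).collect();
        assert_eq!(slice, &offset_y_n[..]);

        // Party j's scalar offset z_(j) is the j-th power of z:
        assert_eq!(z_m[j], (0..j).fold(1u64, |acc, _| acc * z % P));
    }
}
```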
-First, we will combine each of the two vector-statements into a single statement using the verifier's choice of challenge value \\(y\\) that is shared across all parties, and offset by \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} = \mathbf{y}^n \cdot y^{j \cdot n} \\) that is unique to each party `j`: +First, we will combine each of the two vector-statements into a single statement using the verifier's choice of challenge value \\(y\\) that is shared across all parties +, and offset by vector \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} \\), a length `n` slice into vector \\(\mathbf{y}^{n \cdot m}\\) that is unique to each party `j`: \\[ \begin{aligned} @@ -655,7 +656,7 @@ First, we will combine each of the two vector-statements into a single statement \\] The three resulting statements can then be combined in the same way, -using the verifier’s choice of challenge value \\(z\\) that is shared across all parties, and offset by \\(z\_{(j)} = z^j\\) that is unique to each party `j`: +using the verifier’s choice of challenge value \\(z\\) that is shared across all parties, and offset by scalar \\(z\_{(j)} = z^j\\) that is unique to each party `j`: \\[ \begin{aligned} z^{2} z\_{(j)} \cdot v @@ -681,26 +682,12 @@ We combine the terms in the last statement into a single inner product, using th Blinding the inner product -------------------------- -The prover chooses vectors of blinding factors -\\[ -{\mathbf{s}}\_{(j)L}, {\mathbf{s}}\_{(j)R} \\;{\xleftarrow{\\$}}\\; {\mathbb Z\_p}^{n} -\\] -and uses them to construct blinded vector polynomials: -\\[ -\begin{aligned} - {\mathbf{l}}\_j(x) &= ({\mathbf{a}}\_{(j)L} + {\mathbf{s}}\_{Lj} x) - z {\mathbf{1}} & \in {\mathbb Z\_p}[x]^{n} \\\\ - {\mathbf{r}}\_j(x) &= {\mathbf{y}}^{n} \cdot y_j \circ \left( ({\mathbf{a}}\_{(j)R} + {\mathbf{s}}\_{Rj} x\right) + z {\mathbf{1}}) + z^{2} z_j {\mathbf{2}}^{n} &\in {\mathbb Z\_p}[x]^{n} -\end{aligned} -\\] -The vector polynomials \\({\mathbf{l}}\_j(x)\\) and \\({\mathbf{r}}\_j(x)\\) are related to \\(t(x)\\) by the following equations: +The prover chooses vectors of blinding factors \\( \mathbf{s}\_{(j)L}, {\mathbf{s}}\_{(j)R} \\), and uses them to construct the blinded vector polynomials \\(\mathbf{l}\_{(j)}(x), \mathbf{r}\_{(j)}(x)\\). We will not reproduce the steps or the explanation here since it is the same as in the [blinding the inner product](index.html#blinding-the-inner-product) step of the single-value proof. 
Here are the final equations for the vector polynomials: + \\[ \begin{aligned} - t_j(x) &= {\langle {\mathbf{l}}\_j(x), {\mathbf{r}}\_j(x) \rangle} \\\\ - &= t\_{j0} + t\_{j1} x + t\_{j2} x^{2} \\\\ - t\_{j0} &= {\langle {\mathbf{l}}\_{j0}, {\mathbf{r}}\_{j0} \rangle} \\\\ - &= {\langle {\mathbf{a}}\_{(j)L} - z {\mathbf{1}}, {\mathbf{y}}^{n} \cdot y_j \circ ({\mathbf{a}}\_{(j)R} + z {\mathbf{1}}) + z^{2} z_j \cdot {\mathbf{2}}^{n} \rangle} \\\\ - t\_{j1} &= {\langle {\mathbf{l}}\_{j1}, {\mathbf{r}}\_{j0} \rangle} + {\langle {\mathbf{l}}\_{j0}, {\mathbf{r}}\_{j1} \rangle} \\\\ - t\_{j2} &= {\langle {\mathbf{l}}\_{j1}, {\mathbf{r}}\_{j1} \rangle} \\\\ + {\mathbf{l}}\_{(j)}(x) &= ({\mathbf{a}}\_{(j)L} + {\mathbf{s}}\_{(j)L} x) - z {\mathbf{1}} & \in {\mathbb Z\_p}[x]^{n} \\\\ + {\mathbf{r}}\_{(j)}(x) &= {\mathbf{y}}^{n}\_{(j)} \circ \left( ({\mathbf{a}}\_{(j)R} + {\mathbf{s}}\_{(j)R} x\right) + z {\mathbf{1}}) + z^{2} z_{(j)} {\mathbf{2}}^{n} &\in {\mathbb Z\_p}[x]^{n} \end{aligned} \\] @@ -708,20 +695,20 @@ Proving that \\(t(x)\\) is correct ---------------------------------- Proving that \\(t\_j(x)\\) is correct means proving that -\\({\mathbf{l}}\_j(x)\\), \\({\mathbf{r}}\_j(x)\\) are correctly formed, and that -\\(t_j(x) = {\langle {\mathbf{l}}\_j(x), {\mathbf{r}}\_j(x) \rangle}\\). +\\({\mathbf{l}}\_{(j)}(x)\\), \\({\mathbf{r}}\_{(j)}(x)\\) are correctly formed, and that +\\(t\_{(j)}(x) = {\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{r}}\_{(j)}(x) \rangle}\\). -We can combine the statements about \\(t_j(x)\\), \\({\mathbf{l}}\_j(x)\\), and \\({\mathbf{r}}\_j(x)\\) from all `m` parties in the following manner: +We can combine the statements about \\(t\_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x)\\), and \\({\mathbf{r}}\_{(j)}(x)\\) from all `m` parties in the following manner: \\[ \begin{aligned} - t(x) &= \sum_{j=0}^{m-1} t_j(x)\\\\ - {\mathbf{l}}(x) &= {\mathbf{l}}\_{j=0}(x) || {\mathbf{l}}\_{j=1}(x) || \dots || {\mathbf{l}}\_{j=m-1}(x) \\\\ - {\mathbf{r}}(x) &= {\mathbf{r}}\_{j=0}(x) || {\mathbf{r}}\_{j=1}(x) || \dots || {\mathbf{r}}\_{j=m-1}(x) \\\\ + t(x) &= \sum_{j=0}^{m-1} t\_{(j)}(x)\\\\ + {\mathbf{l}}(x) &= {\mathbf{l}}\_{(0)}(x) || {\mathbf{l}}\_{(1)}(x) || \dots || {\mathbf{l}}\_{(m-1)}(x) \\\\ + {\mathbf{r}}(x) &= {\mathbf{r}}\_{(0)}(x) || {\mathbf{r}}\_{(1)}(x) || \dots || {\mathbf{r}}\_{(m-1)}(x) \\\\ \end{aligned} \\] -We can add the \\(t_j(x)\\) values together to create \\(t(x)\\) because each \\(t_j(x)\\) is calculated using the \\(y_j\\) and \\(z_j\\) challenge variables that are unique to each party `j`, so all of the \\(t_j(x)\\) values will be offset from one another. +We can add the \\(t_j(x)\\) values together to create \\(t(x)\\) because each \\(t_j(x)\\) is calculated using the \\(\mathbf{y}^n\_{(j)}\\) and \\(z\_{(j)}\\) challenge variables that are unique to each party `j`, so all of the \\(t\_{(j)}(x)\\) values will be offset from one another. Now instead of having to do `m` individual checks to prove that \\(t_j(x)\\), \\({\mathbf{l}}\_j(x)\\), and \\({\mathbf{r}}\_j(x)\\) for all parties `j` are correct, we can do the verification with one check: @@ -731,21 +718,21 @@ Now instead of having to do `m` individual checks to prove that \\(t_j(x)\\), \\ \end{aligned} \\] -We can do this check using the inner product proof, in the same way the single-value range proof uses the inner product proof. +We can do this check using the [inner product proof](index.html#inner-product-proof), in the same way the single-value range proof uses the inner product proof. 
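The reason one check can replace `m` separate checks is that the inner product of the concatenated vectors splits into the sum of the per-party inner products. The following standalone Rust sketch (plain `i64` values standing in for field scalars; illustrative only, not the crate's code) verifies that identity on a toy example:

```rust
// Toy check that the inner product of the concatenated vectors equals the
// sum of the per-party inner products, so one inner-product check covers
// all parties. Plain i64 stands in for field scalars.
fn inner_product(a: &[i64], b: &[i64]) -> i64 {
    a.iter().zip(b).map(|(x, y)| x * y).sum()
}

fn main() {
    // Per-party vectors l_(j)(x) and r_(j)(x), already evaluated at x.
    let l_parties: Vec<Vec<i64>> = vec![vec![1, -2], vec![3, 4], vec![5, -6]];
    let r_parties: Vec<Vec<i64>> = vec![vec![7, 8], vec![-9, 10], vec![11, 12]];

    // t(x) = sum_j t_(j)(x) = sum_j <l_(j)(x), r_(j)(x)>
    let t_sum: i64 = l_parties
        .iter()
        .zip(&r_parties)
        .map(|(l, r)| inner_product(l, r))
        .sum();

    // <l(x), r(x)> for the concatenations l(x) = l_(0) || ... || l_(m-1), etc.
    let l_concat = l_parties.concat();
    let r_concat = r_parties.concat();

    assert_eq!(t_sum, inner_product(&l_concat, &r_concat));
}
```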
Proving that \\(t_0\\) is correct --------------------------------- -Proving that \\(t\_{j0}\\) is correct requires first creating commitments to the variables, and then proving the following relation (for an explanation of how the commitments are created and how the relation is derived, see the single-value range proof notes): +Proving that \\(t\_{(j)0}\\) is correct requires first creating commitments to the variables, and then proving a relation over the commitments. For an explanation of how the commitments are created and how the relation is derived, see the [proving that \\(t_0\\) is correct](index.html#proving-that-t_0-is-correct) step of the single-value range proof. The relation over the commitments to prove is: \\[ \begin{aligned} - t_j(x) B + {\tilde{t}}\_j(x) {\widetilde{B}} \stackrel{?}{=} z^2 z_j V_{(j)} + \delta_j(y,z) B + x T\_{j1} + x^{2} T\_{j2}\\\\ - \delta_j(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n} \cdot y_j \rangle} - z^{3} z_j \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle} + t\_{(j)}(x) B + {\tilde{t}}\_{(j)}(x) {\widetilde{B}} \stackrel{?}{=} z^2 z\_{(j)} V_{(j)} + \delta\_{(j)}(y,z) B + x T\_{(j)1} + x^{2} T\_{(j)2}\\\\ + \delta\_{(j)}(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle} \end{aligned} \\] -If we combine all of the statements about \\(t\_{j0}\\) from all of the `j` parties by adding them together, then we get: +If we combine all of the statements about \\(t\_{(j)0}\\) from all of the `j` parties by adding them together, then we get: \\[ \begin{aligned} @@ -753,29 +740,45 @@ If we combine all of the statements about \\(t\_{j0}\\) from all of the `j` part \end{aligned} \\] -We can combine the party-specifc values in the following manner: +We can combine the values and commitments by summing them directly. We can do this instead of having to take a linear combination, because each party's values and commitments are already offset by the values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to that party. 
\\[ \begin{aligned} - t(x) &= \sum_{j=0}^{m-1} t_j(x)\\\\ - {\tilde{t}}(x) &= \sum_{j=0}^{m-1}{\tilde{t}}\_j(x)\\\\ - T_1 &= \sum_{j=0}^{m-1} T_{j1}\\\\ - T_2 &= \sum_{j=0}^{m-1} T_{j2}\\\\ - \delta(y,z) &= \sum_{j=0}^{m-1} \delta_j(y,z)\\\\ - &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n \cdot m} \rangle} - z^{3} \sum_{j=0}^{m-1} z_j \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\ + t(x) &= \sum_{j=0}^{m-1} t\_{(j)}(x)\\\\ + {\tilde{t}}(x) &= \sum_{j=0}^{m-1}{\tilde{t}}\_{(j)}(x)\\\\ + T_1 &= \sum_{j=0}^{m-1} T_{(j)1}\\\\ + T_2 &= \sum_{j=0}^{m-1} T_{(j)2}\\\\ + \delta(y,z) &= \sum_{j=0}^{m-1} \delta\_{(j)}(y,z)\\\\ + &= (z - z^{2}) \cdot \sum_{j=0}^{m-1} {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\ \end{aligned} \\] -Now instead of having to do `m` individual checks to prove that \\(t\_{j0}\\) for all parties `j` are correct, we can do the verification with one check using the combined values: +Since we know that \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} \\), we can simplify \\(\delta(y, z)\\): + +\\[ +\begin{aligned} + \delta(y, z) &= (z - z^{2}) \cdot \sum_{j=0}^{m-1} {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\ + &= (z - z^{2}) \cdot ( + {\langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[0 : n - 1]} \rangle + + \langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[n : 2 \cdot n - 1]} \rangle + + \dots + + \langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[m \cdot n : (m+1) \cdot n - 1]} \rangle}) - + z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle} \\\\ + &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, \mathbf{y}^{n \cdot m} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle} \\\\ +\end{aligned} +\\] + + +Now instead of having to do `m` individual checks to prove that \\(t\_{(j)0}\\) for all parties `j` are correct, we can do the verification with one check using the combined values: \\[ \begin{aligned} - t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z_j V_{(j)} + \delta(y,z) B + x T\_{1} + x^{2} T\_{2},\\\\ - \delta(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n \cdot m} \rangle} - z^{3} \sum_{j=0}^{m-1} z_j \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\ + t(x) B + {\tilde{t}}(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z\_{(j)} V_{(j)} + \delta(y,z) B + x T\_{1} + x^{2} T\_{2},\\\\ + \delta(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n \cdot m} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\ \end{aligned} \\] -Since we know that \\(z_j = z^j\\), we can rewrite the equation as follows: +Since we know that \\(z\_{(j)} = z^j\\), we can rewrite the equation as follows: \\[ \begin{aligned} From 4ccc66fc536944e91df7af795b40c1e216b6618a Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 30 Apr 2018 17:19:43 -0700 Subject: [PATCH 154/186] some fix-ups and debugging --- docs/notes.md | 63 ++++++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 08c22bee..0d36b689 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -606,19 +606,19 @@ additional and final step involves 
 sending a pair of scalars

 Aggregated Range Proof
 ======================

-We want to create an aggregated range proof for `m` values (`m` parties) that is more efficient to create and verify than `m` individual range proofs.
+We want to create an aggregated range proof for `m` values that is more efficient to create and verify than `m` individual range proofs.

-The aggregation protocol is a multi-party computation protocol, involving `m` parties and one dealer, where the parties don't reveal their secrets to each other. The parties share their commitments with the dealer, and the dealer generates and returns challenge variables. The parties then share their proof shares with the dealer, and the dealer combines their shares to create an aggregated proof.
+The aggregation protocol is a multi-party computation protocol, involving `m` parties (one party per value) and one dealer, where the parties don't reveal their secrets to each other. The parties share their commitments with the dealer, and the dealer generates and returns challenge variables. The parties then share their proof shares with the dealer, and the dealer combines their shares to create an aggregated proof.

-The Bulletproofs paper outlines two versions of multi-party computation aggregation - one with a constant number of rounds but communication that is linear in both `m` and the binary encoding of the range, and one with a logarithmic number of rounds and communication that is only linear in `m`. We chose to implement the first version because the two versions don't differ significantly in proof size, and the first version is less complicated.
+The Bulletproofs paper outlines two versions of multi-party computation aggregation - one with a constant number of rounds but communication that is linear in both `m` and the binary encoding of the range, and one with a logarithmic number of rounds and communication that is only linear in `m`. We chose to implement the first version because the two versions don't differ significantly in proof size, and the first version is more straightforward.

 For more information on how the aggregation protocol works and is implemented, see the [protocol notes](../aggregated_range_proof/index.html).

-The aggregated range proof has the same form as the individual range proof, in that the provers (the parties) still perform the same calculations to prove that \\(t_0, \mathbf{l}(x), \mathbf{r}(x)\\) are correct and that \\(t(x) = \langle \mathbf{l}(x), \mathbf{r}(x) \rangle \\). The difference is that the challenges values are obtained from the dealer, and that the calculations of different parties are seperated by different powers of the challenge scalars `y` and `z`.
+The aggregated range proof has the same form as the individual range proof, in that the provers (the parties) still perform the same calculations to prove that \\(t(x) = \langle \mathbf{l}(x), \mathbf{r}(x) \rangle \\) and that \\(t_0, \mathbf{l}(x), \mathbf{r}(x)\\) are correct. The difference is that the challenge values are obtained from the dealer, which generates them by combining commitments from all the parties, and that the calculations of different parties are separated by different powers of the challenge scalars \\(y\\) and \\(z\\).

 We will explain how one piece of the aggregated proof is generated for party `j`, and then will show how all of the pieces for all of the `m` parties can be combined into one aggregated proof.
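As a rough schematic of the flow just described, the sketch below uses hypothetical placeholder types and functions (not the crate's actual party/dealer API). Per the protocol, the dealer issues challenges in two rounds: \\(y\\) and \\(z\\) after receiving the commitments \\(V\_{(j)}, A\_{(j)}, S\_{(j)}\\), and \\(x\\) after receiving the commitments \\(T\_{(j)1}, T\_{(j)2}\\); it then sums the proof shares into a single proof.

```rust
// Schematic of the message flow, with hypothetical placeholder types and
// free functions; this is not the crate's party/dealer API.
struct BitCommitments;  // party j -> dealer: V_(j), A_(j), S_(j)
struct PolyCommitments; // party j -> dealer: T_(j)1, T_(j)2
struct ProofShare;      // party j -> dealer: scalars for its piece of the proof

fn party_commit_bits(_v: u64) -> BitCommitments { BitCommitments }
fn dealer_challenge_yz(_c: &[BitCommitments]) -> (u64, u64) { (7, 11) }
fn party_commit_poly(_y: u64, _z: u64) -> PolyCommitments { PolyCommitments }
fn dealer_challenge_x(_c: &[PolyCommitments]) -> u64 { 13 }
fn party_prove(_x: u64) -> ProofShare { ProofShare }
fn dealer_assemble(_shares: &[ProofShare]) { /* sum the shares into one proof */ }

fn main() {
    let values = [5u64, 9, 42]; // one secret value per party

    // Round 1: bit commitments in, challenges y and z out.
    let bit_comms: Vec<_> = values.iter().map(|v| party_commit_bits(*v)).collect();
    let (y, z) = dealer_challenge_yz(&bit_comms);

    // Round 2: polynomial commitments in, challenge x out.
    let poly_comms: Vec<_> = values.iter().map(|_| party_commit_poly(y, z)).collect();
    let x = dealer_challenge_x(&poly_comms);

    // Round 3: proof shares in, aggregated proof out.
    let shares: Vec<_> = values.iter().map(|_| party_prove(x)).collect();
    dealer_assemble(&shares);
}
```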
-Party `j` begins with a secret value \\(v_{(j)}\\), and wishes to convince the verifier that \\(v_{(j)} \in [0, 2^n)\\) without revealing \\(v_{(j)}\\). We use the notation that the subscript in \\(v_{(j)}\\) denotes the `j`th party's value. +Party `j` begins with a secret value \\(v_{(j)}\\), and wishes to convince the verifier that \\(v_{(j)} \in [0, 2^n)\\) without revealing \\(v_{(j)}\\). We use the notation that the subscript \\({(j)}\\) denotes the `j`th party's value. Proving range statements with bit vectors ----------------------------------------- @@ -642,7 +642,7 @@ This can be expressed with additional conditions: Proving vectors of statements with a single statement ----------------------------------------------------- -We want to combine the above three statements into a single statement for party `j`, as we do in the [proving vectors of statements](index.html#proving-vectors-of-statements-with-a-single-statement) step of the single-value range proof. We will additionally introduce challenge values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to each party `j`. Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all `m` parties. +We want to combine the above three statements into a single statement for party `j`, as in the [proving vectors of statements](index.html#proving-vectors-of-statements-with-a-single-statement) step of the single-value range proof. We will additionally introduce challenge values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to each party `j`. Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all `m` parties. First, we will combine each of the two vector-statements into a single statement using the verifier's choice of challenge value \\(y\\) that is shared across all parties , and offset by vector \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} \\), a length `n` slice into vector \\(\mathbf{y}^{n \cdot m}\\) that is unique to each party `j`: @@ -670,7 +670,7 @@ z^{2} z\_{(j)} \cdot v Combining inner products ------------------------ -We combine the terms in the last statement into a single inner product, using the same technique as in the single-value range proof. We will not reproduce the math here since it is the same as in the [combining inner products](index.html#combining-inner-products) step of the single-value proof. Here is the end result: +We combine the terms in the preceding statement into a single inner product, using the same technique as in the single-value range proof. We will not reproduce the math here since it is the same as in the [combining inner products](index.html#combining-inner-products) step of the single-value proof. Here is the end result: \\[ \begin{aligned} @@ -694,7 +694,7 @@ The prover chooses vectors of blinding factors \\( \mathbf{s}\_{(j)L}, {\mathbf{ Proving that \\(t(x)\\) is correct ---------------------------------- -Proving that \\(t\_j(x)\\) is correct means proving that +Proving that \\(t\_{(j)}(x)\\) is correct means proving that \\({\mathbf{l}}\_{(j)}(x)\\), \\({\mathbf{r}}\_{(j)}(x)\\) are correctly formed, and that \\(t\_{(j)}(x) = {\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{r}}\_{(j)}(x) \rangle}\\). 
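As a standalone illustration of this relation (plain `i64` vectors in place of field elements; not the crate's code), the sketch below checks that with \\(t\_0 = \langle \mathbf{l}\_0, \mathbf{r}\_0 \rangle\\), \\(t\_1 = \langle \mathbf{l}\_1, \mathbf{r}\_0 \rangle + \langle \mathbf{l}\_0, \mathbf{r}\_1 \rangle\\), and \\(t\_2 = \langle \mathbf{l}\_1, \mathbf{r}\_1 \rangle\\), the polynomial \\(t(x) = t\_0 + t\_1 x + t\_2 x^2\\) equals \\(\langle \mathbf{l}(x), \mathbf{r}(x) \rangle\\) at any point \\(x\\):

```rust
// Toy check that t(x) = <l(x), r(x)> for l(x) = l0 + l1*x, r(x) = r0 + r1*x,
// with t0, t1, t2 built from the inner products of the coefficient vectors.
fn inner_product(a: &[i64], b: &[i64]) -> i64 {
    a.iter().zip(b).map(|(x, y)| x * y).sum()
}

fn eval(v0: &[i64], v1: &[i64], x: i64) -> Vec<i64> {
    v0.iter().zip(v1).map(|(&a, &b)| a + b * x).collect()
}

fn main() {
    let (l0, l1) = (vec![1i64, -2, 3], vec![4i64, 0, -1]);
    let (r0, r1) = (vec![2i64, 5, -3], vec![-1i64, 1, 2]);

    let t0 = inner_product(&l0, &r0);
    let t1 = inner_product(&l1, &r0) + inner_product(&l0, &r1);
    let t2 = inner_product(&l1, &r1);

    // The identity holds at any point x, e.g. a "challenge" x = 6.
    let x = 6i64;
    let lhs = t0 + t1 * x + t2 * x * x;
    let rhs = inner_product(&eval(&l0, &l1, x), &eval(&r0, &r1, x));
    assert_eq!(lhs, rhs);
}
```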
@@ -708,9 +708,9 @@ We can combine the statements about \\(t\_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x) \end{aligned} \\] -We can add the \\(t_j(x)\\) values together to create \\(t(x)\\) because each \\(t_j(x)\\) is calculated using the \\(\mathbf{y}^n\_{(j)}\\) and \\(z\_{(j)}\\) challenge variables that are unique to each party `j`, so all of the \\(t\_{(j)}(x)\\) values will be offset from one another. +We can add the \\(t_{(j)}(x)\\) values together to create \\(t(x)\\) because each \\(t_{(j)}(x)\\) is calculated using the \\(\mathbf{y}^n\_{(j)}\\) and \\(z\_{(j)}\\) challenge variables that are unique to each party `j`, so all of the \\(t\_{(j)}(x)\\) values will be offset from one another. -Now instead of having to do `m` individual checks to prove that \\(t_j(x)\\), \\({\mathbf{l}}\_j(x)\\), and \\({\mathbf{r}}\_j(x)\\) for all parties `j` are correct, we can do the verification with one check: +Now instead of having to do `m` individual checks to prove that \\(t_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x)\\), and \\({\mathbf{r}}\_{(j)}(x)\\) for all parties `j` are correct, we can do the verification with one check: \\[ \begin{aligned} @@ -736,7 +736,7 @@ If we combine all of the statements about \\(t\_{(j)0}\\) from all of the `j` pa \\[ \begin{aligned} - \sum_{j=0}^{m-1}t_j(x) B + \sum_{j=0}^{m-1}{\tilde{t}}\_j(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z_j V_{(j)} + \sum_{j=0}^{m-1} \delta_j(y,z) B + x \sum_{j=0}^{m-1} T\_{j1} + x^{2} \sum_{j=0}^{m-1} T\_{j2} + \sum_{j=0}^{m-1}t_{(j)}(x) B + \sum_{j=0}^{m-1}{\tilde{t}}\_{(j)}(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z_{(j)} V_{(j)} + \sum_{j=0}^{m-1} \delta_{(j)}(y,z) B + x \sum_{j=0}^{m-1} T\_{(j)1} + x^{2} \sum_{j=0}^{m-1} T\_{(j)2} \end{aligned} \\] @@ -757,12 +757,11 @@ Since we know that \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n \\[ \begin{aligned} - \delta(y, z) &= (z - z^{2}) \cdot \sum_{j=0}^{m-1} {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\ - &= (z - z^{2}) \cdot ( + \delta(y, z) &= (z - z^{2}) \cdot ( {\langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[0 : n - 1]} \rangle + \langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[n : 2 \cdot n - 1]} \rangle + \dots + - \langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[m \cdot n : (m+1) \cdot n - 1]} \rangle}) - + \langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[(m-1) \cdot n : m \cdot n - 1]} \rangle}) - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle} \\\\ &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, \mathbf{y}^{n \cdot m} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle} \\\\ \end{aligned} @@ -790,21 +789,21 @@ Since we know that \\(z\_{(j)} = z^j\\), we can rewrite the equation as follows: Proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct --------------------------------------------------------------------- -Proving that \\({\mathbf{l}}\_j(x)\\), \\({\mathbf{r}}\_j(x)\\) are correct requires first creating commitments to the variables, and then proving the following relation (for an explanation of how the commitments are created and how the relation is derived, see the single-value range proof notes): +Proving that \\({\mathbf{l}}\_{(j)}(x)\\), \\({\mathbf{r}}\_{(j)}(x)\\) are correct requires first creating commitments to the variables, and then proving a relation over the commitments. 
For an explanation of how the commitments are created and how the relation is derived, see the [proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct](index.html#proving-that-mathbflx-mathbfrx-are-correct) step of the single-value range proof. The relation over the commitments to prove is: \\[ \begin{aligned} - {\langle {\mathbf{l}}\_j(x), {\mathbf{G}\_j} \rangle} + {\langle {\mathbf{r}}\_j(x) \circ {\mathbf{y}^{-n}} \cdot y_j^{-1}, {\mathbf{H}}\_j \rangle} \stackrel{?}{=} -{\widetilde{e}\_j} {\widetilde{B}} + A_j + x S_j - z{\langle {\mathbf{1}}, {\mathbf{G}\_j} \rangle} + {\langle z {\mathbf{1}} + z^2 z_j \cdot {\mathbf{y}^{-n}} y_j^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}\_j} \rangle} + {\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} + {\langle {\mathbf{r}}\_{(j)}(x) \circ (\mathbf{y}^{n}\_{(j)})^{-1}, {\mathbf{H}}\_{(j)} \rangle} \stackrel{?}{=} -{\widetilde{e}\_{(j)}} {\widetilde{B}} + A_{(j)} + x S_{(j)} - z{\langle {\mathbf{1}}, {\mathbf{G}\_{(j)}} \rangle} + {\langle z {\mathbf{1}} + z^2 z_{(j)} \cdot (\mathbf{y}^{n}\_{(j)})^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}\_{(j)}} \rangle} \end{aligned} \\] -Where \\({\mathbf{G}\_j}\\) is party `j`'s share of the generators \\({\mathbf{G}}\\), or \\({\mathbf{G}\_{[j\cdot n : (j+1)n - 1]}}\\), and \\({\mathbf{H}\_j}\\) is party `j`'s share of the generators \\({\mathbf{H}}\\), or \\({\mathbf{H}\_{[j\cdot n : (j+1)n - 1]}}\\). +\\({\mathbf{G}\_{(j)}}\\) is party `j`'s share of the generators \\({\mathbf{G}}\\), or \\({\mathbf{G}\_{[j\cdot n : (j+1)n - 1]}}\\), and \\({\mathbf{H}\_{(j)}}\\) is party `j`'s share of the generators \\({\mathbf{H}}\\), or \\({\mathbf{H}\_{[j\cdot n : (j+1)n - 1]}}\\). -If we combine all the statements about \\({\mathbf{l}}\_j(x)\\), \\({\mathbf{r}}\_j(x)\\) from all the `m` parties by adding them together, then we get: +If we combine all of the statements about \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) from all the `j` parties by adding them together, then we get: \\[ \begin{aligned} - \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_j(x), {\mathbf{G}\_j} \rangle} + \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_j(x) \circ {\mathbf{y}^{-n}} \cdot y_j^{-1}, {\mathbf{H}}\_j \rangle} \stackrel{?}{=} -\sum_{j=0}^{m-1}{\widetilde{e}\_j} {\widetilde{B}} + \sum_{j=0}^{m-1}A_j + x \sum_{j=0}^{m-1}S_j - z \sum_{j=0}^{m-1}{\langle {\mathbf{1}}, {\mathbf{G}\_j} \rangle} + \sum_{j=0}^{m-1}{\langle z {\mathbf{1}} + z^2 z_j \cdot {\mathbf{y}^{-n}} y_j^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}\_j} \rangle} + \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} + \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_{(j)}(x) \circ (\mathbf{y}^{n}\_{(j)})^{-1}, {\mathbf{H}}\_{(j)} \rangle} \stackrel{?}{=} -\sum_{j=0}^{m-1}{\widetilde{e}\_{(j)}} {\widetilde{B}} + \sum_{j=0}^{m-1}A_{(j)} + x \sum_{j=0}^{m-1}S_{(j)} - z \sum_{j=0}^{m-1}{\langle {\mathbf{1}}, {\mathbf{G}\_{(j)}} \rangle} + \sum_{j=0}^{m-1}{\langle z {\mathbf{1}} + z^2 z_{(j)} \cdot (\mathbf{y}^{n}\_{(j)})^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}\_{(j)}} \rangle} \end{aligned} \\] @@ -812,10 +811,12 @@ We can simplify this expression by making a few observations. 
We know that: \\[ \begin{aligned} - {\mathbf{l}}(x) &= {\mathbf{l}}\_{j=0}(x) || {\mathbf{l}}\_{j=1}(x) || \dots || {\mathbf{l}}\_{j=m-1}(x) \\\\ - {\mathbf{G}} &= {\mathbf{G}}\_{0} || {\mathbf{G}}\_{1} || \dots || {\mathbf{G}}\_{m-1} \\\\ - y_j &= y^{j \cdot n} \\\\ - z_j &= z^j + {\mathbf{l}}(x) &= {\mathbf{l}}\_{(0)}(x) || {\mathbf{l}}\_{(1)}(x) || \dots || {\mathbf{l}}\_{(m-1)}(x) \\\\ + {\mathbf{r}}(x) &= {\mathbf{r}}\_{(0)}(x) || {\mathbf{r}}\_{(1)}(x) || \dots || {\mathbf{r}}\_{(m-1)}(x) \\\\ + {\mathbf{G}} &= {\mathbf{G}}\_{(0)} || {\mathbf{G}}\_{(1)} || \dots || {\mathbf{G}}\_{(m-1)} \\\\ + {\mathbf{H}} &= {\mathbf{H}}\_{(0)} || {\mathbf{H}}\_{(1)} || \dots || {\mathbf{H}}\_{(m-1)} \\\\ + \mathbf{y}^n\_{(j)} &= \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} \\\\ + z_{(j)} &= z^j \end{aligned} \\] @@ -823,21 +824,21 @@ Therefore, we can simplify the following statements: \\[ \begin{aligned} - \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_j(x), {\mathbf{G}\_j} \rangle} &= {\langle {\mathbf{l}}\_{j=0}(x) || {\mathbf{l}}\_{j=1}(x) || \dots || {\mathbf{l}}\_{j=m-1}(x), {\mathbf{G}}\_{0} || {\mathbf{G}}\_{1} || \dots || {\mathbf{G}}\_{m-1} \rangle} \\\\ + \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} &= {\langle {\mathbf{l}}\_{(0)}(x) || {\mathbf{l}}\_{(1)}(x) || \dots || {\mathbf{l}}\_{(m-1)}(x), {\mathbf{G}}\_{(0)} || {\mathbf{G}}\_{(1)} || \dots || {\mathbf{G}}\_{(m-1)} \rangle} \\\\ &= {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} \\\\ - \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_j(x) \circ {\mathbf{y}^{-n}} \cdot y_j^{-1}, {\mathbf{H}}\_j \rangle} &= {\langle {\mathbf{r}}\_{j=0}(x) \circ {\mathbf{y}^{-n}} y_0^{-1} || {\mathbf{r}}\_{j=1}(x) \circ {\mathbf{y}^{-n}} y_1^{-1} || \dots || {\mathbf{r}}\_{j=m-1}(x) \circ {\mathbf{y}^{-n}} y_{m-1}^{-1}, {\mathbf{H}}\_{0} || {\mathbf{H}}\_{1} || \dots || {\mathbf{H}}\_{m-1} \rangle}\\\\ - &= {\langle {\mathbf{r}}\_{j=0}(x) \circ {\mathbf{y}^{-n}} y^{0 \cdot n} || {\mathbf{r}}\_{j=1}(x) \circ {\mathbf{y}^{-n}} y^{-1 \cdot n} || \dots || {\mathbf{r}}\_{j=m-1}(x) \circ {\mathbf{y}^{-n}} y^{-(m-1)\cdot n}, {\mathbf{H}}\_{0} || {\mathbf{H}}\_{1} || \dots || {\mathbf{H}}\_{m-1} \rangle}\\\\ + \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_{(j)}(x) \circ (\mathbf{y}^{n}\_{(j)})^{-1}, {\mathbf{H}}\_{(j)} \rangle} &= {\langle {\mathbf{r}}\_{(0)}(x) \circ (\mathbf{y}^{n}\_{(0)})^{-1} || {\mathbf{r}}\_{(1)}(x) \circ (\mathbf{y}^{n}\_{(1)})^{-1} || \dots || {\mathbf{r}}\_{(m-1)}(x) \circ (\mathbf{y}^{n}\_{(m-1)})^{-1}, {\mathbf{H}}\_{(0)} || {\mathbf{H}}\_{(1)} || \dots || {\mathbf{H}}\_{(m-1)} \rangle}\\\\ + &= {\langle {\mathbf{r}}\_{(0)}(x) \circ (\mathbf{y}^{n \cdot m}\_{[0 : n - 1]})^{-1} || {\mathbf{r}}\_{(1)}(x) \circ (\mathbf{y}^{n \cdot m}\_{[ n : 2 \cdot n - 1]})^{-1} || \dots || {\mathbf{r}}\_{(m-1)}(x) \circ (\mathbf{y}^{n \cdot m}\_{[(m-1) \cdot n : m \cdot n - 1]})^{-1}, {\mathbf{H}}\_{(0)} || {\mathbf{H}}\_{(1)} || \dots || {\mathbf{H}}\_{(m-1)} \rangle}\\\\ &= {\langle {\mathbf{r}}(x) \circ {\mathbf{y}^{-n \cdot m}}, {\mathbf{H}} \rangle} \end{aligned} \\] -We can also combine some party-specific values in the following manner: +We can combine the values and commitments from all the `m` parties by summing them directly: \\[ \begin{aligned} - {\widetilde{e}} &= \sum_{j=0}^{m-1} {\widetilde{e}\_j} \\\\ - A &= \sum_{j=0}^{m-1} A_j \\\\ - S &= \sum_{j=0}^{m-1} S_j \\\\ + {\widetilde{e}} &= \sum_{j=0}^{m-1} {\widetilde{e}\_{(j)}} \\\\ + A &= \sum_{j=0}^{m-1} A_{(j)} \\\\ + S &= \sum_{j=0}^{m-1} S_{(j)} \\\\ 
\end{aligned} \\] @@ -845,7 +846,7 @@ With these observations, we can simplify the combined `m`-party statement about \\[ \begin{aligned} - {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x) \circ {\mathbf{y}^{-n \cdot m}} , {\mathbf{H}} \rangle} \stackrel{?}{=} -{\widetilde{e}} {\widetilde{B}} + A + x S - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} + z{\langle {\mathbf{1}}, {\mathbf{H}} \rangle} + \sum_{j=0}^{m-1} {\langle z^{j+2} \cdot {\mathbf{y}^{-n}} y^{-j\cdot n} \circ {\mathbf{2}}^n, {\mathbf{H}\_j} \rangle} + {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x) \circ {\mathbf{y}^{-n \cdot m}} , {\mathbf{H}} \rangle} \stackrel{?}{=} -{\widetilde{e}} {\widetilde{B}} + A + x S - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} + z{\langle {\mathbf{1}}, {\mathbf{H}} \rangle} + \sum_{j=0}^{m-1} {\langle z^{j+2} \cdot (\mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]})^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}}\_{(j)} \rangle} \end{aligned} \\] From 55ea35ee3d067cb08e2403a1f631237319ba9b26 Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 30 Apr 2018 17:27:30 -0700 Subject: [PATCH 155/186] add some more clarifying steps for summation --- docs/notes.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 0d36b689..b8b95b30 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -824,9 +824,17 @@ Therefore, we can simplify the following statements: \\[ \begin{aligned} - \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} &= {\langle {\mathbf{l}}\_{(0)}(x) || {\mathbf{l}}\_{(1)}(x) || \dots || {\mathbf{l}}\_{(m-1)}(x), {\mathbf{G}}\_{(0)} || {\mathbf{G}}\_{(1)} || \dots || {\mathbf{G}}\_{(m-1)} \rangle} \\\\ + \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} &= {\langle {\mathbf{l}}\_{(0)}(x), {\mathbf{G}}\_{(0)} \rangle} + + {\langle {\mathbf{l}}\_{(1)}(x), {\mathbf{G}}\_{(1)} \rangle} + + \dots + + {\langle {\mathbf{l}}\_{(m-1)}(x), {\mathbf{G}}\_{(m-1)} \rangle}\\\\ + &= {\langle {\mathbf{l}}\_{(0)}(x) || {\mathbf{l}}\_{(1)}(x) || \dots || {\mathbf{l}}\_{(m-1)}(x), {\mathbf{G}}\_{(0)} || {\mathbf{G}}\_{(1)} || \dots || {\mathbf{G}}\_{(m-1)} \rangle} \\\\ &= {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} \\\\ - \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_{(j)}(x) \circ (\mathbf{y}^{n}\_{(j)})^{-1}, {\mathbf{H}}\_{(j)} \rangle} &= {\langle {\mathbf{r}}\_{(0)}(x) \circ (\mathbf{y}^{n}\_{(0)})^{-1} || {\mathbf{r}}\_{(1)}(x) \circ (\mathbf{y}^{n}\_{(1)})^{-1} || \dots || {\mathbf{r}}\_{(m-1)}(x) \circ (\mathbf{y}^{n}\_{(m-1)})^{-1}, {\mathbf{H}}\_{(0)} || {\mathbf{H}}\_{(1)} || \dots || {\mathbf{H}}\_{(m-1)} \rangle}\\\\ + \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_{(j)}(x) \circ (\mathbf{y}^{n}\_{(j)})^{-1}, {\mathbf{H}}\_{(j)} \rangle} &= {\langle {\mathbf{r}}\_{(0)}(x) \circ (\mathbf{y}^{n}\_{(0)})^{-1}, {\mathbf{H}}\_{(0)} \rangle} + + {\langle {\mathbf{r}}\_{(1)}(x) \circ (\mathbf{y}^{n}\_{(1)})^{-1}, {\mathbf{H}}\_{(1)} \rangle} + + \dots + + {\langle {\mathbf{r}}\_{(m-1)}(x) \circ (\mathbf{y}^{n}\_{(m-1)})^{-1}, {\mathbf{H}}\_{(m-1)} \rangle} \\\\ + &= {\langle {\mathbf{r}}\_{(0)}(x) \circ (\mathbf{y}^{n}\_{(0)})^{-1} || {\mathbf{r}}\_{(1)}(x) \circ (\mathbf{y}^{n}\_{(1)})^{-1} || \dots || {\mathbf{r}}\_{(m-1)}(x) \circ (\mathbf{y}^{n}\_{(m-1)})^{-1}, {\mathbf{H}}\_{(0)} || {\mathbf{H}}\_{(1)} || \dots || {\mathbf{H}}\_{(m-1)} \rangle}\\\\ &= {\langle {\mathbf{r}}\_{(0)}(x) \circ (\mathbf{y}^{n \cdot m}\_{[0 : n - 1]})^{-1} || 
{\mathbf{r}}\_{(1)}(x) \circ (\mathbf{y}^{n \cdot m}\_{[ n : 2 \cdot n - 1]})^{-1} || \dots || {\mathbf{r}}\_{(m-1)}(x) \circ (\mathbf{y}^{n \cdot m}\_{[(m-1) \cdot n : m \cdot n - 1]})^{-1}, {\mathbf{H}}\_{(0)} || {\mathbf{H}}\_{(1)} || \dots || {\mathbf{H}}\_{(m-1)} \rangle}\\\\ &= {\langle {\mathbf{r}}(x) \circ {\mathbf{y}^{-n \cdot m}}, {\mathbf{H}} \rangle} \end{aligned} From 6f4c3fb565277db679083fbb02eecb9b7d1fd1e3 Mon Sep 17 00:00:00 2001 From: Cathie Date: Mon, 30 Apr 2018 17:34:34 -0700 Subject: [PATCH 156/186] clarify wording --- docs/notes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/notes.md b/docs/notes.md index b8b95b30..181792b8 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -708,7 +708,7 @@ We can combine the statements about \\(t\_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x) \end{aligned} \\] -We can add the \\(t_{(j)}(x)\\) values together to create \\(t(x)\\) because each \\(t_{(j)}(x)\\) is calculated using the \\(\mathbf{y}^n\_{(j)}\\) and \\(z\_{(j)}\\) challenge variables that are unique to each party `j`, so all of the \\(t\_{(j)}(x)\\) values will be offset from one another. +We can add the \\(t_{(j)}(x)\\) values together to create \\(t(x)\\) instead of taking a linear combination of \\(t_{(j)}(x)\\) values, because each \\(t_{(j)}(x)\\) is calculated with the \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) challenge variables that are unique to that party`j`, so all of the \\(t_{(j)}(x)\\) values will be offset from one another. Now instead of having to do `m` individual checks to prove that \\(t_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x)\\), and \\({\mathbf{r}}\_{(j)}(x)\\) for all parties `j` are correct, we can do the verification with one check: From b5f7c949c57d5050f76025afbd244e43259a80c3 Mon Sep 17 00:00:00 2001 From: Cathie Date: Tue, 1 May 2018 11:16:33 -0700 Subject: [PATCH 157/186] touchups --- docs/notes.md | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 181792b8..ec5dfd4a 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -623,17 +623,11 @@ Party `j` begins with a secret value \\(v_{(j)}\\), and wishes to convince the v Proving range statements with bit vectors ----------------------------------------- -Let \\({\mathbf{a}}\_{(j)L}\\) be the vector of bits of \\(v_{(j)}\\). -Then \\(v_{(j)}\\) can be represented as: -\\[ -\begin{aligned} - v_{(j)} &= {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{2}}^{n} \rangle} -\end{aligned} -\\] -We need \\({\mathbf{a}}\_{(j)L}\\) to be a vector of integers \\(\\{0,1\\}\\). -This can be expressed with additional conditions: +We want to make statements about \\(v_{(j)}\\) using its bit vector representation, where the statements will be true if and only if \\(v_{(j)}\\) is actually in the expected range. We will not reproduce the steps or explanation here since it is the same as in the [proving range statements with bit vectors](index.html#proving-range-statements-with-bit-vectors) step of the single-value range proof. 
Here are the final statements for party `j`: + \\[ \begin{aligned} + {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{2}}^{n} \rangle} &= v_{(j)} \\\\ {\mathbf{a}}\_{(j)L} \circ {\mathbf{a}}\_{(j)R} &= {\mathbf{0}} \\\\ ({\mathbf{a}}\_{(j)L} - {\mathbf{1}}) - {\mathbf{a}}\_{(j)R} &= {\mathbf{0}} \end{aligned} @@ -649,7 +643,7 @@ First, we will combine each of the two vector-statements into a single statement \\[ \begin{aligned} - {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{2}}^{n} \rangle} &= v \\\\ + {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{2}}^{n} \rangle} &= v_{(j)} \\\\ {\langle {\mathbf{a}}\_{(j)L} - {\mathbf{1}} - {\mathbf{a}}\_{(j)R}, {\mathbf{y}}^{n}\_{(j)} \rangle} &= 0 \\\\ {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{a}}\_{(j)R} \circ {\mathbf{y}}^{n}\_{(j)} \rangle} &= 0 \end{aligned} @@ -659,7 +653,7 @@ The three resulting statements can then be combined in the same way, using the verifier’s choice of challenge value \\(z\\) that is shared across all parties, and offset by scalar \\(z\_{(j)} = z^j\\) that is unique to each party `j`: \\[ \begin{aligned} -z^{2} z\_{(j)} \cdot v +z^{2} z\_{(j)} \cdot v_{(j)} &= z^{2} z\_{(j)} \cdot {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{2}}^{n} \rangle} \\\\ &+ z \cdot {\langle {\mathbf{a}}\_{(j)L} - {\mathbf{1}} - {\mathbf{a}}\_{(j)R}, {\mathbf{y}}^{n}\_{(j)} \rangle} \\\\ @@ -675,7 +669,7 @@ We combine the terms in the preceding statement into a single inner product, usi \\[ \begin{aligned} \delta_{(j)}(y,z) &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} z_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}\\\\ - z^{2}z_{(j)} \cdot v + \delta_{(j)}(y,z) &= {\langle {\mathbf{a}}\_{(j)L} - z {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \circ ({\mathbf{a}}\_{(j)R} + z {\mathbf{1}}) + z^{2} z_{(j)} \cdot {\mathbf{2}}^{n} \rangle} + z^{2}z_{(j)} \cdot v_{(j)} + \delta_{(j)}(y,z) &= {\langle {\mathbf{a}}\_{(j)L} - z {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \circ ({\mathbf{a}}\_{(j)R} + z {\mathbf{1}}) + z^{2} z_{(j)} \cdot {\mathbf{2}}^{n} \rangle} \end{aligned} \\] @@ -854,7 +848,7 @@ With these observations, we can simplify the combined `m`-party statement about \\[ \begin{aligned} - {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x) \circ {\mathbf{y}^{-n \cdot m}} , {\mathbf{H}} \rangle} \stackrel{?}{=} -{\widetilde{e}} {\widetilde{B}} + A + x S - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} + z{\langle {\mathbf{1}}, {\mathbf{H}} \rangle} + \sum_{j=0}^{m-1} {\langle z^{j+2} \cdot (\mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]})^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}}\_{(j)} \rangle} + {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x) \circ {\mathbf{y}^{-n \cdot m}} , {\mathbf{H}} \rangle} \stackrel{?}{=} -{\widetilde{e}} {\widetilde{B}} + A + x S - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} + z{\langle {\mathbf{1}}, {\mathbf{H}} \rangle} + \sum_{j=0}^{m-1} {\langle z^{j+2} \cdot (\mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]})^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}}\_{[j \cdot n : (j+1) \cdot n - 1]} \rangle} \end{aligned} \\] From adeb7446a97d05287f119839c70ed95cb6a070ab Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 1 May 2018 11:27:04 -0700 Subject: [PATCH 158/186] Revert nightly version per https://github.com/dalek-cryptography/curve25519-dalek/issues/133 --- rust-toolchain | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain b/rust-toolchain index 3fbf18b7..9ee2e268 100644 --- a/rust-toolchain +++ 
b/rust-toolchain @@ -1 +1 @@ -nightly-2018-04-29 +nightly-2018-04-16 From 3928380872e982a9b064e793de70080787b8ff07 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 1 May 2018 10:50:25 -0700 Subject: [PATCH 159/186] [fmt] cargo fmt --- src/aggregated_range_proof/messages.rs | 2 +- src/proof_transcript.rs | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 225d68d0..a8797578 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -6,9 +6,9 @@ use curve25519_dalek::ristretto::{self, RistrettoPoint}; use curve25519_dalek::scalar::Scalar; use curve25519_dalek::traits::IsIdentity; +use generators::GeneratorsView; use inner_product_proof::{self, InnerProductProof}; use proof_transcript::ProofTranscript; -use generators::GeneratorsView; use util; #[derive(Serialize, Deserialize, Copy, Clone, Debug)] diff --git a/src/proof_transcript.rs b/src/proof_transcript.rs index 9c3ad6b5..d7b48aa0 100644 --- a/src/proof_transcript.rs +++ b/src/proof_transcript.rs @@ -79,7 +79,7 @@ impl ProofTranscript { let mut ro = ProofTranscript { // NOTE: if you change the security parameter, also change the rate below hash: Keccak::new_shake128(), - rate: 1600/8 - (2*128/8), // 168 bytes + rate: 1600 / 8 - (2 * 128 / 8), // 168 bytes write_offset: 0, }; // We will bump the version prefix each time we @@ -288,7 +288,6 @@ mod tests { } } - #[test] fn outputs_are_disambiguated_by_length_prefix() { let mut ro = ProofTranscript::new(b"TestProtocol"); From f796308536c9c0d1a310501ff7a43e5c51fbde91 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 1 May 2018 12:52:58 -0700 Subject: [PATCH 160/186] Unify aggregated and non-aggregated range proofs. This removes the `AggregatedProof` struct entirely and moves its validation logic into the `RangeProof` struct, which now handles both aggregated and unaggregated proofs. The former `RangeProof::verify` is now `RangeProof::verify_single`, a convenience wrapper. --- benches/bulletproofs.rs | 2 +- src/aggregated_range_proof/dealer.rs | 14 ++- src/aggregated_range_proof/messages.rs | 130 +------------------------ src/aggregated_range_proof/mod.rs | 7 +- src/range_proof.rs | 109 +++++++++++---------- 5 files changed, 70 insertions(+), 192 deletions(-) diff --git a/benches/bulletproofs.rs b/benches/bulletproofs.rs index 77dfb779..10734300 100644 --- a/benches/bulletproofs.rs +++ b/benches/bulletproofs.rs @@ -63,7 +63,7 @@ fn bench_verify_helper(n: usize, c: &mut Criterion) { // Each verification requires a clean transcript. let mut transcript = ProofTranscript::new(b"RangeproofTest"); - rp.verify(&vc, generators.share(0), &mut transcript, &mut rng, n) + rp.verify_single(&vc, generators.share(0), &mut transcript, &mut rng, n) }); }); } diff --git a/src/aggregated_range_proof/dealer.rs b/src/aggregated_range_proof/dealer.rs index 16ef4bed..a719225b 100644 --- a/src/aggregated_range_proof/dealer.rs +++ b/src/aggregated_range_proof/dealer.rs @@ -7,6 +7,7 @@ use curve25519_dalek::traits::Identity; use generators::GeneratorsView; use inner_product_proof; use proof_transcript::ProofTranscript; +use range_proof::RangeProof; use util; @@ -170,15 +171,12 @@ pub struct DealerAwaitingProofShares<'a, 'b> { } impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { - /// Assembles proof shares into an `AggregatedProof`. + /// Assembles proof shares into an `RangeProof`. 
/// /// Used as a helper function by `receive_trusted_shares` (which /// just hands back the result) and `receive_shares` (which /// validates the proof shares. - fn assemble_shares( - &mut self, - proof_shares: &[ProofShare], - ) -> Result { + fn assemble_shares(&mut self, proof_shares: &[ProofShare]) -> Result { if self.m != proof_shares.len() { return Err("Length of proof shares doesn't match expected length m"); } @@ -240,7 +238,7 @@ impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { r_vec.clone(), ); - Ok(AggregatedProof { + Ok(RangeProof { A, S, T_1, @@ -264,7 +262,7 @@ impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { mut self, rng: &mut R, proof_shares: &[ProofShare], - ) -> Result { + ) -> Result { let proof = self.assemble_shares(proof_shares)?; // XXX if we change the proof verification API to use @@ -318,7 +316,7 @@ impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { pub fn receive_trusted_shares( mut self, proof_shares: &[ProofShare], - ) -> Result { + ) -> Result { self.assemble_shares(proof_shares) } } diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index a8797578..1ba698e3 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -1,14 +1,10 @@ use std::iter; -use rand::Rng; - use curve25519_dalek::ristretto::{self, RistrettoPoint}; use curve25519_dalek::scalar::Scalar; use curve25519_dalek::traits::IsIdentity; -use generators::GeneratorsView; -use inner_product_proof::{self, InnerProductProof}; -use proof_transcript::ProofTranscript; +use inner_product_proof; use util; #[derive(Serialize, Deserialize, Copy, Clone, Debug)] @@ -124,127 +120,3 @@ impl ProofShare { Ok(()) } } - -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct AggregatedProof { - /// Commitment to the bits of the value - pub A: RistrettoPoint, - /// Commitment to the blinding factors - pub S: RistrettoPoint, - /// Commitment to the \\(t_1\\) coefficient of \\( t(x) \\) - pub T_1: RistrettoPoint, - /// Commitment to the \\(t_2\\) coefficient of \\( t(x) \\) - pub T_2: RistrettoPoint, - /// Evaluation of the polynomial \\(t(x)\\) at the challenge point \\(x\\) - pub t_x: Scalar, - /// Blinding factor for the synthetic commitment to \\(t(x)\\) - pub t_x_blinding: Scalar, - /// Blinding factor for the synthetic commitment to the inner-product arguments - pub e_blinding: Scalar, - /// Proof data for the inner-product argument. 
- pub ipp_proof: InnerProductProof, -} - -impl AggregatedProof { - pub fn verify( - &self, - value_commitments: &[RistrettoPoint], - gens: GeneratorsView, - transcript: &mut ProofTranscript, - rng: &mut R, - n: usize, - m: usize, - ) -> Result<(), ()> { - transcript.commit_u64(n as u64); - transcript.commit_u64(m as u64); - - for V in value_commitments.iter() { - transcript.commit(V.compress().as_bytes()); - } - transcript.commit(self.A.compress().as_bytes()); - transcript.commit(self.S.compress().as_bytes()); - - let y = transcript.challenge_scalar(); - let z = transcript.challenge_scalar(); - - transcript.commit(self.T_1.compress().as_bytes()); - transcript.commit(self.T_2.compress().as_bytes()); - - let x = transcript.challenge_scalar(); - - transcript.commit(self.t_x.as_bytes()); - transcript.commit(self.t_x_blinding.as_bytes()); - transcript.commit(self.e_blinding.as_bytes()); - - let w = transcript.challenge_scalar(); - let zz = z * z; - let minus_z = -z; - - // Challenge value for batching statements to be verified - let c = Scalar::random(rng); - - let (x_sq, x_inv_sq, s) = self.ipp_proof.verification_scalars(transcript); - - let s_inv = s.iter().rev(); - - let a = self.ipp_proof.a; - let b = self.ipp_proof.b; - - let g = s.iter().map(|s_i| minus_z - a * s_i); - - // Compute product in updated P - // z^0 * \vec(2)^n || z^1 * \vec(2)^n || ... || z^(m-1) * \vec(2)^n - let powers_of_2: Vec = util::exp_iter(Scalar::from_u64(2)).take(n).collect(); - let powers_of_z = util::exp_iter(z).take(m); - let concat_z_and_2 = - powers_of_z.flat_map(|exp_z| powers_of_2.iter().map(move |exp_2| exp_2 * exp_z)); - - let h = s_inv - .zip(util::exp_iter(y.invert())) - .zip(concat_z_and_2) - .map(|((s_i_inv, exp_y_inv), z_and_2)| z + exp_y_inv * (zz * z_and_2 - b * s_i_inv)); - - let value_commitment_scalars = util::exp_iter(z).take(m).map(|z_exp| c * zz * z_exp); - let basepoint_scalar = w * (self.t_x - a * b) + c * (delta(n, m, &y, &z) - self.t_x); - - let mega_check = ristretto::vartime::multiscalar_mul( - iter::once(Scalar::one()) - .chain(iter::once(x)) - .chain(value_commitment_scalars) - .chain(iter::once(c * x)) - .chain(iter::once(c * x * x)) - .chain(iter::once(-self.e_blinding - c * self.t_x_blinding)) - .chain(iter::once(basepoint_scalar)) - .chain(g) - .chain(h) - .chain(x_sq.iter().cloned()) - .chain(x_inv_sq.iter().cloned()), - iter::once(&self.A) - .chain(iter::once(&self.S)) - .chain(value_commitments.iter()) - .chain(iter::once(&self.T_1)) - .chain(iter::once(&self.T_2)) - .chain(iter::once(&gens.pedersen_generators.B_blinding)) - .chain(iter::once(&gens.pedersen_generators.B)) - .chain(gens.G.iter()) - .chain(gens.H.iter()) - .chain(self.ipp_proof.L_vec.iter()) - .chain(self.ipp_proof.R_vec.iter()), - ); - - if mega_check.is_identity() { - Ok(()) - } else { - Err(()) - } - } -} - -/// Compute delta(y,z) = (z - z^2)<1^n*m, y^n*m> + z^3 <1, 2^n*m> * \sum_j=0^(m-1) z^j -fn delta(n: usize, m: usize, y: &Scalar, z: &Scalar) -> Scalar { - let sum_y = util::sum_of_powers(y, n * m); - let sum_2 = util::sum_of_powers(&Scalar::from_u64(2), n); - let sum_z = util::sum_of_powers(z, m); - - (z - z * z) * sum_y - z * z * z * sum_2 * sum_z -} diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index 7febad82..c39a30f3 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -6,13 +6,12 @@ use curve25519_dalek::scalar::Scalar; use generators::Generators; use proof_transcript::ProofTranscript; +use range_proof::RangeProof; pub mod dealer; 
pub mod messages; pub mod party; -pub use self::messages::AggregatedProof; - pub struct SinglePartyAggregator {} impl SinglePartyAggregator { @@ -29,7 +28,7 @@ impl SinglePartyAggregator { values: &[u64], blindings: &[Scalar], n: usize, - ) -> Result { + ) -> Result { use self::dealer::*; use self::party::*; @@ -145,7 +144,7 @@ mod tests { // Verifier's scope { // 3. Deserialize - let proof: AggregatedProof = bincode::deserialize(&proof_bytes).unwrap(); + let proof: RangeProof = bincode::deserialize(&proof_bytes).unwrap(); // 4. Verify with the same customization label as above let mut rng = OsRng::new().unwrap(); diff --git a/src/range_proof.rs b/src/range_proof.rs index df26f8e6..f7cce776 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -1,6 +1,5 @@ #![allow(non_snake_case)] #![deny(missing_docs)] - #![doc(include = "../docs/range-proof-protocol.md")] use rand::Rng; @@ -24,21 +23,21 @@ use generators::GeneratorsView; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct RangeProof { /// Commitment to the bits of the value - A: RistrettoPoint, + pub(crate) A: RistrettoPoint, /// Commitment to the blinding factors - S: RistrettoPoint, + pub(crate) S: RistrettoPoint, /// Commitment to the \\(t_1\\) coefficient of \\( t(x) \\) - T_1: RistrettoPoint, + pub(crate) T_1: RistrettoPoint, /// Commitment to the \\(t_2\\) coefficient of \\( t(x) \\) - T_2: RistrettoPoint, + pub(crate) T_2: RistrettoPoint, /// Evaluation of the polynomial \\(t(x)\\) at the challenge point \\(x\\) - t_x: Scalar, + pub(crate) t_x: Scalar, /// Blinding factor for the synthetic commitment to \\(t(x)\\) - t_x_blinding: Scalar, + pub(crate) t_x_blinding: Scalar, /// Blinding factor for the synthetic commitment to the inner-product arguments - e_blinding: Scalar, + pub(crate) e_blinding: Scalar, /// Proof data for the inner-product argument. - ipp_proof: InnerProductProof, + pub(crate) ipp_proof: InnerProductProof, } impl RangeProof { @@ -70,7 +69,9 @@ impl RangeProof { use subtle::{Choice, ConditionallyAssignable}; // Commit the range size to domain-separate from rangeproofs of different lengths. + // Also commit the aggregation size (m = 1). transcript.commit_u64(n as u64); + transcript.commit_u64(1u64); // Create copies of G, H, so we can pass them to the // (consuming) IPP API later. @@ -189,32 +190,41 @@ impl RangeProof { /// Verifies a rangeproof for a given value commitment \\(V\\). /// - /// Usage: - /// ```ascii - /// let n = 64; - /// let generators = Generators::new(PedersenGenerators::default(), n, 1); - /// let mut transcript = ProofTranscript::new(b"RangeproofTest"); - /// proof.verify( - /// &V, - /// generators.share(0), - /// &mut transcript, - /// &mut OsRng::new().unwrap(), - /// n - /// ); - /// ``` - pub fn verify( + /// This is a convenience wrapper around `verify` for the `m=1` case. + /// + /// XXX add doctests + pub fn verify_single( &self, V: &RistrettoPoint, gens: GeneratorsView, transcript: &mut ProofTranscript, rng: &mut R, n: usize, + ) -> Result<(), ()> { + self.verify(&[*V], gens, transcript, rng, n, 1) + } + + /// Verifies an aggregated rangeproof for the given value commitments. + /// + /// XXX add doctests + pub fn verify( + &self, + value_commitments: &[RistrettoPoint], + gens: GeneratorsView, + transcript: &mut ProofTranscript, + rng: &mut R, + n: usize, + m: usize, ) -> Result<(), ()> { // First, replay the "interactive" protocol using the proof // data to recompute all challenges. 
transcript.commit_u64(n as u64); - transcript.commit(V.compress().as_bytes()); + transcript.commit_u64(m as u64); + + for V in value_commitments.iter() { + transcript.commit(V.compress().as_bytes()); + } transcript.commit(self.A.compress().as_bytes()); transcript.commit(self.S.compress().as_bytes()); @@ -243,33 +253,41 @@ impl RangeProof { let a = self.ipp_proof.a; let b = self.ipp_proof.b; + // Construct concat_z_and_2, an iterator of the values of + // z^0 * \vec(2)^n || z^1 * \vec(2)^n || ... || z^(m-1) * \vec(2)^n + let powers_of_2: Vec = util::exp_iter(Scalar::from_u64(2)).take(n).collect(); + let powers_of_z = util::exp_iter(z).take(m); + let concat_z_and_2 = + powers_of_z.flat_map(|exp_z| powers_of_2.iter().map(move |exp_2| exp_2 * exp_z)); + let g = s.iter().map(|s_i| minus_z - a * s_i); let h = s_inv - .zip(util::exp_iter(Scalar::from_u64(2))) .zip(util::exp_iter(y.invert())) - .map(|((s_i_inv, exp_2), exp_y_inv)| z + exp_y_inv * (zz * exp_2 - b * s_i_inv)); + .zip(concat_z_and_2) + .map(|((s_i_inv, exp_y_inv), z_and_2)| z + exp_y_inv * (zz * z_and_2 - b * s_i_inv)); + + let value_commitment_scalars = util::exp_iter(z).take(m).map(|z_exp| c * zz * z_exp); + let basepoint_scalar = w * (self.t_x - a * b) + c * (delta(n, m, &y, &z) - self.t_x); let mega_check = ristretto::vartime::multiscalar_mul( iter::once(Scalar::one()) .chain(iter::once(x)) - .chain(iter::once(c * zz)) + .chain(value_commitment_scalars) .chain(iter::once(c * x)) .chain(iter::once(c * x * x)) - .chain(iter::once( - w * (self.t_x - a * b) + c * (delta(n, &y, &z) - self.t_x), - )) .chain(iter::once(-self.e_blinding - c * self.t_x_blinding)) + .chain(iter::once(basepoint_scalar)) .chain(g) .chain(h) .chain(x_sq.iter().cloned()) .chain(x_inv_sq.iter().cloned()), iter::once(&self.A) .chain(iter::once(&self.S)) - .chain(iter::once(V)) + .chain(value_commitments.iter()) .chain(iter::once(&self.T_1)) .chain(iter::once(&self.T_2)) - .chain(iter::once(&gens.pedersen_generators.B)) .chain(iter::once(&gens.pedersen_generators.B_blinding)) + .chain(iter::once(&gens.pedersen_generators.B)) .chain(gens.G.iter()) .chain(gens.H.iter()) .chain(self.ipp_proof.L_vec.iter()) @@ -286,23 +304,14 @@ impl RangeProof { /// Compute /// \\[ -/// \delta(y,z) = (z - z^{2}) \langle 1, {\mathbf{y}}^{n} \rangle + z^{3} \langle \mathbf{1}, {\mathbf{2}}^{n} \rangle +/// \delta(y,z) = (z - z^{2}) \langle 1, {\mathbf{y}}^{nm} \rangle + z^{3} \langle \mathbf{1}, {\mathbf{2}}^{nm} \rangle /// \\] -fn delta(n: usize, y: &Scalar, z: &Scalar) -> Scalar { - let two = Scalar::from_u64(2); - - // XXX this could be more efficient, esp for powers of 2 - let sum_of_powers_of_y = util::exp_iter(*y) - .take(n) - .fold(Scalar::zero(), |acc, x| acc + x); - - let sum_of_powers_of_2 = util::exp_iter(two) - .take(n) - .fold(Scalar::zero(), |acc, x| acc + x); - - let zz = z * z; +fn delta(n: usize, m: usize, y: &Scalar, z: &Scalar) -> Scalar { + let sum_y = util::sum_of_powers(y, n * m); + let sum_2 = util::sum_of_powers(&Scalar::from_u64(2), n); + let sum_z = util::sum_of_powers(z, m); - (z - zz) * sum_of_powers_of_y - z * zz * sum_of_powers_of_2 + (z - z * z) * sum_y - z * z * z * sum_2 * sum_z } #[cfg(test)] @@ -333,7 +342,7 @@ mod tests { exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) } - assert_eq!(power_g, delta(n, &y, &z),); + assert_eq!(power_g, delta(n, 1, &y, &z),); } /// Given a bitsize `n`, test the full trip: @@ -399,7 +408,7 @@ mod tests { let mut transcript = ProofTranscript::new(b"RangeproofTest"); assert!( range_proof - .verify( + .verify_single( 
&value_commitment, generators.share(0), &mut transcript, @@ -413,7 +422,7 @@ mod tests { let mut transcript = ProofTranscript::new(b""); assert!( range_proof - .verify( + .verify_single( &value_commitment, generators.share(0), &mut transcript, From 74549336f0e111323af8f1ee6e13209f25f5dbd1 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 1 May 2018 14:11:26 -0700 Subject: [PATCH 161/186] Keep V in the ValueCommitment per GitHub discussion --- src/aggregated_range_proof/messages.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/aggregated_range_proof/messages.rs b/src/aggregated_range_proof/messages.rs index 1ba698e3..a716cadc 100644 --- a/src/aggregated_range_proof/messages.rs +++ b/src/aggregated_range_proof/messages.rs @@ -9,9 +9,6 @@ use util; #[derive(Serialize, Deserialize, Copy, Clone, Debug)] pub struct ValueCommitment { - /// XXX when we change the aggregation API to allow proving about - /// preexisting commitments, this should go away (and just be an - /// input to the dealer), but until then it should be here. pub V: RistrettoPoint, pub A: RistrettoPoint, pub S: RistrettoPoint, From f92ce80176c9a5ca8ccd1f8cc0d7385f3dfb7b67 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 1 May 2018 14:48:03 -0700 Subject: [PATCH 162/186] Unify proof creation by using self-MPC for single-prover. This eliminates the `SinglePartyAggregator` and just makes it into a method on the `RangeProof` type. --- benches/bulletproofs.rs | 140 +++--------- src/aggregated_range_proof/mod.rs | 270 ---------------------- src/range_proof.rs | 368 +++++++++++++++--------------- 3 files changed, 217 insertions(+), 561 deletions(-) diff --git a/benches/bulletproofs.rs b/benches/bulletproofs.rs index 10734300..0d0ffba4 100644 --- a/benches/bulletproofs.rs +++ b/benches/bulletproofs.rs @@ -14,109 +14,9 @@ use ristretto_bulletproofs::ProofTranscript; use ristretto_bulletproofs::RangeProof; use ristretto_bulletproofs::{Generators, PedersenGenerators}; -fn bench_create_helper(n: usize, c: &mut Criterion) { - c.bench_function(&format!("create_rangeproof_n_{}", n), move |b| { - let generators = Generators::new(PedersenGenerators::default(), n, 1); - let mut rng = OsRng::new().unwrap(); - - let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); - let v_blinding = Scalar::random(&mut rng); - - b.iter(|| { - // Each proof creation requires a clean transcript. - let mut transcript = ProofTranscript::new(b"RangeproofTest"); - - RangeProof::generate_proof( - generators.share(0), - &mut transcript, - &mut rng, - n, - v, - &v_blinding, - ) - }) - }); -} - -fn bench_verify_helper(n: usize, c: &mut Criterion) { - c.bench_function(&format!("verify_rangeproof_n_{}", n), move |b| { - let pg = PedersenGenerators::default(); - let generators = Generators::new(pg.clone(), n, 1); - let mut rng = OsRng::new().unwrap(); - - let mut transcript = ProofTranscript::new(b"RangeproofTest"); - let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); - let v_blinding = Scalar::random(&mut rng); - - let vc = pg.commit(Scalar::from_u64(v), v_blinding); - - let rp = RangeProof::generate_proof( - generators.share(0), - &mut transcript, - &mut rng, - n, - v, - &v_blinding, - ); - - b.iter(|| { - // Each verification requires a clean transcript. 
- let mut transcript = ProofTranscript::new(b"RangeproofTest"); - - rp.verify_single(&vc, generators.share(0), &mut transcript, &mut rng, n) - }); - }); -} - -fn create_rp_64(c: &mut Criterion) { - bench_create_helper(64, c); -} - -fn create_rp_32(c: &mut Criterion) { - bench_create_helper(32, c); -} - -fn create_rp_16(c: &mut Criterion) { - bench_create_helper(16, c); -} - -fn create_rp_8(c: &mut Criterion) { - bench_create_helper(8, c); -} - -criterion_group!{ - name = create_rp; - config = Criterion::default(); - targets = create_rp_8, create_rp_16, create_rp_32, create_rp_64 -} - -fn verify_rp_64(c: &mut Criterion) { - bench_verify_helper(64, c); -} - -fn verify_rp_32(c: &mut Criterion) { - bench_verify_helper(32, c); -} - -fn verify_rp_16(c: &mut Criterion) { - bench_verify_helper(16, c); -} - -fn verify_rp_8(c: &mut Criterion) { - bench_verify_helper(8, c); -} - -criterion_group!{ - name = verify_rp; - config = Criterion::default(); - targets = verify_rp_8, verify_rp_16, verify_rp_32, verify_rp_64 -} - static AGGREGATION_SIZES: [usize; 6] = [1, 2, 4, 8, 16, 32]; fn create_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { - use ristretto_bulletproofs::aggregated_range_proof::SinglePartyAggregator; - let label = format!("Aggregated {}-bit rangeproof creation", n); c.bench_function_over_inputs( @@ -133,7 +33,7 @@ fn create_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { // Each proof creation requires a clean transcript. let mut transcript = ProofTranscript::new(b"AggregateRangeProofBenchmark"); - SinglePartyAggregator::generate_proof( + RangeProof::prove_multiple( &generators, &mut transcript, &mut rng, @@ -147,6 +47,14 @@ fn create_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { ); } +fn create_aggregated_rangeproof_n_8(c: &mut Criterion) { + create_aggregated_rangeproof_helper(8, c); +} + +fn create_aggregated_rangeproof_n_16(c: &mut Criterion) { + create_aggregated_rangeproof_helper(16, c); +} + fn create_aggregated_rangeproof_n_32(c: &mut Criterion) { create_aggregated_rangeproof_helper(32, c); } @@ -156,8 +64,6 @@ fn create_aggregated_rangeproof_n_64(c: &mut Criterion) { } fn verify_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { - use ristretto_bulletproofs::aggregated_range_proof::SinglePartyAggregator; - let label = format!("Aggregated {}-bit rangeproof verification", n); c.bench_function_over_inputs( @@ -171,7 +77,7 @@ fn verify_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { let blindings: Vec = (0..m).map(|_| Scalar::random(&mut rng)).collect(); let mut transcript = ProofTranscript::new(b"AggregateRangeProofBenchmark"); - let proof = SinglePartyAggregator::generate_proof( + let proof = RangeProof::prove_multiple( &generators, &mut transcript, &mut rng, @@ -206,6 +112,14 @@ fn verify_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { ); } +fn verify_aggregated_rangeproof_n_8(c: &mut Criterion) { + verify_aggregated_rangeproof_helper(8, c); +} + +fn verify_aggregated_rangeproof_n_16(c: &mut Criterion) { + verify_aggregated_rangeproof_helper(16, c); +} + fn verify_aggregated_rangeproof_n_32(c: &mut Criterion) { verify_aggregated_rangeproof_helper(32, c); } @@ -215,13 +129,23 @@ fn verify_aggregated_rangeproof_n_64(c: &mut Criterion) { } criterion_group!{ - name = aggregate_rp; - config = Criterion::default(); + name = create_rp; + config = Criterion::default().sample_size(10); targets = + create_aggregated_rangeproof_n_8, + create_aggregated_rangeproof_n_16, create_aggregated_rangeproof_n_32, 
create_aggregated_rangeproof_n_64, +} + +criterion_group!{ + name = verify_rp; + config = Criterion::default(); + targets = + verify_aggregated_rangeproof_n_8, + verify_aggregated_rangeproof_n_16, verify_aggregated_rangeproof_n_32, - verify_aggregated_rangeproof_n_64 + verify_aggregated_rangeproof_n_64, } -criterion_main!(create_rp, verify_rp, aggregate_rp); +criterion_main!(create_rp, verify_rp); diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs index c39a30f3..25608618 100644 --- a/src/aggregated_range_proof/mod.rs +++ b/src/aggregated_range_proof/mod.rs @@ -1,275 +1,5 @@ #![allow(non_snake_case)] -use rand::Rng; - -use curve25519_dalek::scalar::Scalar; - -use generators::Generators; -use proof_transcript::ProofTranscript; -use range_proof::RangeProof; - pub mod dealer; pub mod messages; pub mod party; - -pub struct SinglePartyAggregator {} - -impl SinglePartyAggregator { - /// Create an aggregated rangeproof of multiple values. - /// - /// This performs the same proof aggregation MPC protocol, but - /// with one party playing all roles. - /// - /// The length of `values` must be a power of 2. - pub fn generate_proof( - generators: &Generators, - transcript: &mut ProofTranscript, - rng: &mut R, - values: &[u64], - blindings: &[Scalar], - n: usize, - ) -> Result { - use self::dealer::*; - use self::party::*; - - if values.len() != blindings.len() { - return Err("mismatched values and blindings len"); - } - - let dealer = Dealer::new(generators.all(), n, values.len(), transcript)?; - - let parties: Vec<_> = values - .iter() - .zip(blindings.iter()) - .map(|(&v, &v_blinding)| { - Party::new(v, v_blinding, n, &generators) - }) - // Collect the iterator of Results into a Result, then unwrap it - .collect::,_>>()?; - - let (parties, value_commitments): (Vec<_>, Vec<_>) = parties - .into_iter() - .enumerate() - .map(|(j, p)| p.assign_position(j, rng)) - .unzip(); - - let (dealer, value_challenge) = dealer.receive_value_commitments(&value_commitments)?; - - let (parties, poly_commitments): (Vec<_>, Vec<_>) = parties - .into_iter() - .map(|p| p.apply_challenge(&value_challenge, rng)) - .unzip(); - - let (dealer, poly_challenge) = dealer.receive_poly_commitments(&poly_commitments)?; - - let proof_shares: Vec<_> = parties - .into_iter() - .map(|p| p.apply_challenge(&poly_challenge)) - .collect(); - - dealer.receive_trusted_shares(&proof_shares) - } -} - -#[cfg(test)] -mod tests { - use rand::OsRng; - - use curve25519_dalek::ristretto::RistrettoPoint; - - use generators::PedersenGenerators; - - use super::*; - - /// Given a bitsize `n`, test the following: - /// - /// 1. Generate `m` random values and create a proof they are all in range; - /// 2. Serialize to wire format; - /// 3. Deserialize from wire format; - /// 4. Verify the proof. - fn singleparty_create_and_verify_helper(n: usize, m: usize) { - // Split the test into two scopes, so that it's explicit what - // data is shared between the prover and the verifier. - - // Use bincode for serialization - use bincode; - - // Both prover and verifier have access to the generators and the proof - let generators = Generators::new(PedersenGenerators::default(), n, m); - - // Serialized proof data - let proof_bytes: Vec; - let value_commitments: Vec; - - // Prover's scope - { - // 1. 
Generate the proof - - let mut rng = OsRng::new().unwrap(); - let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - let (min, max) = (0u64, ((1u128 << n) - 1) as u64); - let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); - let blindings: Vec = (0..m).map(|_| Scalar::random(&mut rng)).collect(); - - let proof = SinglePartyAggregator::generate_proof( - &generators, - &mut transcript, - &mut rng, - &values, - &blindings, - n, - ).unwrap(); - - // 2. Serialize - proof_bytes = bincode::serialize(&proof).unwrap(); - - let pg = &generators.all().pedersen_generators; - - // XXX would be nice to have some convenience API for this - value_commitments = values - .iter() - .zip(blindings.iter()) - .map(|(&v, &v_blinding)| pg.commit(Scalar::from_u64(v), v_blinding)) - .collect(); - } - - println!( - "Aggregated rangeproof of m={} proofs of n={} bits has size {} bytes", - m, - n, - proof_bytes.len(), - ); - - // Verifier's scope - { - // 3. Deserialize - let proof: RangeProof = bincode::deserialize(&proof_bytes).unwrap(); - - // 4. Verify with the same customization label as above - let mut rng = OsRng::new().unwrap(); - let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - assert!( - proof - .verify( - &value_commitments, - generators.all(), - &mut transcript, - &mut rng, - n, - m - ) - .is_ok() - ); - } - } - - #[test] - fn create_and_verify_n_32_m_1() { - singleparty_create_and_verify_helper(32, 1); - } - - #[test] - fn create_and_verify_n_32_m_2() { - singleparty_create_and_verify_helper(32, 2); - } - - #[test] - fn create_and_verify_n_32_m_4() { - singleparty_create_and_verify_helper(32, 4); - } - - #[test] - fn create_and_verify_n_32_m_8() { - singleparty_create_and_verify_helper(32, 8); - } - - #[test] - fn create_and_verify_n_64_m_1() { - singleparty_create_and_verify_helper(64, 1); - } - - #[test] - fn create_and_verify_n_64_m_2() { - singleparty_create_and_verify_helper(64, 2); - } - - #[test] - fn create_and_verify_n_64_m_4() { - singleparty_create_and_verify_helper(64, 4); - } - - #[test] - fn create_and_verify_n_64_m_8() { - singleparty_create_and_verify_helper(64, 8); - } - - #[test] - fn detect_dishonest_party_during_aggregation() { - use self::dealer::*; - use self::party::*; - - // Simulate four parties, two of which will be dishonest and use a 64-bit value. 
- let m = 4; - let n = 32; - - let generators = Generators::new(PedersenGenerators::default(), n, m); - - let mut rng = OsRng::new().unwrap(); - let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - // Parties 0, 2 are honest and use a 32-bit value - let v0 = rng.next_u32() as u64; - let v0_blinding = Scalar::random(&mut rng); - let party0 = Party::new(v0, v0_blinding, n, &generators).unwrap(); - - let v2 = rng.next_u32() as u64; - let v2_blinding = Scalar::random(&mut rng); - let party2 = Party::new(v2, v2_blinding, n, &generators).unwrap(); - - // Parties 1, 3 are dishonest and use a 64-bit value - let v1 = rng.next_u64(); - let v1_blinding = Scalar::random(&mut rng); - let party1 = Party::new(v1, v1_blinding, n, &generators).unwrap(); - - let v3 = rng.next_u64(); - let v3_blinding = Scalar::random(&mut rng); - let party3 = Party::new(v3, v3_blinding, n, &generators).unwrap(); - - let dealer = Dealer::new(generators.all(), n, m, &mut transcript).unwrap(); - - let (party0, value_com0) = party0.assign_position(0, &mut rng); - let (party1, value_com1) = party1.assign_position(1, &mut rng); - let (party2, value_com2) = party2.assign_position(2, &mut rng); - let (party3, value_com3) = party3.assign_position(3, &mut rng); - - let (dealer, value_challenge) = dealer - .receive_value_commitments(&[value_com0, value_com1, value_com2, value_com3]) - .unwrap(); - - let (party0, poly_com0) = party0.apply_challenge(&value_challenge, &mut rng); - let (party1, poly_com1) = party1.apply_challenge(&value_challenge, &mut rng); - let (party2, poly_com2) = party2.apply_challenge(&value_challenge, &mut rng); - let (party3, poly_com3) = party3.apply_challenge(&value_challenge, &mut rng); - - let (dealer, poly_challenge) = dealer - .receive_poly_commitments(&[poly_com0, poly_com1, poly_com2, poly_com3]) - .unwrap(); - - let share0 = party0.apply_challenge(&poly_challenge); - let share1 = party1.apply_challenge(&poly_challenge); - let share2 = party2.apply_challenge(&poly_challenge); - let share3 = party3.apply_challenge(&poly_challenge); - - match dealer.receive_shares(&mut rng, &[share0, share1, share2, share3]) { - Ok(_proof) => { - panic!("The proof was malformed, but it was not detected"); - } - Err(e) => { - // XXX when we have error types, check that it was party 1 that did it - assert_eq!(e, "proof failed to verify"); - } - } - } -} diff --git a/src/range_proof.rs b/src/range_proof.rs index f7cce776..2547092a 100644 --- a/src/range_proof.rs +++ b/src/range_proof.rs @@ -11,14 +11,11 @@ use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::scalar::Scalar; use curve25519_dalek::traits::IsIdentity; +use generators::{Generators, GeneratorsView}; use inner_product_proof::InnerProductProof; - use proof_transcript::ProofTranscript; - use util; -use generators::GeneratorsView; - /// The `RangeProof` struct represents a single range proof. #[derive(Serialize, Deserialize, Clone, Debug)] pub struct RangeProof { @@ -44,148 +41,68 @@ impl RangeProof { /// Create a rangeproof for a given pair of value `v` and /// blinding scalar `v_blinding`. 
/// - /// Usage: - /// ```ascii - /// let n = 64; - /// let generators = Generators::new(PedersenGenerators::default(), n, 1); - /// let mut transcript = ProofTranscript::new(b"RangeproofTest"); - /// let proof = RangeProof::generate_proof( - /// generators.share(0), - /// &mut transcript, - /// &mut rng, - /// n, - /// v, - /// &v_blinding, - /// ); - /// ``` - pub fn generate_proof( - generators: GeneratorsView, + /// XXX add doctests + pub fn prove_multiple_single( + generators: &Generators, transcript: &mut ProofTranscript, rng: &mut R, - n: usize, v: u64, v_blinding: &Scalar, - ) -> RangeProof { - use subtle::{Choice, ConditionallyAssignable}; - - // Commit the range size to domain-separate from rangeproofs of different lengths. - // Also commit the aggregation size (m = 1). - transcript.commit_u64(n as u64); - transcript.commit_u64(1u64); - - // Create copies of G, H, so we can pass them to the - // (consuming) IPP API later. - let G = generators.G.to_vec(); - let H = generators.H.to_vec(); - - let V = generators - .pedersen_generators - .commit(Scalar::from_u64(v), *v_blinding); - - let a_blinding = Scalar::random(rng); - - // Compute A = + + a_blinding * B_blinding. - let mut A = generators.pedersen_generators.B_blinding * a_blinding; - for i in 0..n { - // If v_i = 0, we add a_L[i] * G[i] + a_R[i] * H[i] = - H[i] - // If v_i = 1, we add a_L[i] * G[i] + a_R[i] * H[i] = G[i] - let v_i = Choice::from(((v >> i) & 1) as u8); - let mut point = -H[i]; - point.conditional_assign(&G[i], v_i); - A += point; - } - - let s_blinding = Scalar::random(rng); - let s_L: Vec<_> = (0..n).map(|_| Scalar::random(rng)).collect(); - let s_R: Vec<_> = (0..n).map(|_| Scalar::random(rng)).collect(); - - // Compute S = + + s_blinding * B_blinding. - let S = ristretto::multiscalar_mul( - iter::once(&s_blinding).chain(s_L.iter()).chain(s_R.iter()), - iter::once(&generators.pedersen_generators.B_blinding) - .chain(G.iter()) - .chain(H.iter()), - ); + n: usize, + ) -> Result { + RangeProof::prove_multiple(generators, transcript, rng, &[v], &[*v_blinding], n) + } - // Commit to V, A, S and get challenges y, z - transcript.commit(V.compress().as_bytes()); - transcript.commit(A.compress().as_bytes()); - transcript.commit(S.compress().as_bytes()); - let y = transcript.challenge_scalar(); - let z = transcript.challenge_scalar(); - let zz = z * z; + /// Create a rangeproof for a set of values. 
+ /// + /// XXX add doctests + pub fn prove_multiple( + generators: &Generators, + transcript: &mut ProofTranscript, + rng: &mut R, + values: &[u64], + blindings: &[Scalar], + n: usize, + ) -> Result { + use aggregated_range_proof::dealer::*; + use aggregated_range_proof::party::*; - // Compute l, r - let mut l_poly = util::VecPoly1::zero(n); - let mut r_poly = util::VecPoly1::zero(n); - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + if values.len() != blindings.len() { + return Err("mismatched values and blindings len"); + } - for i in 0..n { - let a_L_i = Scalar::from_u64((v >> i) & 1); - let a_R_i = a_L_i - Scalar::one(); + let dealer = Dealer::new(generators.all(), n, values.len(), transcript)?; - l_poly.0[i] = a_L_i - z; - l_poly.1[i] = s_L[i]; - r_poly.0[i] = exp_y * (a_R_i + z) + zz * exp_2; - r_poly.1[i] = exp_y * s_R[i]; + let parties: Vec<_> = values + .iter() + .zip(blindings.iter()) + .map(|(&v, &v_blinding)| { + Party::new(v, v_blinding, n, &generators) + }) + // Collect the iterator of Results into a Result, then unwrap it + .collect::,_>>()?; - exp_y *= y; // y^i -> y^(i+1) - exp_2 += exp_2; // 2^i -> 2^(i+1) - } + let (parties, value_commitments): (Vec<_>, Vec<_>) = parties + .into_iter() + .enumerate() + .map(|(j, p)| p.assign_position(j, rng)) + .unzip(); - // Compute t(x) = - let t_poly = l_poly.inner_product(&r_poly); - - // Form commitments T_1, T_2 to t.1, t.2 - let t_1_blinding = Scalar::random(rng); - let t_2_blinding = Scalar::random(rng); - let T_1 = generators - .pedersen_generators - .commit(t_poly.1, t_1_blinding); - let T_2 = generators - .pedersen_generators - .commit(t_poly.2, t_2_blinding); - - // Commit to T_1, T_2 to get the challenge point x - transcript.commit(T_1.compress().as_bytes()); - transcript.commit(T_2.compress().as_bytes()); - let x = transcript.challenge_scalar(); + let (dealer, value_challenge) = dealer.receive_value_commitments(&value_commitments)?; - // Evaluate t at x and run the IPP - let t_x = t_poly.eval(x); - let t_x_blinding = zz * v_blinding + x * (t_1_blinding + x * t_2_blinding); - let e_blinding = a_blinding + x * s_blinding; + let (parties, poly_commitments): (Vec<_>, Vec<_>) = parties + .into_iter() + .map(|p| p.apply_challenge(&value_challenge, rng)) + .unzip(); - transcript.commit(t_x.as_bytes()); - transcript.commit(t_x_blinding.as_bytes()); - transcript.commit(e_blinding.as_bytes()); + let (dealer, poly_challenge) = dealer.receive_poly_commitments(&poly_commitments)?; - // Get a challenge value to combine statements for the IPP - let w = transcript.challenge_scalar(); - let Q = w * generators.pedersen_generators.B; - - // Generate the IPP proof - let ipp_proof = InnerProductProof::create( - transcript, - &Q, - util::exp_iter(y.invert()), - G, - H, - l_poly.eval(x), - r_poly.eval(x), - ); + let proof_shares: Vec<_> = parties + .into_iter() + .map(|p| p.apply_challenge(&poly_challenge)) + .collect(); - RangeProof { - A, - S, - T_1, - T_2, - t_x, - t_x_blinding, - e_blinding, - ipp_proof, - } + dealer.receive_trusted_shares(&proof_shares) } /// Verifies a rangeproof for a given value commitment \\(V\\). @@ -319,6 +236,8 @@ mod tests { use super::*; use rand::OsRng; + use generators::PedersenGenerators; + #[test] fn test_delta() { let mut rng = OsRng::new().unwrap(); @@ -345,13 +264,13 @@ mod tests { assert_eq!(power_g, delta(n, 1, &y, &z),); } - /// Given a bitsize `n`, test the full trip: + /// Given a bitsize `n`, test the following: /// - /// 1. 
Generate a random value and create a proof that it's in range; + /// 1. Generate `m` random values and create a proof they are all in range; /// 2. Serialize to wire format; /// 3. Deserialize from wire format; /// 4. Verify the proof. - fn create_and_verify_helper(n: usize) { + fn singleparty_create_and_verify_helper(n: usize, m: usize) { // Split the test into two scopes, so that it's explicit what // data is shared between the prover and the verifier. @@ -359,98 +278,181 @@ mod tests { use bincode; // Both prover and verifier have access to the generators and the proof - use generators::{Generators, PedersenGenerators}; - let generators = Generators::new(PedersenGenerators::default(), n, 1); + let generators = Generators::new(PedersenGenerators::default(), n, m); // Serialized proof data let proof_bytes: Vec; - let value_commitment: RistrettoPoint; + let value_commitments: Vec; // Prover's scope { - // Use a customization label for testing proofs - let mut transcript = ProofTranscript::new(b"RangeproofTest"); + // 1. Generate the proof + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - let v: u64 = rng.gen_range(0, (1 << (n - 1)) - 1); - let v_blinding = Scalar::random(&mut rng); + let (min, max) = (0u64, ((1u128 << n) - 1) as u64); + let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); + let blindings: Vec = (0..m).map(|_| Scalar::random(&mut rng)).collect(); - let range_proof = RangeProof::generate_proof( - generators.share(0), + let proof = RangeProof::prove_multiple( + &generators, &mut transcript, &mut rng, + &values, + &blindings, n, - v, - &v_blinding, - ); + ).unwrap(); // 2. Serialize - proof_bytes = bincode::serialize(&range_proof).unwrap(); + proof_bytes = bincode::serialize(&proof).unwrap(); - let gens = generators.share(0); - value_commitment = gens.pedersen_generators - .commit(Scalar::from_u64(v), v_blinding); + let pg = &generators.all().pedersen_generators; + + // XXX would be nice to have some convenience API for this + value_commitments = values + .iter() + .zip(blindings.iter()) + .map(|(&v, &v_blinding)| pg.commit(Scalar::from_u64(v), v_blinding)) + .collect(); } println!( - "Rangeproof with {} bits has size {} bytes", + "Aggregated rangeproof of m={} proofs of n={} bits has size {} bytes", + m, n, - proof_bytes.len() + proof_bytes.len(), ); // Verifier's scope { // 3. Deserialize - let range_proof: RangeProof = bincode::deserialize(&proof_bytes).unwrap(); + let proof: RangeProof = bincode::deserialize(&proof_bytes).unwrap(); + + // 4. Verify with the same customization label as above let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - // 4. 
Use the same customization label as above to verify - let mut transcript = ProofTranscript::new(b"RangeproofTest"); assert!( - range_proof - .verify_single( - &value_commitment, - generators.share(0), + proof + .verify( + &value_commitments, + generators.all(), &mut transcript, &mut rng, - n + n, + m ) .is_ok() ); - - // Verification with a different label fails - let mut transcript = ProofTranscript::new(b""); - assert!( - range_proof - .verify_single( - &value_commitment, - generators.share(0), - &mut transcript, - &mut rng, - n - ) - .is_err() - ); } } #[test] - fn create_and_verify_8() { - create_and_verify_helper(8); + fn create_and_verify_n_32_m_1() { + singleparty_create_and_verify_helper(32, 1); + } + + #[test] + fn create_and_verify_n_32_m_2() { + singleparty_create_and_verify_helper(32, 2); + } + + #[test] + fn create_and_verify_n_32_m_4() { + singleparty_create_and_verify_helper(32, 4); + } + + #[test] + fn create_and_verify_n_32_m_8() { + singleparty_create_and_verify_helper(32, 8); } #[test] - fn create_and_verify_16() { - create_and_verify_helper(16); + fn create_and_verify_n_64_m_1() { + singleparty_create_and_verify_helper(64, 1); } #[test] - fn create_and_verify_32() { - create_and_verify_helper(32); + fn create_and_verify_n_64_m_2() { + singleparty_create_and_verify_helper(64, 2); } #[test] - fn create_and_verify_64() { - create_and_verify_helper(64); + fn create_and_verify_n_64_m_4() { + singleparty_create_and_verify_helper(64, 4); + } + + #[test] + fn create_and_verify_n_64_m_8() { + singleparty_create_and_verify_helper(64, 8); + } + + #[test] + fn detect_dishonest_party_during_aggregation() { + use aggregated_range_proof::dealer::*; + use aggregated_range_proof::party::*; + + // Simulate four parties, two of which will be dishonest and use a 64-bit value. 
+ let m = 4; + let n = 32; + + let generators = Generators::new(PedersenGenerators::default(), n, m); + + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + // Parties 0, 2 are honest and use a 32-bit value + let v0 = rng.next_u32() as u64; + let v0_blinding = Scalar::random(&mut rng); + let party0 = Party::new(v0, v0_blinding, n, &generators).unwrap(); + + let v2 = rng.next_u32() as u64; + let v2_blinding = Scalar::random(&mut rng); + let party2 = Party::new(v2, v2_blinding, n, &generators).unwrap(); + + // Parties 1, 3 are dishonest and use a 64-bit value + let v1 = rng.next_u64(); + let v1_blinding = Scalar::random(&mut rng); + let party1 = Party::new(v1, v1_blinding, n, &generators).unwrap(); + + let v3 = rng.next_u64(); + let v3_blinding = Scalar::random(&mut rng); + let party3 = Party::new(v3, v3_blinding, n, &generators).unwrap(); + + let dealer = Dealer::new(generators.all(), n, m, &mut transcript).unwrap(); + + let (party0, value_com0) = party0.assign_position(0, &mut rng); + let (party1, value_com1) = party1.assign_position(1, &mut rng); + let (party2, value_com2) = party2.assign_position(2, &mut rng); + let (party3, value_com3) = party3.assign_position(3, &mut rng); + + let (dealer, value_challenge) = dealer + .receive_value_commitments(&[value_com0, value_com1, value_com2, value_com3]) + .unwrap(); + + let (party0, poly_com0) = party0.apply_challenge(&value_challenge, &mut rng); + let (party1, poly_com1) = party1.apply_challenge(&value_challenge, &mut rng); + let (party2, poly_com2) = party2.apply_challenge(&value_challenge, &mut rng); + let (party3, poly_com3) = party3.apply_challenge(&value_challenge, &mut rng); + + let (dealer, poly_challenge) = dealer + .receive_poly_commitments(&[poly_com0, poly_com1, poly_com2, poly_com3]) + .unwrap(); + + let share0 = party0.apply_challenge(&poly_challenge); + let share1 = party1.apply_challenge(&poly_challenge); + let share2 = party2.apply_challenge(&poly_challenge); + let share3 = party3.apply_challenge(&poly_challenge); + + match dealer.receive_shares(&mut rng, &[share0, share1, share2, share3]) { + Ok(_proof) => { + panic!("The proof was malformed, but it was not detected"); + } + Err(e) => { + // XXX when we have error types, check that it was party 1 that did it + assert_eq!(e, "proof failed to verify"); + } + } } } From 1f6be819f2e91327bedb88a8b93bbb48643edace Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 1 May 2018 15:01:51 -0700 Subject: [PATCH 163/186] Rearrange source tree. This deletes the `aggregated_range_proof` module entirely, and moves its remaining contents into the rangeproof code. 
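For downstream users the visible effect of this rearrangement is only a change of import path. Here is a minimal, hypothetical sketch of what that could look like, assuming the new `aggregation` module added to `src/lib.rs` in the diff below re-exports the `Dealer` and `Party` items under their existing names:

```rust
// Hypothetical caller-side imports after the rearrangement (illustration only).
extern crate ristretto_bulletproofs;

// The MPC roles would now be reached through the `aggregation` re-export
// instead of the old `aggregated_range_proof` module path.
use ristretto_bulletproofs::aggregation::dealer::Dealer;
use ristretto_bulletproofs::aggregation::party::Party;

// Single-prover callers are unaffected and keep using RangeProof directly.
use ristretto_bulletproofs::RangeProof;
```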
--- src/aggregated_range_proof/mod.rs | 5 ----- src/lib.rs | 8 +++++++- .../dealer.rs | 0 .../messages.rs | 0 src/{range_proof.rs => range_proof/mod.rs} | 15 ++++++++++----- .../party.rs | 0 6 files changed, 17 insertions(+), 11 deletions(-) delete mode 100644 src/aggregated_range_proof/mod.rs rename src/{aggregated_range_proof => range_proof}/dealer.rs (100%) rename src/{aggregated_range_proof => range_proof}/messages.rs (100%) rename src/{range_proof.rs => range_proof/mod.rs} (98%) rename src/{aggregated_range_proof => range_proof}/party.rs (100%) diff --git a/src/aggregated_range_proof/mod.rs b/src/aggregated_range_proof/mod.rs deleted file mode 100644 index 25608618..00000000 --- a/src/aggregated_range_proof/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -#![allow(non_snake_case)] - -pub mod dealer; -pub mod messages; -pub mod party; diff --git a/src/lib.rs b/src/lib.rs index 2269f42b..6941eebf 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,7 +28,6 @@ mod util; #[doc(include = "../docs/notes.md")] mod notes {} -pub mod aggregated_range_proof; mod generators; mod inner_product_proof; mod proof_transcript; @@ -37,3 +36,10 @@ mod range_proof; pub use generators::{Generators, GeneratorsView, PedersenGenerators}; pub use proof_transcript::ProofTranscript; pub use range_proof::RangeProof; + +/// API for performing the aggregate-proving multiparty computation protocol. +pub mod aggregation { + pub use range_proof::dealer; + pub use range_proof::messages; + pub use range_proof::party; +} diff --git a/src/aggregated_range_proof/dealer.rs b/src/range_proof/dealer.rs similarity index 100% rename from src/aggregated_range_proof/dealer.rs rename to src/range_proof/dealer.rs diff --git a/src/aggregated_range_proof/messages.rs b/src/range_proof/messages.rs similarity index 100% rename from src/aggregated_range_proof/messages.rs rename to src/range_proof/messages.rs diff --git a/src/range_proof.rs b/src/range_proof/mod.rs similarity index 98% rename from src/range_proof.rs rename to src/range_proof/mod.rs index 2547092a..e42a6291 100644 --- a/src/range_proof.rs +++ b/src/range_proof/mod.rs @@ -1,5 +1,4 @@ #![allow(non_snake_case)] -#![deny(missing_docs)] #![doc(include = "../docs/range-proof-protocol.md")] use rand::Rng; @@ -16,6 +15,12 @@ use inner_product_proof::InnerProductProof; use proof_transcript::ProofTranscript; use util; +// Modules for MPC protocol + +pub mod dealer; +pub mod messages; +pub mod party; + /// The `RangeProof` struct represents a single range proof. #[derive(Serialize, Deserialize, Clone, Debug)] pub struct RangeProof { @@ -64,8 +69,8 @@ impl RangeProof { blindings: &[Scalar], n: usize, ) -> Result { - use aggregated_range_proof::dealer::*; - use aggregated_range_proof::party::*; + use self::dealer::*; + use self::party::*; if values.len() != blindings.len() { return Err("mismatched values and blindings len"); @@ -390,8 +395,8 @@ mod tests { #[test] fn detect_dishonest_party_during_aggregation() { - use aggregated_range_proof::dealer::*; - use aggregated_range_proof::party::*; + use self::dealer::*; + use self::party::*; // Simulate four parties, two of which will be dishonest and use a 64-bit value. 
let m = 4; diff --git a/src/aggregated_range_proof/party.rs b/src/range_proof/party.rs similarity index 100% rename from src/aggregated_range_proof/party.rs rename to src/range_proof/party.rs From 9770a7903a8fd9d83c21aa5bcb06a703c07d36f2 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Tue, 1 May 2018 16:00:29 -0700 Subject: [PATCH 164/186] Keep RangeProof fields private now that aggregation code is part of the range_proof tree --- src/range_proof/mod.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index e42a6291..5701e70f 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -25,21 +25,21 @@ pub mod party; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct RangeProof { /// Commitment to the bits of the value - pub(crate) A: RistrettoPoint, + A: RistrettoPoint, /// Commitment to the blinding factors - pub(crate) S: RistrettoPoint, + S: RistrettoPoint, /// Commitment to the \\(t_1\\) coefficient of \\( t(x) \\) - pub(crate) T_1: RistrettoPoint, + T_1: RistrettoPoint, /// Commitment to the \\(t_2\\) coefficient of \\( t(x) \\) - pub(crate) T_2: RistrettoPoint, + T_2: RistrettoPoint, /// Evaluation of the polynomial \\(t(x)\\) at the challenge point \\(x\\) - pub(crate) t_x: Scalar, + t_x: Scalar, /// Blinding factor for the synthetic commitment to \\(t(x)\\) - pub(crate) t_x_blinding: Scalar, + t_x_blinding: Scalar, /// Blinding factor for the synthetic commitment to the inner-product arguments - pub(crate) e_blinding: Scalar, + e_blinding: Scalar, /// Proof data for the inner-product argument. - pub(crate) ipp_proof: InnerProductProof, + ipp_proof: InnerProductProof, } impl RangeProof { From 99479bcd653c3c7b7af01731708ef65ecdb63409 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Wed, 2 May 2018 09:46:17 -0700 Subject: [PATCH 165/186] Simplify API by reading m from the length of value_commitments --- benches/bulletproofs.rs | 1 - src/range_proof/dealer.rs | 1 - src/range_proof/mod.rs | 8 ++++---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/benches/bulletproofs.rs b/benches/bulletproofs.rs index 0d0ffba4..b7a06e47 100644 --- a/benches/bulletproofs.rs +++ b/benches/bulletproofs.rs @@ -104,7 +104,6 @@ fn verify_aggregated_rangeproof_helper(n: usize, c: &mut Criterion) { &mut transcript, &mut rng, n, - m, ) }); }, diff --git a/src/range_proof/dealer.rs b/src/range_proof/dealer.rs index a719225b..c4ec6c51 100644 --- a/src/range_proof/dealer.rs +++ b/src/range_proof/dealer.rs @@ -280,7 +280,6 @@ impl<'a, 'b> DealerAwaitingProofShares<'a, 'b> { &mut self.initial_transcript, rng, self.n, - self.m, ) .is_ok() { diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index 5701e70f..af042425 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -47,7 +47,7 @@ impl RangeProof { /// blinding scalar `v_blinding`. /// /// XXX add doctests - pub fn prove_multiple_single( + pub fn prove_single( generators: &Generators, transcript: &mut ProofTranscript, rng: &mut R, @@ -123,7 +123,7 @@ impl RangeProof { rng: &mut R, n: usize, ) -> Result<(), ()> { - self.verify(&[*V], gens, transcript, rng, n, 1) + self.verify(&[*V], gens, transcript, rng, n) } /// Verifies an aggregated rangeproof for the given value commitments. @@ -136,11 +136,12 @@ impl RangeProof { transcript: &mut ProofTranscript, rng: &mut R, n: usize, - m: usize, ) -> Result<(), ()> { // First, replay the "interactive" protocol using the proof // data to recompute all challenges. 
+ let m = value_commitments.len(); + transcript.commit_u64(n as u64); transcript.commit_u64(m as u64); @@ -346,7 +347,6 @@ mod tests { &mut transcript, &mut rng, n, - m ) .is_ok() ); From b6ead1e41a973073cc9f6cb38f6f73362edc4eb0 Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 2 May 2018 10:50:59 -0700 Subject: [PATCH 166/186] change to latex form --- docs/notes.md | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index ec5dfd4a..ebf11a65 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -606,24 +606,24 @@ additional and final step involves sending a pair of scalars Aggregated Range Proof ====================== -We want to create an aggregated range proof for `m` values that is more efficient to create and verify than `m` individual range proofs. +We want to create an aggregated range proof for \\(m\\) values that is more efficient to create and verify than \\(m\\) individual range proofs. -The aggregation protocol is a multi-party computation protocol, involving `m` parties (one party per value) and one dealer, where the parties don't reveal their secrets to each other. The parties share their commitments with the dealer, and the dealer generates and returns challenge variables. The parties then share their proof shares with the dealer, and the dealer combines their shares to create an aggregated proof. +The aggregation protocol is a multi-party computation protocol, involving \\(m\\) parties (one party per value) and one dealer, where the parties don't reveal their secrets to each other. The parties share their commitments with the dealer, and the dealer generates and returns challenge variables. The parties then share their proof shares with the dealer, and the dealer combines their shares to create an aggregated proof. -The Bulletproofs paper outlines two versions of multi-party computation aggregation - one with a constant number of rounds but communication that is linear in both `m` and the binary encoding of the range, and one with a logarithmic number of rounds and communication that is only linear in `m`. We chose to implement the first version because the two versions don't differ significantly in proof size, and the first version is more straightforward. +The Bulletproofs paper outlines two versions of multi-party computation aggregation - one with a constant number of rounds but communication that is linear in both \\(m\\) and the binary encoding of the range, and one with a logarithmic number of rounds and communication that is only linear in \\(m\\). We chose to implement the first version because the two versions don't differ significantly in proof size, and the first version is more straightforward. For more information on how the aggregation protocol works and is implemented, see the [protocol notes](../aggregated_range_proof/index.html). The aggregated range proof has the same form as the individual range proof, in that the provers (the parties) still perform the same calculations to prove that \\(t(x) = \langle \mathbf{l}(x), \mathbf{r}(x) \rangle \\) and that \\(t_0, \mathbf{l}(x), \mathbf{r}(x)\\) are correct. The difference is that the challenge values are obtained from the dealer, which generates them by combining commitments from all the parties, and that the calculations of different parties are seperated by different powers of the challenge scalars \\(y\\) and \\(z\\). 
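For concreteness, the message flow can also be sketched in code. The following is a condensed, non-authoritative outline that mirrors the `RangeProof::prove_multiple` implementation earlier in this series, with one prover playing the dealer and all \\(m\\) parties locally; error handling and variable setup are omitted.

```rust
// Sketch of the three dealer/party rounds (abridged from prove_multiple above).
let dealer = Dealer::new(generators.all(), n, values.len(), transcript)?;
let parties: Vec<_> = values
    .iter()
    .zip(blindings.iter())
    .map(|(&v, &v_blinding)| Party::new(v, v_blinding, n, &generators))
    .collect::<Result<Vec<_>, _>>()?;

// Round 1: each party commits to its value bits; the dealer replies with the
// challenges y and z derived from all of the commitments.
let (parties, value_commitments): (Vec<_>, Vec<_>) = parties
    .into_iter()
    .enumerate()
    .map(|(j, p)| p.assign_position(j, rng))
    .unzip();
let (dealer, value_challenge) = dealer.receive_value_commitments(&value_commitments)?;

// Round 2: each party commits to its t(x) coefficients; the dealer replies
// with the evaluation challenge x.
let (parties, poly_commitments): (Vec<_>, Vec<_>) = parties
    .into_iter()
    .map(|p| p.apply_challenge(&value_challenge, rng))
    .unzip();
let (dealer, poly_challenge) = dealer.receive_poly_commitments(&poly_commitments)?;

// Round 3: each party reveals its proof share; the dealer assembles the shares
// into the single aggregated RangeProof.
let proof_shares: Vec<_> = parties
    .into_iter()
    .map(|p| p.apply_challenge(&poly_challenge))
    .collect();
let proof = dealer.receive_trusted_shares(&proof_shares)?;
```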
-We will explain how one piece of the aggregated proof is generated for party `j`, and then will show how all of the pieces for all of the `m` parties can be combined into one aggregated proof. +We will explain how one piece of the aggregated proof is generated for party \\(j\\), and then will show how all of the pieces for all of the \\(m\\) parties can be combined into one aggregated proof. -Party `j` begins with a secret value \\(v_{(j)}\\), and wishes to convince the verifier that \\(v_{(j)} \in [0, 2^n)\\) without revealing \\(v_{(j)}\\). We use the notation that the subscript \\({(j)}\\) denotes the `j`th party's value. +Party \\(j\\) begins with a secret value \\(v_{(j)}\\), and wishes to convince the verifier that \\(v_{(j)} \in [0, 2^n)\\) without revealing \\(v_{(j)}\\). We use the notation that the subscript \\({(j)}\\) denotes the \\(j\\)th party's value. Proving range statements with bit vectors ----------------------------------------- -We want to make statements about \\(v_{(j)}\\) using its bit vector representation, where the statements will be true if and only if \\(v_{(j)}\\) is actually in the expected range. We will not reproduce the steps or explanation here since it is the same as in the [proving range statements with bit vectors](index.html#proving-range-statements-with-bit-vectors) step of the single-value range proof. Here are the final statements for party `j`: +We want to make statements about \\(v_{(j)}\\) using its bit vector representation, where the statements will be true if and only if \\(v_{(j)}\\) is actually in the expected range. We will not reproduce the steps or explanation here since it is the same as in the [proving range statements with bit vectors](index.html#proving-range-statements-with-bit-vectors) step of the single-value range proof. Here are the final statements for party \\(j\\): \\[ \begin{aligned} @@ -636,10 +636,10 @@ We want to make statements about \\(v_{(j)}\\) using its bit vector representati Proving vectors of statements with a single statement ----------------------------------------------------- -We want to combine the above three statements into a single statement for party `j`, as in the [proving vectors of statements](index.html#proving-vectors-of-statements-with-a-single-statement) step of the single-value range proof. We will additionally introduce challenge values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to each party `j`. Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all `m` parties. +We want to combine the above three statements into a single statement for party \\(j\\), as in the [proving vectors of statements](index.html#proving-vectors-of-statements-with-a-single-statement) step of the single-value range proof. We will additionally introduce challenge values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to each party \\(j\\). Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all \\(m\\) parties. 
First, we will combine each of the two vector-statements into a single statement using the verifier's choice of challenge value \\(y\\) that is shared across all parties -, and offset by vector \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} \\), a length `n` slice into vector \\(\mathbf{y}^{n \cdot m}\\) that is unique to each party `j`: +, and offset by vector \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} \\), a length \\(n\\) slice into vector \\(\mathbf{y}^{n \cdot m}\\) that is unique to each party \\(j\\): \\[ \begin{aligned} @@ -650,7 +650,7 @@ First, we will combine each of the two vector-statements into a single statement \\] The three resulting statements can then be combined in the same way, -using the verifier’s choice of challenge value \\(z\\) that is shared across all parties, and offset by scalar \\(z\_{(j)} = z^j\\) that is unique to each party `j`: +using the verifier’s choice of challenge value \\(z\\) that is shared across all parties, and offset by scalar \\(z\_{(j)} = z^j\\) that is unique to each party \\(j\\): \\[ \begin{aligned} z^{2} z\_{(j)} \cdot v_{(j)} @@ -692,7 +692,7 @@ Proving that \\(t\_{(j)}(x)\\) is correct means proving that \\({\mathbf{l}}\_{(j)}(x)\\), \\({\mathbf{r}}\_{(j)}(x)\\) are correctly formed, and that \\(t\_{(j)}(x) = {\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{r}}\_{(j)}(x) \rangle}\\). -We can combine the statements about \\(t\_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x)\\), and \\({\mathbf{r}}\_{(j)}(x)\\) from all `m` parties in the following manner: +We can combine the statements about \\(t\_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x)\\), and \\({\mathbf{r}}\_{(j)}(x)\\) from all \\(m\\) parties in the following manner: \\[ \begin{aligned} @@ -702,9 +702,9 @@ We can combine the statements about \\(t\_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x) \end{aligned} \\] -We can add the \\(t_{(j)}(x)\\) values together to create \\(t(x)\\) instead of taking a linear combination of \\(t_{(j)}(x)\\) values, because each \\(t_{(j)}(x)\\) is calculated with the \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) challenge variables that are unique to that party`j`, so all of the \\(t_{(j)}(x)\\) values will be offset from one another. +We can add the \\(t_{(j)}(x)\\) values together to create \\(t(x)\\) instead of taking a linear combination of \\(t_{(j)}(x)\\) values, because each \\(t_{(j)}(x)\\) is calculated with the \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) challenge variables that are unique to that party\\(j\\), so all of the \\(t_{(j)}(x)\\) values will be offset from one another. 
-Now instead of having to do `m` individual checks to prove that \\(t_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x)\\), and \\({\mathbf{r}}\_{(j)}(x)\\) for all parties `j` are correct, we can do the verification with one check: +Now instead of having to do \\(m\\) individual checks to prove that \\(t_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x)\\), and \\({\mathbf{r}}\_{(j)}(x)\\) for all parties \\(j\\) are correct, we can do the verification with one check: \\[ \begin{aligned} @@ -726,7 +726,7 @@ Proving that \\(t\_{(j)0}\\) is correct requires first creating commitments to t \end{aligned} \\] -If we combine all of the statements about \\(t\_{(j)0}\\) from all of the `j` parties by adding them together, then we get: +If we combine all of the statements about \\(t\_{(j)0}\\) from all of the \\(j\\) parties by adding them together, then we get: \\[ \begin{aligned} @@ -762,7 +762,7 @@ Since we know that \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n \\] -Now instead of having to do `m` individual checks to prove that \\(t\_{(j)0}\\) for all parties `j` are correct, we can do the verification with one check using the combined values: +Now instead of having to do \\(m\\) individual checks to prove that \\(t\_{(j)0}\\) for all parties \\(j\\) are correct, we can do the verification with one check using the combined values: \\[ \begin{aligned} @@ -791,9 +791,9 @@ Proving that \\({\mathbf{l}}\_{(j)}(x)\\), \\({\mathbf{r}}\_{(j)}(x)\\) are corr \end{aligned} \\] -\\({\mathbf{G}\_{(j)}}\\) is party `j`'s share of the generators \\({\mathbf{G}}\\), or \\({\mathbf{G}\_{[j\cdot n : (j+1)n - 1]}}\\), and \\({\mathbf{H}\_{(j)}}\\) is party `j`'s share of the generators \\({\mathbf{H}}\\), or \\({\mathbf{H}\_{[j\cdot n : (j+1)n - 1]}}\\). +\\({\mathbf{G}\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{G}}\\), or \\({\mathbf{G}\_{[j\cdot n : (j+1)n - 1]}}\\), and \\({\mathbf{H}\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{H}}\\), or \\({\mathbf{H}\_{[j\cdot n : (j+1)n - 1]}}\\). 
-If we combine all of the statements about \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) from all the `j` parties by adding them together, then we get: +If we combine all of the statements about \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) from all the \\(j\\) parties by adding them together, then we get: \\[ \begin{aligned} @@ -834,7 +834,7 @@ Therefore, we can simplify the following statements: \end{aligned} \\] -We can combine the values and commitments from all the `m` parties by summing them directly: +We can combine the values and commitments from all the \\(m\\) parties by summing them directly: \\[ \begin{aligned} @@ -844,7 +844,7 @@ We can combine the values and commitments from all the `m` parties by summing th \end{aligned} \\] -With these observations, we can simplify the combined `m`-party statement about \\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\) into: +With these observations, we can simplify the combined \\(m\\)-party statement about \\({\mathbf{l}}(x)\\) and \\({\mathbf{r}}(x)\\) into: \\[ \begin{aligned} From ee40de682b7a4bedbc1c75a29505590a9b9dd0bb Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 2 May 2018 11:26:28 -0700 Subject: [PATCH 167/186] change _(j)L to _L, (j) for L/R/0/1/2 --- docs/notes.md | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index ebf11a65..f1784a3e 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -627,9 +627,9 @@ We want to make statements about \\(v_{(j)}\\) using its bit vector representati \\[ \begin{aligned} - {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{2}}^{n} \rangle} &= v_{(j)} \\\\ - {\mathbf{a}}\_{(j)L} \circ {\mathbf{a}}\_{(j)R} &= {\mathbf{0}} \\\\ - ({\mathbf{a}}\_{(j)L} - {\mathbf{1}}) - {\mathbf{a}}\_{(j)R} &= {\mathbf{0}} + {\langle {\mathbf{a}}\_{L, (j)}, {\mathbf{2}}^{n} \rangle} &= v_{(j)} \\\\ + {\mathbf{a}}\_{L, (j)} \circ {\mathbf{a}}\_{R, (j)} &= {\mathbf{0}} \\\\ + ({\mathbf{a}}\_{L, (j)} - {\mathbf{1}}) - {\mathbf{a}}\_{R, (j)} &= {\mathbf{0}} \end{aligned} \\] @@ -643,9 +643,9 @@ First, we will combine each of the two vector-statements into a single statement \\[ \begin{aligned} - {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{2}}^{n} \rangle} &= v_{(j)} \\\\ - {\langle {\mathbf{a}}\_{(j)L} - {\mathbf{1}} - {\mathbf{a}}\_{(j)R}, {\mathbf{y}}^{n}\_{(j)} \rangle} &= 0 \\\\ - {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{a}}\_{(j)R} \circ {\mathbf{y}}^{n}\_{(j)} \rangle} &= 0 + {\langle {\mathbf{a}}\_{L, (j)}, {\mathbf{2}}^{n} \rangle} &= v_{(j)} \\\\ + {\langle {\mathbf{a}}\_{L, (j)} - {\mathbf{1}} - {\mathbf{a}}\_{R, (j)}, {\mathbf{y}}^{n}\_{(j)} \rangle} &= 0 \\\\ + {\langle {\mathbf{a}}\_{L, (j)}, {\mathbf{a}}\_{R, (j)} \circ {\mathbf{y}}^{n}\_{(j)} \rangle} &= 0 \end{aligned} \\] @@ -655,9 +655,9 @@ using the verifier’s choice of challenge value \\(z\\) that is shared across a \begin{aligned} z^{2} z\_{(j)} \cdot v_{(j)} &= - z^{2} z\_{(j)} \cdot {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{2}}^{n} \rangle} \\\\ - &+ z \cdot {\langle {\mathbf{a}}\_{(j)L} - {\mathbf{1}} - {\mathbf{a}}\_{(j)R}, {\mathbf{y}}^{n}\_{(j)} \rangle} \\\\ - &+ {\langle {\mathbf{a}}\_{(j)L}, {\mathbf{a}}\_{(j)R} \circ {\mathbf{y}}^{n}\_{(j)} \rangle} + z^{2} z\_{(j)} \cdot {\langle {\mathbf{a}}\_{L, (j)}, {\mathbf{2}}^{n} \rangle} \\\\ + &+ z \cdot {\langle {\mathbf{a}}\_{L, (j)} - {\mathbf{1}} - {\mathbf{a}}\_{R, (j)}, {\mathbf{y}}^{n}\_{(j)} \rangle} \\\\ + &+ {\langle {\mathbf{a}}\_{L, (j)}, {\mathbf{a}}\_{R, (j)} \circ {\mathbf{y}}^{n}\_{(j)} \rangle} \end{aligned} \\] @@ 
-669,19 +669,19 @@ We combine the terms in the preceding statement into a single inner product, usi \\[ \begin{aligned} \delta_{(j)}(y,z) &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} z_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle}\\\\ - z^{2}z_{(j)} \cdot v_{(j)} + \delta_{(j)}(y,z) &= {\langle {\mathbf{a}}\_{(j)L} - z {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \circ ({\mathbf{a}}\_{(j)R} + z {\mathbf{1}}) + z^{2} z_{(j)} \cdot {\mathbf{2}}^{n} \rangle} + z^{2}z_{(j)} \cdot v_{(j)} + \delta_{(j)}(y,z) &= {\langle {\mathbf{a}}\_{L, (j)} - z {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \circ ({\mathbf{a}}\_{R, (j)} + z {\mathbf{1}}) + z^{2} z_{(j)} \cdot {\mathbf{2}}^{n} \rangle} \end{aligned} \\] Blinding the inner product -------------------------- -The prover chooses vectors of blinding factors \\( \mathbf{s}\_{(j)L}, {\mathbf{s}}\_{(j)R} \\), and uses them to construct the blinded vector polynomials \\(\mathbf{l}\_{(j)}(x), \mathbf{r}\_{(j)}(x)\\). We will not reproduce the steps or the explanation here since it is the same as in the [blinding the inner product](index.html#blinding-the-inner-product) step of the single-value proof. Here are the final equations for the vector polynomials: +The prover chooses vectors of blinding factors \\( \mathbf{s}\_{L, (j)}, {\mathbf{s}}\_{R, (j)} \\), and uses them to construct the blinded vector polynomials \\(\mathbf{l}\_{(j)}(x), \mathbf{r}\_{(j)}(x)\\). We will not reproduce the steps or the explanation here since it is the same as in the [blinding the inner product](index.html#blinding-the-inner-product) step of the single-value proof. Here are the final equations for the vector polynomials: \\[ \begin{aligned} - {\mathbf{l}}\_{(j)}(x) &= ({\mathbf{a}}\_{(j)L} + {\mathbf{s}}\_{(j)L} x) - z {\mathbf{1}} & \in {\mathbb Z\_p}[x]^{n} \\\\ - {\mathbf{r}}\_{(j)}(x) &= {\mathbf{y}}^{n}\_{(j)} \circ \left( ({\mathbf{a}}\_{(j)R} + {\mathbf{s}}\_{(j)R} x\right) + z {\mathbf{1}}) + z^{2} z_{(j)} {\mathbf{2}}^{n} &\in {\mathbb Z\_p}[x]^{n} + {\mathbf{l}}\_{(j)}(x) &= ({\mathbf{a}}\_{L, (j)} + {\mathbf{s}}\_{L, (j)} x) - z {\mathbf{1}} & \in {\mathbb Z\_p}[x]^{n} \\\\ + {\mathbf{r}}\_{(j)}(x) &= {\mathbf{y}}^{n}\_{(j)} \circ \left( ({\mathbf{a}}\_{R, (j)} + {\mathbf{s}}\_{R, (j)} x\right) + z {\mathbf{1}}) + z^{2} z_{(j)} {\mathbf{2}}^{n} &\in {\mathbb Z\_p}[x]^{n} \end{aligned} \\] @@ -721,7 +721,7 @@ Proving that \\(t\_{(j)0}\\) is correct requires first creating commitments to t \\[ \begin{aligned} - t\_{(j)}(x) B + {\tilde{t}}\_{(j)}(x) {\widetilde{B}} \stackrel{?}{=} z^2 z\_{(j)} V_{(j)} + \delta\_{(j)}(y,z) B + x T\_{(j)1} + x^{2} T\_{(j)2}\\\\ + t\_{(j)}(x) B + {\tilde{t}}\_{(j)}(x) {\widetilde{B}} \stackrel{?}{=} z^2 z\_{(j)} V_{(j)} + \delta\_{(j)}(y,z) B + x T\_{1, (j)} + x^{2} T\_{2, (j)}\\\\ \delta\_{(j)}(y,z) = (z - z^{2}) \cdot {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n} \rangle} \end{aligned} \\] @@ -730,7 +730,7 @@ If we combine all of the statements about \\(t\_{(j)0}\\) from all of the \\(j\\ \\[ \begin{aligned} - \sum_{j=0}^{m-1}t_{(j)}(x) B + \sum_{j=0}^{m-1}{\tilde{t}}\_{(j)}(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z_{(j)} V_{(j)} + \sum_{j=0}^{m-1} \delta_{(j)}(y,z) B + x \sum_{j=0}^{m-1} T\_{(j)1} + x^{2} \sum_{j=0}^{m-1} T\_{(j)2} + \sum_{j=0}^{m-1}t_{(j)}(x) B + \sum_{j=0}^{m-1}{\tilde{t}}\_{(j)}(x) {\widetilde{B}} \stackrel{?}{=} z^2 \sum_{j=0}^{m-1} z_{(j)} V_{(j)} + \sum_{j=0}^{m-1} \delta_{(j)}(y,z) 
B + x \sum_{j=0}^{m-1} T\_{1, (j)} + x^{2} \sum_{j=0}^{m-1} T\_{2, (j)} \end{aligned} \\] @@ -740,8 +740,8 @@ We can combine the values and commitments by summing them directly. We can do th \begin{aligned} t(x) &= \sum_{j=0}^{m-1} t\_{(j)}(x)\\\\ {\tilde{t}}(x) &= \sum_{j=0}^{m-1}{\tilde{t}}\_{(j)}(x)\\\\ - T_1 &= \sum_{j=0}^{m-1} T_{(j)1}\\\\ - T_2 &= \sum_{j=0}^{m-1} T_{(j)2}\\\\ + T_1 &= \sum_{j=0}^{m-1} T_{1, (j)}\\\\ + T_2 &= \sum_{j=0}^{m-1} T_{2, (j)}\\\\ \delta(y,z) &= \sum_{j=0}^{m-1} \delta\_{(j)}(y,z)\\\\ &= (z - z^{2}) \cdot \sum_{j=0}^{m-1} {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\ \end{aligned} From a6705e273972795dc20be20452026bcf3e6867ec Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 2 May 2018 11:52:36 -0700 Subject: [PATCH 168/186] add explanation for subscripts --- docs/notes.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index f1784a3e..02ca19e3 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -618,7 +618,9 @@ The aggregated range proof has the same form as the individual range proof, in t We will explain how one piece of the aggregated proof is generated for party \\(j\\), and then will show how all of the pieces for all of the \\(m\\) parties can be combined into one aggregated proof. -Party \\(j\\) begins with a secret value \\(v_{(j)}\\), and wishes to convince the verifier that \\(v_{(j)} \in [0, 2^n)\\) without revealing \\(v_{(j)}\\). We use the notation that the subscript \\({(j)}\\) denotes the \\(j\\)th party's value. +Let's introduce some notation to help with the explanation of the aggregated proof. The subscript \\({(j)}\\) denotes the \\(j\\)th party's value. For instance, \\(v_{(j)}\\) is the \\(v\\) value of the \\(j\\)th party; \\( \mathbf{a}\_{L, (j)}\\) is the \\( \mathbf{a}\_L \\) vector of the \\(j\\)th party; \\(\mathbf{l}\_{(0)}(x)\\) is the \\(\mathbf{l}(x)\\) polynomial of party \\(0\\). + +Party \\(j\\) begins with a secret value \\(v_{(j)}\\), and wishes to convince the verifier that \\(v_{(j)} \in [0, 2^n)\\) without revealing \\(v_{(j)}\\). Proving range statements with bit vectors ----------------------------------------- @@ -717,7 +719,7 @@ We can do this check using the [inner product proof](index.html#inner-product-pr Proving that \\(t_0\\) is correct --------------------------------- -Proving that \\(t\_{(j)0}\\) is correct requires first creating commitments to the variables, and then proving a relation over the commitments. For an explanation of how the commitments are created and how the relation is derived, see the [proving that \\(t_0\\) is correct](index.html#proving-that-t_0-is-correct) step of the single-value range proof. The relation over the commitments to prove is: +Proving that \\(t\_{0, (j)}\\) is correct requires first creating commitments to the variables, and then proving a relation over the commitments. For an explanation of how the commitments are created and how the relation is derived, see the [proving that \\(t_0\\) is correct](index.html#proving-that-t_0-is-correct) step of the single-value range proof. 
The relation over the commitments to prove is: \\[ \begin{aligned} @@ -726,7 +728,7 @@ Proving that \\(t\_{(j)0}\\) is correct requires first creating commitments to t \end{aligned} \\] -If we combine all of the statements about \\(t\_{(j)0}\\) from all of the \\(j\\) parties by adding them together, then we get: +If we combine all of the statements about \\(t\_{0, (j)}\\) from all of the \\(j\\) parties by adding them together, then we get: \\[ \begin{aligned} @@ -762,7 +764,7 @@ Since we know that \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n \\] -Now instead of having to do \\(m\\) individual checks to prove that \\(t\_{(j)0}\\) for all parties \\(j\\) are correct, we can do the verification with one check using the combined values: +Now instead of having to do \\(m\\) individual checks to prove that \\(t\_{0, (j)}\\) for all parties \\(j\\) are correct, we can do the verification with one check using the combined values: \\[ \begin{aligned} From 36e00469050808eaf830355c9d4d19fa0d98c24b Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 2 May 2018 12:01:05 -0700 Subject: [PATCH 169/186] touchups --- docs/notes.md | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 02ca19e3..9375c1ee 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -704,7 +704,7 @@ We can combine the statements about \\(t\_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x) \end{aligned} \\] -We can add the \\(t_{(j)}(x)\\) values together to create \\(t(x)\\) instead of taking a linear combination of \\(t_{(j)}(x)\\) values, because each \\(t_{(j)}(x)\\) is calculated with the \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) challenge variables that are unique to that party\\(j\\), so all of the \\(t_{(j)}(x)\\) values will be offset from one another. +We can add the \\(t_{(j)}(x)\\) values together to create \\(t(x)\\) instead of taking a random linear combination of \\(t_{(j)}(x)\\) values, because each \\(t_{(j)}(x)\\) is calculated with the \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) challenge variables that are unique to that party\\(j\\), so all of the \\(t_{(j)}(x)\\) values will be offset from one another. Now instead of having to do \\(m\\) individual checks to prove that \\(t_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x)\\), and \\({\mathbf{r}}\_{(j)}(x)\\) for all parties \\(j\\) are correct, we can do the verification with one check: @@ -719,7 +719,7 @@ We can do this check using the [inner product proof](index.html#inner-product-pr Proving that \\(t_0\\) is correct --------------------------------- -Proving that \\(t\_{0, (j)}\\) is correct requires first creating commitments to the variables, and then proving a relation over the commitments. For an explanation of how the commitments are created and how the relation is derived, see the [proving that \\(t_0\\) is correct](index.html#proving-that-t_0-is-correct) step of the single-value range proof. The relation over the commitments to prove is: +Proving that \\(t\_{0, (j)}\\) is correct requires first creating commitments to the variables, and then proving a relation over the commitments. For an explanation of how the commitments are created and how the relation is derived, see the [proving that \\(t_0\\) is correct](index.html#proving-that-t_0-is-correct) step of the single-value range proof. 
The statement each party wants to prove is: \\[ \begin{aligned} @@ -736,7 +736,7 @@ If we combine all of the statements about \\(t\_{0, (j)}\\) from all of the \\(j \end{aligned} \\] -We can combine the values and commitments by summing them directly. We can do this instead of having to take a linear combination, because each party's values and commitments are already offset by the values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to that party. +We can combine the values and commitments by summing them directly. We can do this instead of having to take a random linear combination, because each party's values and commitments are already offset by the values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to that party. \\[ \begin{aligned} @@ -745,7 +745,14 @@ We can combine the values and commitments by summing them directly. We can do th T_1 &= \sum_{j=0}^{m-1} T_{1, (j)}\\\\ T_2 &= \sum_{j=0}^{m-1} T_{2, (j)}\\\\ \delta(y,z) &= \sum_{j=0}^{m-1} \delta\_{(j)}(y,z)\\\\ - &= (z - z^{2}) \cdot \sum_{j=0}^{m-1} {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\ +\end{aligned} +\\] + +We can plug the equation for \\(\delta_{(j)}(y,z)\\) into the calculation for \\(\delta(y,z)\\): + +\\[ +\begin{aligned} + \delta(y, z) &= (z - z^{2}) \cdot \sum_{j=0}^{m-1} {\langle {\mathbf{1}}, {\mathbf{y}}^{n}\_{(j)} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle}\\\\ \end{aligned} \\] @@ -785,7 +792,7 @@ Since we know that \\(z\_{(j)} = z^j\\), we can rewrite the equation as follows: Proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct --------------------------------------------------------------------- -Proving that \\({\mathbf{l}}\_{(j)}(x)\\), \\({\mathbf{r}}\_{(j)}(x)\\) are correct requires first creating commitments to the variables, and then proving a relation over the commitments. For an explanation of how the commitments are created and how the relation is derived, see the [proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct](index.html#proving-that-mathbflx-mathbfrx-are-correct) step of the single-value range proof. The relation over the commitments to prove is: +Proving that \\({\mathbf{l}}\_{(j)}(x)\\), \\({\mathbf{r}}\_{(j)}(x)\\) are correct requires first creating commitments to the variables, and then proving a relation over the commitments. For an explanation of how the commitments are created and how the relation is derived, see the [proving that \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) are correct](index.html#proving-that-mathbflx-mathbfrx-are-correct) step of the single-value range proof. The statement that each party wants to prove is: \\[ \begin{aligned} From 7bdb4f9d5ff3cc1c3a1f742c2ebe3a9be2a563e8 Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 2 May 2018 14:07:16 -0700 Subject: [PATCH 170/186] better explanation in intro --- docs/notes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 9375c1ee..6babb974 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -606,11 +606,11 @@ additional and final step involves sending a pair of scalars Aggregated Range Proof ====================== -We want to create an aggregated range proof for \\(m\\) values that is more efficient to create and verify than \\(m\\) individual range proofs. 
+We want to take advantage of the logarithmic size of the inner-product protocol, by creating an aggregated range proof for \\(m\\) values that is smaller than \\(m\\) individual range proofs. The aggregation protocol is a multi-party computation protocol, involving \\(m\\) parties (one party per value) and one dealer, where the parties don't reveal their secrets to each other. The parties share their commitments with the dealer, and the dealer generates and returns challenge variables. The parties then share their proof shares with the dealer, and the dealer combines their shares to create an aggregated proof. -The Bulletproofs paper outlines two versions of multi-party computation aggregation - one with a constant number of rounds but communication that is linear in both \\(m\\) and the binary encoding of the range, and one with a logarithmic number of rounds and communication that is only linear in \\(m\\). We chose to implement the first version because the two versions don't differ significantly in proof size, and the first version is more straightforward. +The Bulletproofs paper outlines two versions of multi-party computation aggregation. In the first approach, the inner-product proof is performed by the dealer, which requires sending the vectors used for the inner-product to the dealer. In the second approach, the inner-product proof is performed using multi-party computation, which sends less data but requires one round for each iteration of the inner-product protocol. We chose to implement the first approach because it requires fewer round trips between parties, which outweighed the slight message size savings of the second approach. For more information on how the aggregation protocol works and is implemented, see the [protocol notes](../aggregated_range_proof/index.html). From ee33c5f98e49d9bfef9ff16a0cf5f4dc2d94eb16 Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 2 May 2018 14:29:25 -0700 Subject: [PATCH 171/186] change H to H' to make exponential ys less confusing --- docs/notes.md | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 6babb974..19b928ad 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -796,17 +796,22 @@ Proving that \\({\mathbf{l}}\_{(j)}(x)\\), \\({\mathbf{r}}\_{(j)}(x)\\) are corr \\[ \begin{aligned} - {\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} + {\langle {\mathbf{r}}\_{(j)}(x) \circ (\mathbf{y}^{n}\_{(j)})^{-1}, {\mathbf{H}}\_{(j)} \rangle} \stackrel{?}{=} -{\widetilde{e}\_{(j)}} {\widetilde{B}} + A_{(j)} + x S_{(j)} - z{\langle {\mathbf{1}}, {\mathbf{G}\_{(j)}} \rangle} + {\langle z {\mathbf{1}} + z^2 z_{(j)} \cdot (\mathbf{y}^{n}\_{(j)})^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}\_{(j)}} \rangle} + {\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} + {\langle {\mathbf{r}}\_{(j)}(x), {\mathbf{H}'}\_{(j)} \rangle} \stackrel{?}{=} -{\widetilde{e}\_{(j)}} {\widetilde{B}} + A_{(j)} + x S_{(j)} - z{\langle {\mathbf{1}}, {\mathbf{G}\_{(j)}} \rangle} + {\langle z \mathbf{y}^{n}\_{(j)} + z^2 z_{(j)} {\mathbf{2}}^n, {\mathbf{H}'}\_{(j)} \rangle} \end{aligned} \\] -\\({\mathbf{G}\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{G}}\\), or \\({\mathbf{G}\_{[j\cdot n : (j+1)n - 1]}}\\), and \\({\mathbf{H}\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{H}}\\), or \\({\mathbf{H}\_{[j\cdot n : (j+1)n - 1]}}\\). 
+\\({\mathbf{G}\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{G}}\\), or \\({\mathbf{G}\_{[j\cdot n : (j+1)n - 1]}}\\), and \\({\mathbf{H}'\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{H}'}\\), or \\({\mathbf{H}'\_{[j\cdot n : (j+1)n - 1]}}\\). If we combine all of the statements about \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) from all the \\(j\\) parties by adding them together, then we get: \\[ \begin{aligned} - \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} + \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_{(j)}(x) \circ (\mathbf{y}^{n}\_{(j)})^{-1}, {\mathbf{H}}\_{(j)} \rangle} \stackrel{?}{=} -\sum_{j=0}^{m-1}{\widetilde{e}\_{(j)}} {\widetilde{B}} + \sum_{j=0}^{m-1}A_{(j)} + x \sum_{j=0}^{m-1}S_{(j)} - z \sum_{j=0}^{m-1}{\langle {\mathbf{1}}, {\mathbf{G}\_{(j)}} \rangle} + \sum_{j=0}^{m-1}{\langle z {\mathbf{1}} + z^2 z_{(j)} \cdot (\mathbf{y}^{n}\_{(j)})^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}\_{(j)}} \rangle} + \sum_{j=0}^{m-1}{\langle {\mathbf{l}}\_{(j)}(x), {\mathbf{G}\_{(j)}} \rangle} + + \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_{(j)}(x), {\mathbf{H}'}\_{(j)} \rangle} \stackrel{?}{=} + -\sum_{j=0}^{m-1}{\widetilde{e}\_{(j)}} {\widetilde{B}} + + \sum_{j=0}^{m-1}A_{(j)} + x \sum_{j=0}^{m-1}S_{(j)} - + z \sum_{j=0}^{m-1}{\langle {\mathbf{1}}, {\mathbf{G}\_{(j)}} \rangle} + + \sum_{j=0}^{m-1}{\langle z {\mathbf{y}^n_{(j)}} + z^2 z_{(j)} {\mathbf{2}}^n, {\mathbf{H}'\_{(j)}} \rangle} \end{aligned} \\] @@ -817,7 +822,7 @@ We can simplify this expression by making a few observations. We know that: {\mathbf{l}}(x) &= {\mathbf{l}}\_{(0)}(x) || {\mathbf{l}}\_{(1)}(x) || \dots || {\mathbf{l}}\_{(m-1)}(x) \\\\ {\mathbf{r}}(x) &= {\mathbf{r}}\_{(0)}(x) || {\mathbf{r}}\_{(1)}(x) || \dots || {\mathbf{r}}\_{(m-1)}(x) \\\\ {\mathbf{G}} &= {\mathbf{G}}\_{(0)} || {\mathbf{G}}\_{(1)} || \dots || {\mathbf{G}}\_{(m-1)} \\\\ - {\mathbf{H}} &= {\mathbf{H}}\_{(0)} || {\mathbf{H}}\_{(1)} || \dots || {\mathbf{H}}\_{(m-1)} \\\\ + {\mathbf{H}'} &= {\mathbf{H}'}\_{(0)} || {\mathbf{H}'}\_{(1)} || \dots || {\mathbf{H}'}\_{(m-1)} \\\\ \mathbf{y}^n\_{(j)} &= \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} \\\\ z_{(j)} &= z^j \end{aligned} @@ -833,13 +838,13 @@ Therefore, we can simplify the following statements: {\langle {\mathbf{l}}\_{(m-1)}(x), {\mathbf{G}}\_{(m-1)} \rangle}\\\\ &= {\langle {\mathbf{l}}\_{(0)}(x) || {\mathbf{l}}\_{(1)}(x) || \dots || {\mathbf{l}}\_{(m-1)}(x), {\mathbf{G}}\_{(0)} || {\mathbf{G}}\_{(1)} || \dots || {\mathbf{G}}\_{(m-1)} \rangle} \\\\ &= {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} \\\\ - \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_{(j)}(x) \circ (\mathbf{y}^{n}\_{(j)})^{-1}, {\mathbf{H}}\_{(j)} \rangle} &= {\langle {\mathbf{r}}\_{(0)}(x) \circ (\mathbf{y}^{n}\_{(0)})^{-1}, {\mathbf{H}}\_{(0)} \rangle} + - {\langle {\mathbf{r}}\_{(1)}(x) \circ (\mathbf{y}^{n}\_{(1)})^{-1}, {\mathbf{H}}\_{(1)} \rangle} + + \sum_{j=0}^{m-1}{\langle {\mathbf{r}}\_{(j)}(x), {\mathbf{H}'}\_{(j)} \rangle} + &= {\langle {\mathbf{r}}\_{(0)}(x), {\mathbf{H}'}\_{(0)} \rangle} + + {\langle {\mathbf{r}}\_{(1)}(x), {\mathbf{H}'}\_{(1)} \rangle} + \dots + - {\langle {\mathbf{r}}\_{(m-1)}(x) \circ (\mathbf{y}^{n}\_{(m-1)})^{-1}, {\mathbf{H}}\_{(m-1)} \rangle} \\\\ - &= {\langle {\mathbf{r}}\_{(0)}(x) \circ (\mathbf{y}^{n}\_{(0)})^{-1} || {\mathbf{r}}\_{(1)}(x) \circ (\mathbf{y}^{n}\_{(1)})^{-1} || \dots || {\mathbf{r}}\_{(m-1)}(x) \circ (\mathbf{y}^{n}\_{(m-1)})^{-1}, {\mathbf{H}}\_{(0)} || {\mathbf{H}}\_{(1)} || \dots || {\mathbf{H}}\_{(m-1)} 
\rangle}\\\\ - &= {\langle {\mathbf{r}}\_{(0)}(x) \circ (\mathbf{y}^{n \cdot m}\_{[0 : n - 1]})^{-1} || {\mathbf{r}}\_{(1)}(x) \circ (\mathbf{y}^{n \cdot m}\_{[ n : 2 \cdot n - 1]})^{-1} || \dots || {\mathbf{r}}\_{(m-1)}(x) \circ (\mathbf{y}^{n \cdot m}\_{[(m-1) \cdot n : m \cdot n - 1]})^{-1}, {\mathbf{H}}\_{(0)} || {\mathbf{H}}\_{(1)} || \dots || {\mathbf{H}}\_{(m-1)} \rangle}\\\\ - &= {\langle {\mathbf{r}}(x) \circ {\mathbf{y}^{-n \cdot m}}, {\mathbf{H}} \rangle} + {\langle {\mathbf{r}}\_{(m-1)}(x), {\mathbf{H}'}\_{(m-1)} \rangle} \\\\ + &= {\langle {\mathbf{r}}\_{(0)}(x) || {\mathbf{r}}\_{(1)}(x) || \dots || {\mathbf{r}}\_{(m-1)}(x), {\mathbf{H}'}\_{(0)} || {\mathbf{H}'}\_{(1)} || \dots || {\mathbf{H}'}\_{(m-1)} \rangle}\\\\ + &= {\langle {\mathbf{r}}(x), {\mathbf{H}'} \rangle} \end{aligned} \\] @@ -857,7 +862,7 @@ With these observations, we can simplify the combined \\(m\\)-party statement ab \\[ \begin{aligned} - {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x) \circ {\mathbf{y}^{-n \cdot m}} , {\mathbf{H}} \rangle} \stackrel{?}{=} -{\widetilde{e}} {\widetilde{B}} + A + x S - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} + z{\langle {\mathbf{1}}, {\mathbf{H}} \rangle} + \sum_{j=0}^{m-1} {\langle z^{j+2} \cdot (\mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]})^{-1} \circ {\mathbf{2}}^n, {\mathbf{H}}\_{[j \cdot n : (j+1) \cdot n - 1]} \rangle} + {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x), {\mathbf{H}'} \rangle} \stackrel{?}{=} -{\widetilde{e}} {\widetilde{B}} + A + x S - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} + z{\langle {\mathbf{y}^{n \cdot m}}, {\mathbf{H}'} \rangle} + \sum_{j=0}^{m-1} {\langle z^{j+2} \cdot {\mathbf{2}}^n, {\mathbf{H}'}\_{[j \cdot n : (j+1) \cdot n - 1]} \rangle} \end{aligned} \\] From 30b1c6710a21dd78b41f924d57b24cecb13d6e8b Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 2 May 2018 15:56:47 -0700 Subject: [PATCH 172/186] change indexing to use pythonic notation, add agg proof notation section --- docs/notes.md | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 19b928ad..000fa8ae 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -618,13 +618,20 @@ The aggregated range proof has the same form as the individual range proof, in t We will explain how one piece of the aggregated proof is generated for party \\(j\\), and then will show how all of the pieces for all of the \\(m\\) parties can be combined into one aggregated proof. -Let's introduce some notation to help with the explanation of the aggregated proof. The subscript \\({(j)}\\) denotes the \\(j\\)th party's value. For instance, \\(v_{(j)}\\) is the \\(v\\) value of the \\(j\\)th party; \\( \mathbf{a}\_{L, (j)}\\) is the \\( \mathbf{a}\_L \\) vector of the \\(j\\)th party; \\(\mathbf{l}\_{(0)}(x)\\) is the \\(\mathbf{l}(x)\\) polynomial of party \\(0\\). +New notation for aggregated proofs +---------------------------------- -Party \\(j\\) begins with a secret value \\(v_{(j)}\\), and wishes to convince the verifier that \\(v_{(j)} \in [0, 2^n)\\) without revealing \\(v_{(j)}\\). +The subscript \\({(j)}\\) denotes the \\(j\\)th party's share. For instance, \\(v_{(j)}\\) is the \\(v\\) value of the \\(j\\)th party; \\( \mathbf{a}\_{L, (j)}\\) is the \\( \mathbf{a}\_L \\) vector of the \\(j\\)th party; \\(\mathbf{l}\_{(0)}(x)\\) is the \\(\mathbf{l}(x)\\) polynomial of party \\(0\\). 
+ +We use pythonic notation to denote slices of vectors, such that \\(\mathbf{G}\_{[a:b]} = [\mathbf{G}\_{a}, \mathbf{G}\_{a+1}, \dots, \mathbf{G}\_{b-1} ]\\). + +\\({\mathbf{G}\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{G}}\\), or \\({\mathbf{G}\_{[j\cdot n : (j+1)n]}}\\), and \\({\mathbf{H}'\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{H}'}\\), or \\({\mathbf{H}'\_{[j\cdot n : (j+1)n]}}\\). Proving range statements with bit vectors ----------------------------------------- +Party \\(j\\) begins with a secret value \\(v_{(j)}\\), and wishes to convince the verifier that \\(v_{(j)} \in [0, 2^n)\\) without revealing \\(v_{(j)}\\). + We want to make statements about \\(v_{(j)}\\) using its bit vector representation, where the statements will be true if and only if \\(v_{(j)}\\) is actually in the expected range. We will not reproduce the steps or explanation here since it is the same as in the [proving range statements with bit vectors](index.html#proving-range-statements-with-bit-vectors) step of the single-value range proof. Here are the final statements for party \\(j\\): \\[ @@ -641,7 +648,7 @@ Proving vectors of statements with a single statement We want to combine the above three statements into a single statement for party \\(j\\), as in the [proving vectors of statements](index.html#proving-vectors-of-statements-with-a-single-statement) step of the single-value range proof. We will additionally introduce challenge values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to each party \\(j\\). Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all \\(m\\) parties. First, we will combine each of the two vector-statements into a single statement using the verifier's choice of challenge value \\(y\\) that is shared across all parties -, and offset by vector \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} \\), a length \\(n\\) slice into vector \\(\mathbf{y}^{n \cdot m}\\) that is unique to each party \\(j\\): +, and offset by vector \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n]} \\), a length \\(n\\) slice into vector \\(\mathbf{y}^{n \cdot m}\\) that is unique to each party \\(j\\): \\[ \begin{aligned} @@ -756,15 +763,15 @@ We can plug the equation for \\(\delta_{(j)}(y,z)\\) into the calculation for \\ \end{aligned} \\] -Since we know that \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} \\), we can simplify \\(\delta(y, z)\\): +Since we know that \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n]} \\), we can simplify \\(\delta(y, z)\\): \\[ \begin{aligned} \delta(y, z) &= (z - z^{2}) \cdot ( - {\langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[0 : n - 1]} \rangle + - \langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[n : 2 \cdot n - 1]} \rangle + + {\langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[0 : n]} \rangle + + \langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[n : 2 \cdot n]} \rangle + \dots + - \langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[(m-1) \cdot n : m \cdot n - 1]} \rangle}) - + \langle {\mathbf{1}}, \mathbf{y}^{n \cdot m}\_{[(m-1) \cdot n : m \cdot n]} \rangle}) - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, {\mathbf{2}}^{n \cdot m} \rangle} \\\\ &= (z - z^{2}) \cdot {\langle {\mathbf{1}}, \mathbf{y}^{n \cdot m} \rangle} - z^{3} \sum_{j=0}^{m-1} z\_{(j)} \cdot {\langle {\mathbf{1}}, 
{\mathbf{2}}^{n \cdot m} \rangle} \\\\ \end{aligned} @@ -800,8 +807,6 @@ Proving that \\({\mathbf{l}}\_{(j)}(x)\\), \\({\mathbf{r}}\_{(j)}(x)\\) are corr \end{aligned} \\] -\\({\mathbf{G}\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{G}}\\), or \\({\mathbf{G}\_{[j\cdot n : (j+1)n - 1]}}\\), and \\({\mathbf{H}'\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{H}'}\\), or \\({\mathbf{H}'\_{[j\cdot n : (j+1)n - 1]}}\\). - If we combine all of the statements about \\({\mathbf{l}}(x)\\), \\({\mathbf{r}}(x)\\) from all the \\(j\\) parties by adding them together, then we get: \\[ @@ -823,7 +828,7 @@ We can simplify this expression by making a few observations. We know that: {\mathbf{r}}(x) &= {\mathbf{r}}\_{(0)}(x) || {\mathbf{r}}\_{(1)}(x) || \dots || {\mathbf{r}}\_{(m-1)}(x) \\\\ {\mathbf{G}} &= {\mathbf{G}}\_{(0)} || {\mathbf{G}}\_{(1)} || \dots || {\mathbf{G}}\_{(m-1)} \\\\ {\mathbf{H}'} &= {\mathbf{H}'}\_{(0)} || {\mathbf{H}'}\_{(1)} || \dots || {\mathbf{H}'}\_{(m-1)} \\\\ - \mathbf{y}^n\_{(j)} &= \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n - 1]} \\\\ + \mathbf{y}^n\_{(j)} &= \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n]} \\\\ z_{(j)} &= z^j \end{aligned} \\] @@ -862,7 +867,7 @@ With these observations, we can simplify the combined \\(m\\)-party statement ab \\[ \begin{aligned} - {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x), {\mathbf{H}'} \rangle} \stackrel{?}{=} -{\widetilde{e}} {\widetilde{B}} + A + x S - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} + z{\langle {\mathbf{y}^{n \cdot m}}, {\mathbf{H}'} \rangle} + \sum_{j=0}^{m-1} {\langle z^{j+2} \cdot {\mathbf{2}}^n, {\mathbf{H}'}\_{[j \cdot n : (j+1) \cdot n - 1]} \rangle} + {\langle {\mathbf{l}}(x), {\mathbf{G}} \rangle} + {\langle {\mathbf{r}}(x), {\mathbf{H}'} \rangle} \stackrel{?}{=} -{\widetilde{e}} {\widetilde{B}} + A + x S - z{\langle {\mathbf{1}}, {\mathbf{G}} \rangle} + z{\langle {\mathbf{y}^{n \cdot m}}, {\mathbf{H}'} \rangle} + \sum_{j=0}^{m-1} {\langle z^{j+2} \cdot {\mathbf{2}}^n, {\mathbf{H}'}\_{[j \cdot n : (j+1) \cdot n]} \rangle} \end{aligned} \\] From b4c7c296aceae2d0130a1e23d95060d097a086cc Mon Sep 17 00:00:00 2001 From: Cathie Date: Wed, 2 May 2018 16:08:34 -0700 Subject: [PATCH 173/186] move definitions of z_j and y_j to the notation section --- docs/notes.md | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 000fa8ae..382b4140 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -627,6 +627,10 @@ We use pythonic notation to denote slices of vectors, such that \\(\mathbf{G}\_{ \\({\mathbf{G}\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{G}}\\), or \\({\mathbf{G}\_{[j\cdot n : (j+1)n]}}\\), and \\({\mathbf{H}'\_{(j)}}\\) is party \\(j\\)'s share of the generators \\({\mathbf{H}'}\\), or \\({\mathbf{H}'\_{[j\cdot n : (j+1)n]}}\\). +\\(z_{(j)}\\) is a scalar offset that is unique to each party \\(j\\), and is defined by \\(z_{(j)} = z^j\\). \\(\mathbf{y}^n\_{(j)}\\) is a length \\(n\\) vector offset that is unique to each party \\(j\\). 
It is a slice into vector \\(\mathbf{y}^{n \cdot m}\\), and is defined by \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n]} \\) + + + Proving range statements with bit vectors ----------------------------------------- @@ -645,10 +649,9 @@ We want to make statements about \\(v_{(j)}\\) using its bit vector representati Proving vectors of statements with a single statement ----------------------------------------------------- -We want to combine the above three statements into a single statement for party \\(j\\), as in the [proving vectors of statements](index.html#proving-vectors-of-statements-with-a-single-statement) step of the single-value range proof. We will additionally introduce challenge values \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to each party \\(j\\). Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all \\(m\\) parties. +We want to combine the above three statements into a single statement for party \\(j\\), as in the [proving vectors of statements](index.html#proving-vectors-of-statements-with-a-single-statement) step of the single-value range proof. We will additionally use offsets \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) that are unique to each party \\(j\\). Since these challenge values are independent for each party, we can later merge the per-party combined statements into one statement for all \\(m\\) parties. -First, we will combine each of the two vector-statements into a single statement using the verifier's choice of challenge value \\(y\\) that is shared across all parties -, and offset by vector \\(\mathbf{y}^n\_{(j)} = \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n]} \\), a length \\(n\\) slice into vector \\(\mathbf{y}^{n \cdot m}\\) that is unique to each party \\(j\\): +First, we will combine each of the two vector-statements into a single statement using the verifier's choice of challenge value \\(y\\) that is shared across all parties, and offset by vector \\(\mathbf{y}^n\_{(j)}\\): \\[ \begin{aligned} @@ -659,7 +662,7 @@ First, we will combine each of the two vector-statements into a single statement \\] The three resulting statements can then be combined in the same way, -using the verifier’s choice of challenge value \\(z\\) that is shared across all parties, and offset by scalar \\(z\_{(j)} = z^j\\) that is unique to each party \\(j\\): +using the verifier’s choice of challenge value \\(z\\) that is shared across all parties, and offset by scalar \\(z\_{(j)} \\) : \\[ \begin{aligned} z^{2} z\_{(j)} \cdot v_{(j)} @@ -711,7 +714,7 @@ We can combine the statements about \\(t\_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x) \end{aligned} \\] -We can add the \\(t_{(j)}(x)\\) values together to create \\(t(x)\\) instead of taking a random linear combination of \\(t_{(j)}(x)\\) values, because each \\(t_{(j)}(x)\\) is calculated with the \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) challenge variables that are unique to that party\\(j\\), so all of the \\(t_{(j)}(x)\\) values will be offset from one another. +We can add the \\(t_{(j)}(x)\\) values together to create \\(t(x)\\) instead of taking a random linear combination of \\(t_{(j)}(x)\\) values, because each \\(t_{(j)}(x)\\) is calculated with the \\(\mathbf{y}^n\_{(j)}\\) and \\(z_{(j)}\\) challenge variables that are unique to that party \\(j\\), so all of the \\(t_{(j)}(x)\\) values will be offset from one another. 
Now instead of having to do \\(m\\) individual checks to prove that \\(t_{(j)}(x)\\), \\({\mathbf{l}}\_{(j)}(x)\\), and \\({\mathbf{r}}\_{(j)}(x)\\) for all parties \\(j\\) are correct, we can do the verification with one check: From 193b5927478b8163ee4ca2a7d96be2e203d599db Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 3 May 2018 16:10:17 -0700 Subject: [PATCH 174/186] aligned concatenation for l/r/G/H --- docs/notes.md | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/docs/notes.md b/docs/notes.md index 382b4140..d2439bf5 100644 --- a/docs/notes.md +++ b/docs/notes.md @@ -827,12 +827,16 @@ We can simplify this expression by making a few observations. We know that: \\[ \begin{aligned} - {\mathbf{l}}(x) &= {\mathbf{l}}\_{(0)}(x) || {\mathbf{l}}\_{(1)}(x) || \dots || {\mathbf{l}}\_{(m-1)}(x) \\\\ - {\mathbf{r}}(x) &= {\mathbf{r}}\_{(0)}(x) || {\mathbf{r}}\_{(1)}(x) || \dots || {\mathbf{r}}\_{(m-1)}(x) \\\\ - {\mathbf{G}} &= {\mathbf{G}}\_{(0)} || {\mathbf{G}}\_{(1)} || \dots || {\mathbf{G}}\_{(m-1)} \\\\ - {\mathbf{H}'} &= {\mathbf{H}'}\_{(0)} || {\mathbf{H}'}\_{(1)} || \dots || {\mathbf{H}'}\_{(m-1)} \\\\ + &{\mathbf{l}}(x) &{}&=&{}& {\mathbf{l}}\_{(0)}(x) & {} &||& {} & {\mathbf{l}}\_{(1)}(x) & {} &||& {} & \dots & {} &||& {} & {\mathbf{l}}\_{(m-1)}(x) \\\\ + &{\mathbf{r}}(x) &{}&=&{}& {\mathbf{r}}\_{(0)}(x) & {} &||& {} & {\mathbf{r}}\_{(1)}(x) & {} &||& {} & \dots & {} &||& {} & {\mathbf{r}}\_{(m-1)}(x) \\\\ + &{\mathbf{G}} &{}&=&{}& {\mathbf{G}}\_{(0)} & {} &||& {} & {\mathbf{G}}\_{(1)} & {} &||& {} & \dots & {} &||& {} & {\mathbf{G}}\_{(m-1)} \\\\ + &{\mathbf{H}'} &{}&=&{}& {\mathbf{H}'}\_{(0)} & {} &||& {} & {\mathbf{H}'}\_{(1)} & {} &||& {} & \dots & {} &||& {} & {\mathbf{H}'}\_{(m-1)} +\end{aligned} +\\] +\\[ +\begin{aligned} \mathbf{y}^n\_{(j)} &= \mathbf{y}^{n \cdot m}\_{[j \cdot n : (j+1) \cdot n]} \\\\ - z_{(j)} &= z^j + z_{(j)} &= z^j \end{aligned} \\] From ada5481f9b7a152ed24c02f2abf46ef0e4512a44 Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Fri, 4 May 2018 11:28:05 -0700 Subject: [PATCH 175/186] Prevent a malicious dealer from retrieving the party's secrets @oleganza pointed out that when the dealer's polynomial challenge `x` is zero, the parties will leak secrets, and suggested this check. Informal analysis of the protocol flow: dealer -> party: position party -> dealer: `ValueCommitment` The `ValueCommitment` cannot leak information to the dealer, since it's blinded independently of any dealer messages dealer -> party: `ValueChallenge` Contains `y, z` challenges. party -> dealer: `PolyCommitment` Contains `T_1`, `T_2`, which are blinded independently from any dealer messages and therefore can't leak information dealer -> party: `PolyChallenge` Contains `x` challenge party -> dealer: `ProofShare` Up till now, we know that the each of the party's messages can't reveal info because they're blinded independently of the dealer messages. The paper notes that the blindings for the `l` and `r` vectors are chosen such that the prover can reveal `l(x), r(x)` for one challenge point `x \in \ZZ_p^{\times}` without revealing information, but if `x` is zero, then `\blinding t(x)` is just `z^2 \blinding v`, so the dealer could multiply by `z^2` and recover the blinding for the original commitment. However, if the party checks that `x` is nonzero, then `x \in \ZZ_p^{\times}`, the blinding factors are not annihilated, and the proof share does not leak information. 
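Concretely (a sketch in the same notation, eliding the per-party `z` offset used in the aggregated setting): the blinding polynomial is `\blinding t(x) = z^2 \blinding v + x \blinding t_1 + x^2 \blinding t_2`. For any nonzero `x` the terms `x \blinding t_1 + x^2 \blinding t_2` mask the constant coefficient, but at `x = 0` the revealed share collapses to `z^2 \blinding v`, so a dealer who chose `z` can divide out the known `z^2` factor and recover the blinding `\blinding v` of the value commitment.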
In the previous (non-MPC) version of the code, the `x` was computed by the prover out of the proof transcript, and so it wasn't necessary to check that `x` is nonzero (which occurs with probability `~ 2^{-252}`): as noted by AGL, the instructions required to check this are more likely to fail than the check itself. Another concern is about replay attacks: a malicious dealer who was able to get the party to apply the same `PolyChallenge` multiple times could use polynomial interpolation to recover the coefficients of the `t` polynomial and reveal the party's secrets. (I think three points would be sufficient). However, because our encoding of the protocol flow into affine types ensures that each `Party` state can be used at most once, we are ensured that any compilable instantiation of the MPC protocol is invulnerable to replay attacks, since typechecking the program requires the compiler to prove that states are not reused. --- src/range_proof/mod.rs | 54 ++++++++++++++++++++++++++++++++++++---- src/range_proof/party.rs | 11 +++++--- 2 files changed, 57 insertions(+), 8 deletions(-) diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index af042425..d76164f8 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -105,7 +105,8 @@ impl RangeProof { let proof_shares: Vec<_> = parties .into_iter() .map(|p| p.apply_challenge(&poly_challenge)) - .collect(); + // Collect the iterator of Results into a Result, then unwrap it + .collect::,_>>()?; dealer.receive_trusted_shares(&proof_shares) } @@ -445,10 +446,10 @@ mod tests { .receive_poly_commitments(&[poly_com0, poly_com1, poly_com2, poly_com3]) .unwrap(); - let share0 = party0.apply_challenge(&poly_challenge); - let share1 = party1.apply_challenge(&poly_challenge); - let share2 = party2.apply_challenge(&poly_challenge); - let share3 = party3.apply_challenge(&poly_challenge); + let share0 = party0.apply_challenge(&poly_challenge).unwrap(); + let share1 = party1.apply_challenge(&poly_challenge).unwrap(); + let share2 = party2.apply_challenge(&poly_challenge).unwrap(); + let share3 = party3.apply_challenge(&poly_challenge).unwrap(); match dealer.receive_shares(&mut rng, &[share0, share1, share2, share3]) { Ok(_proof) => { @@ -460,4 +461,47 @@ mod tests { } } } + + #[test] + fn detect_dishonest_dealer_during_aggregation() { + use self::dealer::*; + use self::party::*; + + // Simulate one party + let m = 1; + let n = 32; + + let generators = Generators::new(PedersenGenerators::default(), n, m); + + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let v0 = rng.next_u32() as u64; + let v0_blinding = Scalar::random(&mut rng); + let party0 = Party::new(v0, v0_blinding, n, &generators).unwrap(); + + let dealer = Dealer::new(generators.all(), n, m, &mut transcript).unwrap(); + + // Now do the protocol flow as normal.... 
+ + let (party0, value_com0) = party0.assign_position(0, &mut rng); + + let (dealer, value_challenge) = dealer + .receive_value_commitments(&[value_com0]) + .unwrap(); + + let (party0, poly_com0) = party0.apply_challenge(&value_challenge, &mut rng); + + let (_dealer, mut poly_challenge) = dealer + .receive_poly_commitments(&[poly_com0]) + .unwrap(); + + // But now simulate a malicious dealer choosing x = 0 + poly_challenge.x = Scalar::zero(); + + let maybe_share0 = party0.apply_challenge(&poly_challenge); + + // XXX when we have error types, check finer info than "was error" + assert!(maybe_share0.is_err()); + } } diff --git a/src/range_proof/party.rs b/src/range_proof/party.rs index 55f8a637..0508d65c 100644 --- a/src/range_proof/party.rs +++ b/src/range_proof/party.rs @@ -196,7 +196,12 @@ pub struct PartyAwaitingPolyChallenge { } impl PartyAwaitingPolyChallenge { - pub fn apply_challenge(self, pc: &PolyChallenge) -> ProofShare { + pub fn apply_challenge(self, pc: &PolyChallenge) -> Result { + // Prevent a malicious dealer from annihilating the blinding factors: + if pc.x == Scalar::zero() { + return Err("Poly challenge was zero, which would leak secrets, bailing out"); + } + let t_blinding_poly = util::Poly2( self.z * self.z * self.offset_z * self.v_blinding, self.t_1_blinding, @@ -209,7 +214,7 @@ impl PartyAwaitingPolyChallenge { let l_vec = self.l_poly.eval(pc.x); let r_vec = self.r_poly.eval(pc.x); - ProofShare { + Ok(ProofShare { value_commitment: self.value_commitment, poly_commitment: self.poly_commitment, t_x_blinding, @@ -217,6 +222,6 @@ impl PartyAwaitingPolyChallenge { e_blinding, l_vec, r_vec, - } + }) } } From 04a885ade271478873253b761d820cfc08168f22 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 3 May 2018 13:25:46 -0700 Subject: [PATCH 176/186] generalize and factor out batch verification of range proofs --- src/range_proof/mod.rs | 137 +--------------- src/range_proof/verification.rs | 274 ++++++++++++++++++++++++++++++++ 2 files changed, 282 insertions(+), 129 deletions(-) create mode 100644 src/range_proof/verification.rs diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index d76164f8..a2f80e45 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -3,23 +3,19 @@ use rand::Rng; -use std::iter; - -use curve25519_dalek::ristretto; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::scalar::Scalar; -use curve25519_dalek::traits::IsIdentity; use generators::{Generators, GeneratorsView}; use inner_product_proof::InnerProductProof; use proof_transcript::ProofTranscript; -use util; // Modules for MPC protocol pub mod dealer; pub mod messages; pub mod party; +mod verification; /// The `RangeProof` struct represents a single range proof. #[derive(Serialize, Deserialize, Clone, Debug)] @@ -123,7 +119,7 @@ impl RangeProof { transcript: &mut ProofTranscript, rng: &mut R, n: usize, - ) -> Result<(), ()> { + ) -> Result<(), &'static str> { self.verify(&[*V], gens, transcript, rng, n) } @@ -137,106 +133,15 @@ impl RangeProof { transcript: &mut ProofTranscript, rng: &mut R, n: usize, - ) -> Result<(), ()> { - // First, replay the "interactive" protocol using the proof - // data to recompute all challenges. 
- - let m = value_commitments.len(); - - transcript.commit_u64(n as u64); - transcript.commit_u64(m as u64); - - for V in value_commitments.iter() { - transcript.commit(V.compress().as_bytes()); - } - transcript.commit(self.A.compress().as_bytes()); - transcript.commit(self.S.compress().as_bytes()); - - let y = transcript.challenge_scalar(); - let z = transcript.challenge_scalar(); - let zz = z * z; - let minus_z = -z; - - transcript.commit(self.T_1.compress().as_bytes()); - transcript.commit(self.T_2.compress().as_bytes()); - - let x = transcript.challenge_scalar(); - - transcript.commit(self.t_x.as_bytes()); - transcript.commit(self.t_x_blinding.as_bytes()); - transcript.commit(self.e_blinding.as_bytes()); - - let w = transcript.challenge_scalar(); - - // Challenge value for batching statements to be verified - let c = Scalar::random(rng); - - let (x_sq, x_inv_sq, s) = self.ipp_proof.verification_scalars(transcript); - let s_inv = s.iter().rev(); - - let a = self.ipp_proof.a; - let b = self.ipp_proof.b; - - // Construct concat_z_and_2, an iterator of the values of - // z^0 * \vec(2)^n || z^1 * \vec(2)^n || ... || z^(m-1) * \vec(2)^n - let powers_of_2: Vec = util::exp_iter(Scalar::from_u64(2)).take(n).collect(); - let powers_of_z = util::exp_iter(z).take(m); - let concat_z_and_2 = - powers_of_z.flat_map(|exp_z| powers_of_2.iter().map(move |exp_2| exp_2 * exp_z)); - - let g = s.iter().map(|s_i| minus_z - a * s_i); - let h = s_inv - .zip(util::exp_iter(y.invert())) - .zip(concat_z_and_2) - .map(|((s_i_inv, exp_y_inv), z_and_2)| z + exp_y_inv * (zz * z_and_2 - b * s_i_inv)); - - let value_commitment_scalars = util::exp_iter(z).take(m).map(|z_exp| c * zz * z_exp); - let basepoint_scalar = w * (self.t_x - a * b) + c * (delta(n, m, &y, &z) - self.t_x); - - let mega_check = ristretto::vartime::multiscalar_mul( - iter::once(Scalar::one()) - .chain(iter::once(x)) - .chain(value_commitment_scalars) - .chain(iter::once(c * x)) - .chain(iter::once(c * x * x)) - .chain(iter::once(-self.e_blinding - c * self.t_x_blinding)) - .chain(iter::once(basepoint_scalar)) - .chain(g) - .chain(h) - .chain(x_sq.iter().cloned()) - .chain(x_inv_sq.iter().cloned()), - iter::once(&self.A) - .chain(iter::once(&self.S)) - .chain(value_commitments.iter()) - .chain(iter::once(&self.T_1)) - .chain(iter::once(&self.T_2)) - .chain(iter::once(&gens.pedersen_generators.B_blinding)) - .chain(iter::once(&gens.pedersen_generators.B)) - .chain(gens.G.iter()) - .chain(gens.H.iter()) - .chain(self.ipp_proof.L_vec.iter()) - .chain(self.ipp_proof.R_vec.iter()), - ); - - if mega_check.is_identity() { - Ok(()) - } else { - Err(()) - } + ) -> Result<(), &'static str> { + RangeProof::verify_batch( + &[self.prepare_verification(value_commitments, transcript, rng, n)], + gens, + rng + ) } } -/// Compute -/// \\[ -/// \delta(y,z) = (z - z^{2}) \langle 1, {\mathbf{y}}^{nm} \rangle + z^{3} \langle \mathbf{1}, {\mathbf{2}}^{nm} \rangle -/// \\] -fn delta(n: usize, m: usize, y: &Scalar, z: &Scalar) -> Scalar { - let sum_y = util::sum_of_powers(y, n * m); - let sum_2 = util::sum_of_powers(&Scalar::from_u64(2), n); - let sum_z = util::sum_of_powers(z, m); - - (z - z * z) * sum_y - z * z * z * sum_2 * sum_z -} #[cfg(test)] mod tests { @@ -245,32 +150,6 @@ mod tests { use generators::PedersenGenerators; - #[test] - fn test_delta() { - let mut rng = OsRng::new().unwrap(); - let y = Scalar::random(&mut rng); - let z = Scalar::random(&mut rng); - - // Choose n = 256 to ensure we overflow the group order during - // the computation, to check that that's 
done correctly - let n = 256; - - // code copied from previous implementation - let z2 = z * z; - let z3 = z2 * z; - let mut power_g = Scalar::zero(); - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - for _ in 0..n { - power_g += (z - z2) * exp_y - z3 * exp_2; - - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - - assert_eq!(power_g, delta(n, 1, &y, &z),); - } - /// Given a bitsize `n`, test the following: /// /// 1. Generate `m` random values and create a proof they are all in range; diff --git a/src/range_proof/verification.rs b/src/range_proof/verification.rs new file mode 100644 index 00000000..9928999c --- /dev/null +++ b/src/range_proof/verification.rs @@ -0,0 +1,274 @@ +#![allow(non_snake_case)] + +use rand::Rng; + +use std::iter; +use std::borrow::Borrow; + +use curve25519_dalek::ristretto; +use curve25519_dalek::ristretto::RistrettoPoint; +use curve25519_dalek::scalar::Scalar; +use curve25519_dalek::traits::IsIdentity; + +use generators::GeneratorsView; +use proof_transcript::ProofTranscript; +use range_proof::RangeProof; +use util; + +/// Represents a deferred computation to verify a single rangeproof. +/// Multiple instances can be verified more efficient as a batch using +/// `RangeProof::verify_batch` function. +pub struct Verification { + /// Number of commitments in the aggregated proof + m: usize, + + /// Size of the range in bits + n: usize, + + /// Pair of scalars multiplying pedersen bases `B`, `B_blinding`. + pedersen_base_scalars: (Scalar, Scalar), + + /// List of scalars for `n*m` `G` bases. These are separated from `h_scalars` + /// so we can easily pad them when verifying proofs with different `m`s. + g_scalars: Vec, + + /// List of scalars for `n*m` `H` bases. These are separated from `g_scalars` + /// so we can easily pad them when verifying proofs with different `m`s. + h_scalars: Vec, + + /// List of scalars for any number of dynamic bases. + dynamic_base_scalars: Vec, + + /// List of dynamic bases for the corresponding scalars. + dynamic_bases: Vec, +} + +impl RangeProof { + + /// Prepares a `Verification` struct + /// that can be combined with others in a batch. + pub fn prepare_verification( + &self, + value_commitments: &[RistrettoPoint], + transcript: &mut ProofTranscript, + rng: &mut R, + n: usize, + ) -> Verification { + // First, replay the "interactive" protocol using the proof + // data to recompute all challenges. 
+ + let m = value_commitments.as_ref().len(); + + transcript.commit_u64(n as u64); + transcript.commit_u64(m as u64); + + for V in value_commitments.as_ref().iter() { + transcript.commit(V.borrow().compress().as_bytes()); + } + transcript.commit(self.A.compress().as_bytes()); + transcript.commit(self.S.compress().as_bytes()); + + let y = transcript.challenge_scalar(); + let z = transcript.challenge_scalar(); + let zz = z * z; + let minus_z = -z; + + transcript.commit(self.T_1.compress().as_bytes()); + transcript.commit(self.T_2.compress().as_bytes()); + + let x = transcript.challenge_scalar(); + + transcript.commit(self.t_x.as_bytes()); + transcript.commit(self.t_x_blinding.as_bytes()); + transcript.commit(self.e_blinding.as_bytes()); + + let w = transcript.challenge_scalar(); + + // Challenge value for batching statements to be verified + let c = Scalar::random(rng); + + let (x_sq, x_inv_sq, s) = self.ipp_proof.verification_scalars(transcript); + let s_inv = s.iter().rev(); + + let a = self.ipp_proof.a; + let b = self.ipp_proof.b; + + // Construct concat_z_and_2, an iterator of the values of + // z^0 * \vec(2)^n || z^1 * \vec(2)^n || ... || z^(m-1) * \vec(2)^n + let powers_of_2: Vec = util::exp_iter(Scalar::from_u64(2)).take(n).collect(); + let powers_of_z = util::exp_iter(z).take(m); + let concat_z_and_2 = + powers_of_z.flat_map(|exp_z| powers_of_2.iter().map(move |exp_2| exp_2 * exp_z)); + + let g = s.iter().map(|s_i| minus_z - a * s_i); + let h = s_inv + .zip(util::exp_iter(y.invert())) + .zip(concat_z_and_2) + .map(|((s_i_inv, exp_y_inv), z_and_2)| z + exp_y_inv * (zz * z_and_2 - b * s_i_inv)); + + let value_commitment_scalars = util::exp_iter(z).take(m).map(|z_exp| c * zz * z_exp); + let basepoint_scalar = w * (self.t_x - a * b) + c * (delta(n, m, &y, &z) - self.t_x); + + Verification { + m, + n, + pedersen_base_scalars: (basepoint_scalar, -self.e_blinding - c * self.t_x_blinding), + g_scalars: g.collect(), + h_scalars: h.collect(), + dynamic_base_scalars: + iter::once(Scalar::one()) + .chain(iter::once(x)) + .chain(value_commitment_scalars) + .chain(iter::once(c * x)) + .chain(iter::once(c * x * x)) + .chain(x_sq.iter().cloned()) + .chain(x_inv_sq.iter().cloned()) + .collect(), + dynamic_bases: + iter::once(&self.A) + .chain(iter::once(&self.S)) + .chain(value_commitments.iter()) + .chain(iter::once(&self.T_1)) + .chain(iter::once(&self.T_2)) + .chain(self.ipp_proof.L_vec.iter()) + .chain(self.ipp_proof.R_vec.iter()) + .cloned() + .collect() + } + } + + /// Verifies multiple range proofs at once. + /// If any range proof is invalid, the whole batch is invalid. + /// All proofs must use the same range of `n` bits, + /// but are allowed to have different number of proven values. + /// You must provide big enough view into generators (`gens`) that covers + /// the biggest proof. + pub fn verify_batch>( + batch: &[V], + gens: GeneratorsView, + rng: &mut R + ) -> Result<(), &'static str> { + // we will special-case the first item to avoid unnecessary multiplication, + // so lets check that we have at least one item. 
+ if batch.len() == 0 { + return Ok(()) + } + + let n = batch[0].borrow().n; + + // Make sure all proofs use the same range + if batch.iter().any(|v| v.borrow().n != n) { + return Err("Inconsistent range size `n` for all proofs in a batch") + } + + // Make sure we have enough static generators + let m = batch.iter().map(|v| v.borrow().m).max().unwrap_or(0); + if gens.G.len() < (n * m) { + return Err("The generators view does not have enough generators for the largest proof") + } + + // First statement is used without a random factor + let mut pedersen_base_scalars: (Scalar, Scalar) = batch[0].borrow().pedersen_base_scalars; + let mut g_scalars: Vec = batch[0].borrow().g_scalars.clone(); + let mut h_scalars: Vec = batch[0].borrow().h_scalars.clone(); + + // pad static scalars to the largest proof + g_scalars.resize(n*m, Scalar::zero()); + h_scalars.resize(n*m, Scalar::zero()); + + let mut dynamic_base_scalars: Vec = batch[0].borrow().dynamic_base_scalars.clone(); + let mut dynamic_bases: Vec = batch[0].borrow().dynamic_bases.clone(); + + // Other statements are added with a random factor per statement + for borrowable_verification in &batch[1..] { + let verification = borrowable_verification.borrow(); + let batch_challenge = Scalar::random(rng); + + pedersen_base_scalars.0 = pedersen_base_scalars.0 + batch_challenge*verification.pedersen_base_scalars.0; + pedersen_base_scalars.1 = pedersen_base_scalars.1 + batch_challenge*verification.pedersen_base_scalars.1; + + g_scalars = g_scalars.iter() + .zip(verification.g_scalars.iter()) + .map(|(total, s)| total + batch_challenge*s ) + .collect(); + + h_scalars = h_scalars.iter() + .zip(verification.h_scalars.iter()) + .map(|(total, s)| total + batch_challenge*s ) + .collect(); + + dynamic_base_scalars = dynamic_base_scalars.iter() + .cloned() + .chain(verification.dynamic_base_scalars.iter().map(|s| batch_challenge*s )) + .collect(); + + dynamic_bases = dynamic_bases.iter() + .chain(verification.dynamic_bases.iter()) + .cloned() + .collect(); + } + + let mega_check = ristretto::vartime::multiscalar_mul( + iter::once(&pedersen_base_scalars.0) + .chain(iter::once(&pedersen_base_scalars.1)) + .chain(g_scalars.iter()) + .chain(h_scalars.iter()) + .chain(dynamic_base_scalars.iter()), + iter::once(&gens.pedersen_generators.B) + .chain(iter::once(&gens.pedersen_generators.B_blinding)) + .chain(gens.G.iter()) + .chain(gens.H.iter()) + .chain(dynamic_bases.iter()) + ); + + if mega_check.is_identity() { + Ok(()) + } else { + Err("Batch verification failed") + } + } +} + +/// Compute +/// \\[ +/// \delta(y,z) = (z - z^{2}) \langle 1, {\mathbf{y}}^{nm} \rangle + z^{3} \langle \mathbf{1}, {\mathbf{2}}^{nm} \rangle +/// \\] +fn delta(n: usize, m: usize, y: &Scalar, z: &Scalar) -> Scalar { + let sum_y = util::sum_of_powers(y, n * m); + let sum_2 = util::sum_of_powers(&Scalar::from_u64(2), n); + let sum_z = util::sum_of_powers(z, m); + + (z - z * z) * sum_y - z * z * z * sum_2 * sum_z +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::OsRng; + + #[test] + fn test_delta() { + let mut rng = OsRng::new().unwrap(); + let y = Scalar::random(&mut rng); + let z = Scalar::random(&mut rng); + + // Choose n = 256 to ensure we overflow the group order during + // the computation, to check that that's done correctly + let n = 256; + + // code copied from previous implementation + let z2 = z * z; + let z3 = z2 * z; + let mut power_g = Scalar::zero(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + for _ in 
0..n { + power_g += (z - z2) * exp_y - z3 * exp_2; + + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + assert_eq!(power_g, delta(n, 1, &y, &z),); + } +} \ No newline at end of file From ed6b0f2eabb5e1351d64afac63f40a2893eb0893 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 3 May 2018 14:02:08 -0700 Subject: [PATCH 177/186] allow rangeproofs with different n,m to be batched --- src/range_proof/mod.rs | 193 ++++++++++++++++++++++++++------ src/range_proof/verification.rs | 35 +++--- 2 files changed, 170 insertions(+), 58 deletions(-) diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index a2f80e45..33cd3994 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -166,42 +166,7 @@ mod tests { // Both prover and verifier have access to the generators and the proof let generators = Generators::new(PedersenGenerators::default(), n, m); - // Serialized proof data - let proof_bytes: Vec; - let value_commitments: Vec; - - // Prover's scope - { - // 1. Generate the proof - - let mut rng = OsRng::new().unwrap(); - let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - let (min, max) = (0u64, ((1u128 << n) - 1) as u64); - let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); - let blindings: Vec = (0..m).map(|_| Scalar::random(&mut rng)).collect(); - - let proof = RangeProof::prove_multiple( - &generators, - &mut transcript, - &mut rng, - &values, - &blindings, - n, - ).unwrap(); - - // 2. Serialize - proof_bytes = bincode::serialize(&proof).unwrap(); - - let pg = &generators.all().pedersen_generators; - - // XXX would be nice to have some convenience API for this - value_commitments = values - .iter() - .zip(blindings.iter()) - .map(|(&v, &v_blinding)| pg.commit(Scalar::from_u64(v), v_blinding)) - .collect(); - } + let (proof_bytes, value_commitments) = singleparty_create_helper(n,m); println!( "Aggregated rangeproof of m={} proofs of n={} bits has size {} bytes", @@ -233,6 +198,55 @@ mod tests { } } + /// Generates a `n`-bit rangeproof for `m` commitments. + /// Returns serialized proof and the list of commitments. + fn singleparty_create_helper(n: usize, m: usize) -> (Vec, Vec) { + // Split the test into two scopes, so that it's explicit what + // data is shared between the prover and the verifier. + + // Use bincode for serialization + use bincode; + + // Both prover and verifier have access to the generators and the proof + let generators = Generators::new(PedersenGenerators::default(), n, m); + + // Serialized proof data + let proof_bytes: Vec; + let value_commitments: Vec; + + // 1. Generate the proof + + let mut rng = OsRng::new().unwrap(); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let (min, max) = (0u64, ((1u128 << n) - 1) as u64); + let values: Vec = (0..m).map(|_| rng.gen_range(min, max)).collect(); + let blindings: Vec = (0..m).map(|_| Scalar::random(&mut rng)).collect(); + + let proof = RangeProof::prove_multiple( + &generators, + &mut transcript, + &mut rng, + &values, + &blindings, + n, + ).unwrap(); + + // 2. 
Serialize + proof_bytes = bincode::serialize(&proof).unwrap(); + + let pg = &generators.all().pedersen_generators; + + // XXX would be nice to have some convenience API for this + value_commitments = values + .iter() + .zip(blindings.iter()) + .map(|(&v, &v_blinding)| pg.commit(Scalar::from_u64(v), v_blinding)) + .collect(); + + (proof_bytes, value_commitments) + } + #[test] fn create_and_verify_n_32_m_1() { singleparty_create_and_verify_helper(32, 1); @@ -273,6 +287,113 @@ mod tests { singleparty_create_and_verify_helper(64, 8); } + #[test] + fn batch_verify_n_32_m_1() { + use bincode; + + let mut rng = OsRng::new().unwrap(); + let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let n = 32; + let m = 1; + let (p1, vc1) = singleparty_create_helper(n,m); + let (p2, vc2) = singleparty_create_helper(n,m); + let (p3, vc3) = singleparty_create_helper(n,m); + + let ver1 = bincode::deserialize::(&p1).unwrap().prepare_verification(&vc1, &mut transcript.clone(), &mut rng, n); + let ver2 = bincode::deserialize::(&p2).unwrap().prepare_verification(&vc2, &mut transcript.clone(), &mut rng, n); + let ver3 = bincode::deserialize::(&p3).unwrap().prepare_verification(&vc3, &mut transcript.clone(), &mut rng, n); + + let generators = Generators::new(PedersenGenerators::default(), n, m); + + assert!( + RangeProof::verify_batch( + &[ver1, ver2, ver3], + generators.all(), + &mut rng + ).is_ok() + ); + } + + #[test] + fn batch_verify_n_64_m_differ() { + use bincode; + + let mut rng = OsRng::new().unwrap(); + let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let n = 64; + let (p1, vc1) = singleparty_create_helper(n,1); + let (p2, vc2) = singleparty_create_helper(n,2); + let (p3, vc3) = singleparty_create_helper(n,4); + + let ver1 = bincode::deserialize::(&p1).unwrap().prepare_verification(&vc1, &mut transcript.clone(), &mut rng, n); + let ver2 = bincode::deserialize::(&p2).unwrap().prepare_verification(&vc2, &mut transcript.clone(), &mut rng, n); + let ver3 = bincode::deserialize::(&p3).unwrap().prepare_verification(&vc3, &mut transcript.clone(), &mut rng, n); + + let generators = Generators::new(PedersenGenerators::default(), n, 4); + + assert!( + RangeProof::verify_batch( + &[ver1, ver2, ver3], + generators.all(), + &mut rng + ).is_ok() + ); + } + + #[test] + fn batch_verify_n_differ_m_differ_total_64() { + use bincode; + + let mut rng = OsRng::new().unwrap(); + let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let (p1, vc1) = singleparty_create_helper(64,1); + let (p2, vc2) = singleparty_create_helper(32,2); + let (p3, vc3) = singleparty_create_helper(16,4); + + let ver1 = bincode::deserialize::(&p1).unwrap().prepare_verification(&vc1, &mut transcript.clone(), &mut rng, 64); + let ver2 = bincode::deserialize::(&p2).unwrap().prepare_verification(&vc2, &mut transcript.clone(), &mut rng, 32); + let ver3 = bincode::deserialize::(&p3).unwrap().prepare_verification(&vc3, &mut transcript.clone(), &mut rng, 16); + + let generators = Generators::new(PedersenGenerators::default(), 64, 4); + + assert!( + RangeProof::verify_batch( + &[ver1, ver2, ver3], + generators.all(), + &mut rng + ).is_ok() + ); + } + + #[test] + fn batch_verify_n_differ_m_differ_total_256() { + use bincode; + + let mut rng = OsRng::new().unwrap(); + let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let (p1, vc1) = singleparty_create_helper(16,1); + let (p2, vc2) = singleparty_create_helper(32,2); + let (p3, vc3) = singleparty_create_helper(64,4); + + let ver1 = 
bincode::deserialize::(&p1).unwrap().prepare_verification(&vc1, &mut transcript.clone(), &mut rng, 16); + let ver2 = bincode::deserialize::(&p2).unwrap().prepare_verification(&vc2, &mut transcript.clone(), &mut rng, 32); + let ver3 = bincode::deserialize::(&p3).unwrap().prepare_verification(&vc3, &mut transcript.clone(), &mut rng, 64); + + let generators = Generators::new(PedersenGenerators::default(), 64, 4); + + assert!( + RangeProof::verify_batch( + &[ver1, ver2, ver3], + generators.all(), + &mut rng + ).is_ok() + ); + } + #[test] fn detect_dishonest_party_during_aggregation() { use self::dealer::*; diff --git a/src/range_proof/verification.rs b/src/range_proof/verification.rs index 9928999c..3af75e01 100644 --- a/src/range_proof/verification.rs +++ b/src/range_proof/verification.rs @@ -139,10 +139,9 @@ impl RangeProof { /// Verifies multiple range proofs at once. /// If any range proof is invalid, the whole batch is invalid. - /// All proofs must use the same range of `n` bits, - /// but are allowed to have different number of proven values. + /// Proofs may use different ranges (`n`) or different number of aggregated commitments (`m`). /// You must provide big enough view into generators (`gens`) that covers - /// the biggest proof. + /// the biggest proof pub fn verify_batch>( batch: &[V], gens: GeneratorsView, @@ -154,14 +153,8 @@ impl RangeProof { return Ok(()) } - let n = batch[0].borrow().n; - - // Make sure all proofs use the same range - if batch.iter().any(|v| v.borrow().n != n) { - return Err("Inconsistent range size `n` for all proofs in a batch") - } - // Make sure we have enough static generators + let n = batch.iter().map(|v| v.borrow().n).max().unwrap_or(0); let m = batch.iter().map(|v| v.borrow().m).max().unwrap_or(0); if gens.G.len() < (n * m) { return Err("The generators view does not have enough generators for the largest proof") @@ -175,7 +168,7 @@ impl RangeProof { // pad static scalars to the largest proof g_scalars.resize(n*m, Scalar::zero()); h_scalars.resize(n*m, Scalar::zero()); - + let mut dynamic_base_scalars: Vec = batch[0].borrow().dynamic_base_scalars.clone(); let mut dynamic_bases: Vec = batch[0].borrow().dynamic_bases.clone(); @@ -184,18 +177,16 @@ impl RangeProof { let verification = borrowable_verification.borrow(); let batch_challenge = Scalar::random(rng); - pedersen_base_scalars.0 = pedersen_base_scalars.0 + batch_challenge*verification.pedersen_base_scalars.0; - pedersen_base_scalars.1 = pedersen_base_scalars.1 + batch_challenge*verification.pedersen_base_scalars.1; - - g_scalars = g_scalars.iter() - .zip(verification.g_scalars.iter()) - .map(|(total, s)| total + batch_challenge*s ) - .collect(); + pedersen_base_scalars.0 += batch_challenge*verification.pedersen_base_scalars.0; + pedersen_base_scalars.1 += batch_challenge*verification.pedersen_base_scalars.1; - h_scalars = h_scalars.iter() - .zip(verification.h_scalars.iter()) - .map(|(total, s)| total + batch_challenge*s ) - .collect(); + // Note: this loop may be shorter than the total amount of scalars if `m < max({m})` + for (i, s) in verification.g_scalars.iter().enumerate() { + g_scalars[i] += batch_challenge*s; + } + for (i, s) in verification.h_scalars.iter().enumerate() { + h_scalars[i] += batch_challenge*s; + } dynamic_base_scalars = dynamic_base_scalars.iter() .cloned() From ae0239f86fb8d6e7df21d246b7f28a7e72e16d8e Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 3 May 2018 14:11:08 -0700 Subject: [PATCH 178/186] refactor batch verification tests --- src/range_proof/mod.rs | 132 
+++++++++++++---------------------------- 1 file changed, 41 insertions(+), 91 deletions(-) diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index 33cd3994..b764e33f 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -198,6 +198,40 @@ mod tests { } } + /// Generates and verifies a number of proofs in a batch + /// with the given pairs of `n,m` parameters (range in bits, number of commitments). + fn batch_verify_helper(nm: &[(usize, usize)]) { + use bincode; + + let mut rng = OsRng::new().unwrap(); + let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + + let max_n = nm.iter().map(|(n,_)| *n).max().unwrap_or(0); + let max_m = nm.iter().map(|(_,m)| *m).max().unwrap_or(0); + let verifications = nm.iter().map(|(n,m)| { + let (p, vc) = singleparty_create_helper(*n,*m); + bincode::deserialize::(&p) + .unwrap() + .prepare_verification( + &vc, + &mut transcript.clone(), + &mut rng, + *n + ) + }).collect::>(); + + let generators = Generators::new(PedersenGenerators::default(), max_n, max_m); + + assert!( + RangeProof::verify_batch( + verifications.as_slice(), + generators.all(), + &mut rng + ).is_ok() + ); + } + + /// Generates a `n`-bit rangeproof for `m` commitments. /// Returns serialized proof and the list of commitments. fn singleparty_create_helper(n: usize, m: usize) -> (Vec, Vec) { @@ -289,109 +323,25 @@ mod tests { #[test] fn batch_verify_n_32_m_1() { - use bincode; - - let mut rng = OsRng::new().unwrap(); - let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - let n = 32; - let m = 1; - let (p1, vc1) = singleparty_create_helper(n,m); - let (p2, vc2) = singleparty_create_helper(n,m); - let (p3, vc3) = singleparty_create_helper(n,m); - - let ver1 = bincode::deserialize::(&p1).unwrap().prepare_verification(&vc1, &mut transcript.clone(), &mut rng, n); - let ver2 = bincode::deserialize::(&p2).unwrap().prepare_verification(&vc2, &mut transcript.clone(), &mut rng, n); - let ver3 = bincode::deserialize::(&p3).unwrap().prepare_verification(&vc3, &mut transcript.clone(), &mut rng, n); - - let generators = Generators::new(PedersenGenerators::default(), n, m); - - assert!( - RangeProof::verify_batch( - &[ver1, ver2, ver3], - generators.all(), - &mut rng - ).is_ok() - ); + batch_verify_helper(&[(32, 1)]); + batch_verify_helper(&[(32, 1), (32, 1)]); + batch_verify_helper(&[(32, 1), (32, 1), (32, 1)]); } #[test] fn batch_verify_n_64_m_differ() { - use bincode; - - let mut rng = OsRng::new().unwrap(); - let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - let n = 64; - let (p1, vc1) = singleparty_create_helper(n,1); - let (p2, vc2) = singleparty_create_helper(n,2); - let (p3, vc3) = singleparty_create_helper(n,4); - - let ver1 = bincode::deserialize::(&p1).unwrap().prepare_verification(&vc1, &mut transcript.clone(), &mut rng, n); - let ver2 = bincode::deserialize::(&p2).unwrap().prepare_verification(&vc2, &mut transcript.clone(), &mut rng, n); - let ver3 = bincode::deserialize::(&p3).unwrap().prepare_verification(&vc3, &mut transcript.clone(), &mut rng, n); - - let generators = Generators::new(PedersenGenerators::default(), n, 4); - - assert!( - RangeProof::verify_batch( - &[ver1, ver2, ver3], - generators.all(), - &mut rng - ).is_ok() - ); + batch_verify_helper(&[(32, 1), (32, 2)]); + batch_verify_helper(&[(32, 1), (32, 2), (32, 4)]); } #[test] fn batch_verify_n_differ_m_differ_total_64() { - use bincode; - - let mut rng = OsRng::new().unwrap(); - let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - let 
(p1, vc1) = singleparty_create_helper(64,1); - let (p2, vc2) = singleparty_create_helper(32,2); - let (p3, vc3) = singleparty_create_helper(16,4); - - let ver1 = bincode::deserialize::(&p1).unwrap().prepare_verification(&vc1, &mut transcript.clone(), &mut rng, 64); - let ver2 = bincode::deserialize::(&p2).unwrap().prepare_verification(&vc2, &mut transcript.clone(), &mut rng, 32); - let ver3 = bincode::deserialize::(&p3).unwrap().prepare_verification(&vc3, &mut transcript.clone(), &mut rng, 16); - - let generators = Generators::new(PedersenGenerators::default(), 64, 4); - - assert!( - RangeProof::verify_batch( - &[ver1, ver2, ver3], - generators.all(), - &mut rng - ).is_ok() - ); + batch_verify_helper(&[(64, 1), (32, 2), (16,4)]); } #[test] fn batch_verify_n_differ_m_differ_total_256() { - use bincode; - - let mut rng = OsRng::new().unwrap(); - let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - - let (p1, vc1) = singleparty_create_helper(16,1); - let (p2, vc2) = singleparty_create_helper(32,2); - let (p3, vc3) = singleparty_create_helper(64,4); - - let ver1 = bincode::deserialize::(&p1).unwrap().prepare_verification(&vc1, &mut transcript.clone(), &mut rng, 16); - let ver2 = bincode::deserialize::(&p2).unwrap().prepare_verification(&vc2, &mut transcript.clone(), &mut rng, 32); - let ver3 = bincode::deserialize::(&p3).unwrap().prepare_verification(&vc3, &mut transcript.clone(), &mut rng, 64); - - let generators = Generators::new(PedersenGenerators::default(), 64, 4); - - assert!( - RangeProof::verify_batch( - &[ver1, ver2, ver3], - generators.all(), - &mut rng - ).is_ok() - ); + batch_verify_helper(&[(16, 1), (32, 2), (64,4)]); } #[test] From 87fc9f59c395b1d17171afaf66e9f9c9cbd72704 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 3 May 2018 14:56:11 -0700 Subject: [PATCH 179/186] make sure Verification type is not dropped w/o verification --- src/range_proof/mod.rs | 9 ++++--- src/range_proof/verification.rs | 47 ++++++++++++++++++++++----------- 2 files changed, 37 insertions(+), 19 deletions(-) diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index b764e33f..f4d4abe5 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -135,7 +135,7 @@ impl RangeProof { n: usize, ) -> Result<(), &'static str> { RangeProof::verify_batch( - &[self.prepare_verification(value_commitments, transcript, rng, n)], + [self.prepare_verification(value_commitments, transcript, rng, n)], gens, rng ) @@ -206,8 +206,6 @@ mod tests { let mut rng = OsRng::new().unwrap(); let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - let max_n = nm.iter().map(|(n,_)| *n).max().unwrap_or(0); - let max_m = nm.iter().map(|(_,m)| *m).max().unwrap_or(0); let verifications = nm.iter().map(|(n,m)| { let (p, vc) = singleparty_create_helper(*n,*m); bincode::deserialize::(&p) @@ -220,11 +218,14 @@ mod tests { ) }).collect::>(); + let max_n = verifications.iter().map(|v| v.n).max().unwrap_or(0); + let max_m = verifications.iter().map(|v| v.m).max().unwrap_or(0); + let generators = Generators::new(PedersenGenerators::default(), max_n, max_m); assert!( RangeProof::verify_batch( - verifications.as_slice(), + verifications, generators.all(), &mut rng ).is_ok() diff --git a/src/range_proof/verification.rs b/src/range_proof/verification.rs index 3af75e01..3ca2284e 100644 --- a/src/range_proof/verification.rs +++ b/src/range_proof/verification.rs @@ -20,10 +20,10 @@ use util; /// `RangeProof::verify_batch` function. 
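The `verified` flag and `Drop` impl introduced in this patch act as a drop guard: a prepared `Verification` that goes out of scope without passing through `RangeProof::verify_batch` panics, so a deferred check cannot be silently skipped. A minimal sketch of the same pattern, with illustrative names rather than this crate's types:

    // Sketch of a "must consume" guard; names are illustrative.
    struct MustVerify {
        verified: bool,
    }

    impl MustVerify {
        fn new() -> Self {
            MustVerify { verified: false }
        }

        // Consuming the guard marks it verified before it is dropped.
        fn verify(mut self) -> Result<(), &'static str> {
            self.verified = true;
            Ok(())
        }
    }

    impl Drop for MustVerify {
        fn drop(&mut self) {
            if !self.verified {
                panic!("MustVerify dropped without being verified");
            }
        }
    }

    fn main() {
        MustVerify::new().verify().unwrap(); // fine: consumed before drop
        // MustVerify::new();                // dropping this unused guard would panic
    }

A lighter-weight alternative would be a `#[must_use]` annotation, which only warns; the panic in `Drop` trades ergonomics for a hard guarantee that no verification is forgotten.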
pub struct Verification { /// Number of commitments in the aggregated proof - m: usize, + pub m: usize, /// Size of the range in bits - n: usize, + pub n: usize, /// Pair of scalars multiplying pedersen bases `B`, `B_blinding`. pedersen_base_scalars: (Scalar, Scalar), @@ -41,6 +41,19 @@ pub struct Verification { /// List of dynamic bases for the corresponding scalars. dynamic_bases: Vec, + + /// Internal flag that prevents accidentally dropping + /// this struct without properly verifying it first. + verified: bool, +} + +impl Drop for Verification { + fn drop(&mut self) { + if !self.verified { + panic!("Deferred range proof Verification was not explicitly \ + verified using `RangeProof::verify_batch`!"); + } + } } impl RangeProof { @@ -133,7 +146,8 @@ impl RangeProof { .chain(self.ipp_proof.L_vec.iter()) .chain(self.ipp_proof.R_vec.iter()) .cloned() - .collect() + .collect(), + verified: false } } @@ -142,11 +156,13 @@ impl RangeProof { /// Proofs may use different ranges (`n`) or different number of aggregated commitments (`m`). /// You must provide big enough view into generators (`gens`) that covers /// the biggest proof - pub fn verify_batch>( - batch: &[V], - gens: GeneratorsView, + pub fn verify_batch>( + mut batch: B, + gens: GeneratorsView, rng: &mut R ) -> Result<(), &'static str> { + let batch = batch.as_mut(); + // we will special-case the first item to avoid unnecessary multiplication, // so lets check that we have at least one item. if batch.len() == 0 { @@ -154,27 +170,28 @@ impl RangeProof { } // Make sure we have enough static generators - let n = batch.iter().map(|v| v.borrow().n).max().unwrap_or(0); - let m = batch.iter().map(|v| v.borrow().m).max().unwrap_or(0); + let n = batch.iter().map(|v| v.n).max().unwrap_or(0); + let m = batch.iter().map(|v| v.m).max().unwrap_or(0); if gens.G.len() < (n * m) { return Err("The generators view does not have enough generators for the largest proof") } // First statement is used without a random factor - let mut pedersen_base_scalars: (Scalar, Scalar) = batch[0].borrow().pedersen_base_scalars; - let mut g_scalars: Vec = batch[0].borrow().g_scalars.clone(); - let mut h_scalars: Vec = batch[0].borrow().h_scalars.clone(); + batch[0].verified = true; + let mut pedersen_base_scalars: (Scalar, Scalar) = batch[0].pedersen_base_scalars; + let mut g_scalars: Vec = batch[0].g_scalars.clone(); + let mut h_scalars: Vec = batch[0].h_scalars.clone(); // pad static scalars to the largest proof g_scalars.resize(n*m, Scalar::zero()); h_scalars.resize(n*m, Scalar::zero()); - let mut dynamic_base_scalars: Vec = batch[0].borrow().dynamic_base_scalars.clone(); - let mut dynamic_bases: Vec = batch[0].borrow().dynamic_bases.clone(); + let mut dynamic_base_scalars: Vec = batch[0].dynamic_base_scalars.clone(); + let mut dynamic_bases: Vec = batch[0].dynamic_bases.clone(); // Other statements are added with a random factor per statement - for borrowable_verification in &batch[1..] { - let verification = borrowable_verification.borrow(); + for verification in &mut batch[1..] 
{ + verification.verified = true; let batch_challenge = Scalar::random(rng); pedersen_base_scalars.0 += batch_challenge*verification.pedersen_base_scalars.0; From 44f23d9c329ed29af6bc8e4d42753bb5d39a4656 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Thu, 3 May 2018 15:01:43 -0700 Subject: [PATCH 180/186] cargo fmt --- src/range_proof/mod.rs | 45 +++++------ src/range_proof/verification.rs | 133 +++++++++++++++++--------------- 2 files changed, 90 insertions(+), 88 deletions(-) diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index f4d4abe5..1fe02bea 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -135,9 +135,11 @@ impl RangeProof { n: usize, ) -> Result<(), &'static str> { RangeProof::verify_batch( - [self.prepare_verification(value_commitments, transcript, rng, n)], - gens, - rng + [ + self.prepare_verification(value_commitments, transcript, rng, n), + ], + gens, + rng, ) } } @@ -166,7 +168,7 @@ mod tests { // Both prover and verifier have access to the generators and the proof let generators = Generators::new(PedersenGenerators::default(), n, m); - let (proof_bytes, value_commitments) = singleparty_create_helper(n,m); + let (proof_bytes, value_commitments) = singleparty_create_helper(n, m); println!( "Aggregated rangeproof of m={} proofs of n={} bits has size {} bytes", @@ -206,30 +208,21 @@ mod tests { let mut rng = OsRng::new().unwrap(); let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - let verifications = nm.iter().map(|(n,m)| { - let (p, vc) = singleparty_create_helper(*n,*m); - bincode::deserialize::(&p) - .unwrap() - .prepare_verification( - &vc, - &mut transcript.clone(), - &mut rng, - *n - ) - }).collect::>(); + let verifications = nm.iter() + .map(|(n, m)| { + let (p, vc) = singleparty_create_helper(*n, *m); + bincode::deserialize::(&p) + .unwrap() + .prepare_verification(&vc, &mut transcript.clone(), &mut rng, *n) + }) + .collect::>(); let max_n = verifications.iter().map(|v| v.n).max().unwrap_or(0); let max_m = verifications.iter().map(|v| v.m).max().unwrap_or(0); let generators = Generators::new(PedersenGenerators::default(), max_n, max_m); - assert!( - RangeProof::verify_batch( - verifications, - generators.all(), - &mut rng - ).is_ok() - ); + assert!(RangeProof::verify_batch(verifications, generators.all(), &mut rng).is_ok()); } @@ -276,7 +269,9 @@ mod tests { value_commitments = values .iter() .zip(blindings.iter()) - .map(|(&v, &v_blinding)| pg.commit(Scalar::from_u64(v), v_blinding)) + .map(|(&v, &v_blinding)| { + pg.commit(Scalar::from_u64(v), v_blinding) + }) .collect(); (proof_bytes, value_commitments) @@ -337,12 +332,12 @@ mod tests { #[test] fn batch_verify_n_differ_m_differ_total_64() { - batch_verify_helper(&[(64, 1), (32, 2), (16,4)]); + batch_verify_helper(&[(64, 1), (32, 2), (16, 4)]); } #[test] fn batch_verify_n_differ_m_differ_total_256() { - batch_verify_helper(&[(16, 1), (32, 2), (64,4)]); + batch_verify_helper(&[(16, 1), (32, 2), (64, 4)]); } #[test] diff --git a/src/range_proof/verification.rs b/src/range_proof/verification.rs index 3ca2284e..f7132b85 100644 --- a/src/range_proof/verification.rs +++ b/src/range_proof/verification.rs @@ -15,32 +15,32 @@ use proof_transcript::ProofTranscript; use range_proof::RangeProof; use util; -/// Represents a deferred computation to verify a single rangeproof. +/// Represents a deferred computation to verify a single rangeproof. /// Multiple instances can be verified more efficient as a batch using /// `RangeProof::verify_batch` function. 
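Each statement in the batch asserts that one large multiscalar combination is the identity point. Summing the statements with an independent random `batch_challenge` per statement keeps the check sound: a random linear combination of points is the identity with non-negligible probability only if every term is the identity, so one invalid proof cannot be cancelled by another. A self-contained sketch of that principle, assuming only the curve25519-dalek calls already used in this crate:

    extern crate curve25519_dalek;
    extern crate rand;

    use curve25519_dalek::ristretto;
    use curve25519_dalek::ristretto::RistrettoPoint;
    use curve25519_dalek::scalar::Scalar;
    use curve25519_dalek::traits::{Identity, IsIdentity};
    use rand::OsRng;

    // Check several "this point must be the identity" claims at once by
    // weighting each claim with a fresh random scalar and testing the sum.
    fn batch_is_identity(claims: &[RistrettoPoint]) -> bool {
        let mut rng = OsRng::new().unwrap();
        let weights: Vec<Scalar> = claims.iter().map(|_| Scalar::random(&mut rng)).collect();
        ristretto::vartime::multiscalar_mul(weights.iter(), claims.iter()).is_identity()
    }

    fn main() {
        let mut rng = OsRng::new().unwrap();
        // Honest claims (identity points) pass the combined check.
        assert!(batch_is_identity(&[RistrettoPoint::identity(), RistrettoPoint::identity()]));
        // A single bad claim makes the whole batch fail, except with negligible probability.
        assert!(!batch_is_identity(&[RistrettoPoint::identity(), RistrettoPoint::random(&mut rng)]));
    }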
pub struct Verification { - /// Number of commitments in the aggregated proof - pub m: usize, + /// Number of commitments in the aggregated proof + pub m: usize, - /// Size of the range in bits - pub n: usize, + /// Size of the range in bits + pub n: usize, - /// Pair of scalars multiplying pedersen bases `B`, `B_blinding`. - pedersen_base_scalars: (Scalar, Scalar), + /// Pair of scalars multiplying pedersen bases `B`, `B_blinding`. + pedersen_base_scalars: (Scalar, Scalar), - /// List of scalars for `n*m` `G` bases. These are separated from `h_scalars` - /// so we can easily pad them when verifying proofs with different `m`s. - g_scalars: Vec, + /// List of scalars for `n*m` `G` bases. These are separated from `h_scalars` + /// so we can easily pad them when verifying proofs with different `m`s. + g_scalars: Vec, - /// List of scalars for `n*m` `H` bases. These are separated from `g_scalars` - /// so we can easily pad them when verifying proofs with different `m`s. - h_scalars: Vec, + /// List of scalars for `n*m` `H` bases. These are separated from `g_scalars` + /// so we can easily pad them when verifying proofs with different `m`s. + h_scalars: Vec, - /// List of scalars for any number of dynamic bases. - dynamic_base_scalars: Vec, + /// List of scalars for any number of dynamic bases. + dynamic_base_scalars: Vec, - /// List of dynamic bases for the corresponding scalars. - dynamic_bases: Vec, + /// List of dynamic bases for the corresponding scalars. + dynamic_bases: Vec, /// Internal flag that prevents accidentally dropping /// this struct without properly verifying it first. @@ -50,14 +50,15 @@ pub struct Verification { impl Drop for Verification { fn drop(&mut self) { if !self.verified { - panic!("Deferred range proof Verification was not explicitly \ - verified using `RangeProof::verify_batch`!"); + panic!( + "Deferred range proof Verification was not explicitly \ + verified using `RangeProof::verify_batch`!" + ); } } } impl RangeProof { - /// Prepares a `Verification` struct /// that can be combined with others in a batch. 
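`prepare_verification` below begins by replaying the Fiat-Shamir transcript: the verifier commits the same public values in the same order the prover did, so both sides derive identical challenge scalars (`y`, `z`, `x`, `w`) without interaction. A small sketch of that idea against the `ProofTranscript` calls used throughout this crate (`new`, `commit`, `commit_u64`, `challenge_scalar`); the helper and its arguments are illustrative:

    use proof_transcript::ProofTranscript;
    use curve25519_dalek::scalar::Scalar;

    // Derive a challenge from a domain-separation label and committed public data.
    // Prover and verifier call this with identical inputs and therefore obtain
    // identical challenges.
    fn derive_challenge(label: &'static [u8], n: u64, committed_point_bytes: &[u8]) -> Scalar {
        let mut transcript = ProofTranscript::new(label);
        transcript.commit_u64(n);
        transcript.commit(committed_point_bytes);
        transcript.challenge_scalar()
    }

    // e.g. both sides would compute something like:
    //   let y = derive_challenge(b"AggregatedRangeProofTest", 64, A.compress().as_bytes());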
pub fn prepare_verification( @@ -117,7 +118,9 @@ impl RangeProof { let h = s_inv .zip(util::exp_iter(y.invert())) .zip(concat_z_and_2) - .map(|((s_i_inv, exp_y_inv), z_and_2)| z + exp_y_inv * (zz * z_and_2 - b * s_i_inv)); + .map(|((s_i_inv, exp_y_inv), z_and_2)| { + z + exp_y_inv * (zz * z_and_2 - b * s_i_inv) + }); let value_commitment_scalars = util::exp_iter(z).take(m).map(|z_exp| c * zz * z_exp); let basepoint_scalar = w * (self.t_x - a * b) + c * (delta(n, m, &y, &z) - self.t_x); @@ -128,8 +131,7 @@ impl RangeProof { pedersen_base_scalars: (basepoint_scalar, -self.e_blinding - c * self.t_x_blinding), g_scalars: g.collect(), h_scalars: h.collect(), - dynamic_base_scalars: - iter::once(Scalar::one()) + dynamic_base_scalars: iter::once(Scalar::one()) .chain(iter::once(x)) .chain(value_commitment_scalars) .chain(iter::once(c * x)) @@ -137,8 +139,7 @@ impl RangeProof { .chain(x_sq.iter().cloned()) .chain(x_inv_sq.iter().cloned()) .collect(), - dynamic_bases: - iter::once(&self.A) + dynamic_bases: iter::once(&self.A) .chain(iter::once(&self.S)) .chain(value_commitments.iter()) .chain(iter::once(&self.T_1)) @@ -147,7 +148,7 @@ impl RangeProof { .chain(self.ipp_proof.R_vec.iter()) .cloned() .collect(), - verified: false + verified: false, } } @@ -159,21 +160,23 @@ impl RangeProof { pub fn verify_batch>( mut batch: B, gens: GeneratorsView, - rng: &mut R + rng: &mut R, ) -> Result<(), &'static str> { let batch = batch.as_mut(); - // we will special-case the first item to avoid unnecessary multiplication, - // so lets check that we have at least one item. - if batch.len() == 0 { - return Ok(()) - } + // we will special-case the first item to avoid unnecessary multiplication, + // so lets check that we have at least one item. + if batch.len() == 0 { + return Ok(()); + } // Make sure we have enough static generators let n = batch.iter().map(|v| v.n).max().unwrap_or(0); let m = batch.iter().map(|v| v.m).max().unwrap_or(0); if gens.G.len() < (n * m) { - return Err("The generators view does not have enough generators for the largest proof") + return Err( + "The generators view does not have enough generators for the largest proof", + ); } // First statement is used without a random factor @@ -181,59 +184,63 @@ impl RangeProof { let mut pedersen_base_scalars: (Scalar, Scalar) = batch[0].pedersen_base_scalars; let mut g_scalars: Vec = batch[0].g_scalars.clone(); let mut h_scalars: Vec = batch[0].h_scalars.clone(); - + // pad static scalars to the largest proof - g_scalars.resize(n*m, Scalar::zero()); - h_scalars.resize(n*m, Scalar::zero()); + g_scalars.resize(n * m, Scalar::zero()); + h_scalars.resize(n * m, Scalar::zero()); let mut dynamic_base_scalars: Vec = batch[0].dynamic_base_scalars.clone(); let mut dynamic_bases: Vec = batch[0].dynamic_bases.clone(); - + // Other statements are added with a random factor per statement for verification in &mut batch[1..] 
{ verification.verified = true; let batch_challenge = Scalar::random(rng); - pedersen_base_scalars.0 += batch_challenge*verification.pedersen_base_scalars.0; - pedersen_base_scalars.1 += batch_challenge*verification.pedersen_base_scalars.1; + pedersen_base_scalars.0 += batch_challenge * verification.pedersen_base_scalars.0; + pedersen_base_scalars.1 += batch_challenge * verification.pedersen_base_scalars.1; // Note: this loop may be shorter than the total amount of scalars if `m < max({m})` for (i, s) in verification.g_scalars.iter().enumerate() { - g_scalars[i] += batch_challenge*s; + g_scalars[i] += batch_challenge * s; } for (i, s) in verification.h_scalars.iter().enumerate() { - h_scalars[i] += batch_challenge*s; + h_scalars[i] += batch_challenge * s; } - dynamic_base_scalars = dynamic_base_scalars.iter() + dynamic_base_scalars = dynamic_base_scalars + .iter() .cloned() - .chain(verification.dynamic_base_scalars.iter().map(|s| batch_challenge*s )) + .chain(verification.dynamic_base_scalars.iter().map(|s| { + batch_challenge * s + })) .collect(); - dynamic_bases = dynamic_bases.iter() + dynamic_bases = dynamic_bases + .iter() .chain(verification.dynamic_bases.iter()) .cloned() .collect(); } - let mega_check = ristretto::vartime::multiscalar_mul( - iter::once(&pedersen_base_scalars.0) - .chain(iter::once(&pedersen_base_scalars.1)) - .chain(g_scalars.iter()) - .chain(h_scalars.iter()) - .chain(dynamic_base_scalars.iter()), - iter::once(&gens.pedersen_generators.B) - .chain(iter::once(&gens.pedersen_generators.B_blinding)) - .chain(gens.G.iter()) - .chain(gens.H.iter()) - .chain(dynamic_bases.iter()) - ); - - if mega_check.is_identity() { - Ok(()) - } else { - Err("Batch verification failed") - } + let mega_check = ristretto::vartime::multiscalar_mul( + iter::once(&pedersen_base_scalars.0) + .chain(iter::once(&pedersen_base_scalars.1)) + .chain(g_scalars.iter()) + .chain(h_scalars.iter()) + .chain(dynamic_base_scalars.iter()), + iter::once(&gens.pedersen_generators.B) + .chain(iter::once(&gens.pedersen_generators.B_blinding)) + .chain(gens.G.iter()) + .chain(gens.H.iter()) + .chain(dynamic_bases.iter()), + ); + + if mega_check.is_identity() { + Ok(()) + } else { + Err("Batch verification failed") + } } } @@ -279,4 +286,4 @@ mod tests { assert_eq!(power_g, delta(n, 1, &y, &z),); } -} \ No newline at end of file +} From 692cedcc90b01134903c259ccdd4f4a5809b18a0 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Tue, 8 May 2018 14:16:30 -0700 Subject: [PATCH 181/186] simplify impl --- src/inner_product_proof.rs | 3 + src/range_proof/mod.rs | 275 ++++++++++++++++++++++++++++-- src/range_proof/verification.rs | 289 -------------------------------- 3 files changed, 264 insertions(+), 303 deletions(-) delete mode 100644 src/range_proof/verification.rs diff --git a/src/inner_product_proof.rs b/src/inner_product_proof.rs index 5fdc7c07..d7f1a5fd 100644 --- a/src/inner_product_proof.rs +++ b/src/inner_product_proof.rs @@ -124,6 +124,9 @@ impl InnerProductProof { transcript: &mut ProofTranscript, ) -> (Vec, Vec, Vec) { let lg_n = self.L_vec.len(); + // XXX FIXME: we need to pass in `n` explicitly and assert that lg_n == lg(n) + // This is necessary for avoiding DoS when a short log(n) vector + // causes the O(n) memory usage in the last loop. let n = 1 << lg_n; // 1. 
Recompute x_k,...,x_1 based on the proof transcript diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index 1fe02bea..0082a697 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -2,20 +2,24 @@ #![doc(include = "../docs/range-proof-protocol.md")] use rand::Rng; +use std::iter; +use std::borrow::Borrow; +use curve25519_dalek::ristretto; use curve25519_dalek::ristretto::RistrettoPoint; use curve25519_dalek::scalar::Scalar; +use curve25519_dalek::traits::IsIdentity; use generators::{Generators, GeneratorsView}; use inner_product_proof::InnerProductProof; use proof_transcript::ProofTranscript; +use util; // Modules for MPC protocol pub mod dealer; pub mod messages; pub mod party; -mod verification; /// The `RangeProof` struct represents a single range proof. #[derive(Serialize, Deserialize, Clone, Debug)] @@ -135,16 +139,235 @@ impl RangeProof { n: usize, ) -> Result<(), &'static str> { RangeProof::verify_batch( - [ - self.prepare_verification(value_commitments, transcript, rng, n), - ], + iter::once((self, value_commitments)), + n, gens, + transcript, rng, ) } + + /// Verifies multiple range proofs at once. + /// If any range proof is invalid, the whole batch is invalid. + /// Proofs may use different ranges (`n`) or different number of aggregated commitments (`m`). + /// You must provide big enough view into generators (`gens`) that covers + /// the biggest proof + pub fn verify_batch<'a,'b,I,R,P,V>( + proofs: I, + n: usize, + gens: GeneratorsView, // must have enough points to cover max(m*n) + transcript: &mut ProofTranscript, + rng: &mut R + ) -> Result<(), &'static str> + where + R: Rng, + I: IntoIterator, + P: Borrow, + V: AsRef<[RistrettoPoint]> + { + let mut m: usize = 0; + let mut dyn_bases_count:usize = 0; + let batch = proofs.into_iter().map(|(p, vcs)| { + let m_curr = vcs.as_ref().len(); + let v = p.borrow().prepare_verification(n, vcs, &mut transcript.clone(), rng); + dyn_bases_count += m_curr /*V*/ + 4 /*A,S,T1,T2*/ + 2*p.borrow().ipp_proof.L_vec.len(); + m = m.max(m_curr); + v + }).collect::>(); + + if gens.G.len() < (n * m) { + return Err( + "The generators view does not have enough generators for the largest proof", + ); + } + + // First statement is used without a random factor + let mut pedersen_base_scalars: (Scalar, Scalar) = (Scalar::zero(), Scalar::zero()); + let mut g_scalars: Vec = iter::repeat(Scalar::zero()).take(n*m).collect(); + let mut h_scalars: Vec = iter::repeat(Scalar::zero()).take(n*m).collect(); + + let mut dynamic_base_scalars: Vec = Vec::with_capacity(dyn_bases_count); + let mut dynamic_bases: Vec = Vec::with_capacity(dyn_bases_count); + + // Other statements are added with a random factor per statement + for verification in batch { + + pedersen_base_scalars.0 += verification.pedersen_base_scalars.0; + pedersen_base_scalars.1 += verification.pedersen_base_scalars.1; + + // Note: this loop may be shorter than the total amount of scalars if `m < max({m})` + for (i, s) in verification.g_scalars.iter().enumerate() { + g_scalars[i] += s; + } + for (i, s) in verification.h_scalars.iter().enumerate() { + h_scalars[i] += s; + } + + dynamic_base_scalars.extend(verification.dynamic_base_scalars); + dynamic_bases.extend(verification.dynamic_bases); + } + + let mega_check = ristretto::vartime::multiscalar_mul( + iter::once(&pedersen_base_scalars.0) + .chain(iter::once(&pedersen_base_scalars.1)) + .chain(g_scalars.iter()) + .chain(h_scalars.iter()) + .chain(dynamic_base_scalars.iter()), + iter::once(&gens.pedersen_generators.B) + 
.chain(iter::once(&gens.pedersen_generators.B_blinding)) + .chain(gens.G.iter()) + .chain(gens.H.iter()) + .chain(dynamic_bases.iter()), + ); + + if mega_check.is_identity() { + Ok(()) + } else { + Err("Verification failed") + } + } + + /// Prepares a `Verification` struct + /// that can be combined with others in a batch. + fn prepare_verification( + &self, + n: usize, + value_commitments: V, + transcript: &mut ProofTranscript, + rng: &mut R, + ) -> Verification + where + R: Rng, + V: AsRef<[RistrettoPoint]> + { + // First, replay the "interactive" protocol using the proof + // data to recompute all challenges. + + let m = value_commitments.as_ref().len(); + + transcript.commit_u64(n as u64); + transcript.commit_u64(m as u64); + + for V in value_commitments.as_ref().iter() { + transcript.commit(V.borrow().compress().as_bytes()); + } + transcript.commit(self.A.compress().as_bytes()); + transcript.commit(self.S.compress().as_bytes()); + + let y = transcript.challenge_scalar(); + let z = transcript.challenge_scalar(); + let zz = z * z; + let minus_z = -z; + + transcript.commit(self.T_1.compress().as_bytes()); + transcript.commit(self.T_2.compress().as_bytes()); + + let x = transcript.challenge_scalar(); + + transcript.commit(self.t_x.as_bytes()); + transcript.commit(self.t_x_blinding.as_bytes()); + transcript.commit(self.e_blinding.as_bytes()); + + let w = transcript.challenge_scalar(); + + // Challenge value for combining two statements within a rangeproof. + let c = Scalar::random(rng); + + let (x_sq, x_inv_sq, s) = self.ipp_proof.verification_scalars(transcript); + let s_inv = s.iter().rev(); + + let a = self.ipp_proof.a; + let b = self.ipp_proof.b; + + // Construct concat_z_and_2, an iterator of the values of + // z^0 * \vec(2)^n || z^1 * \vec(2)^n || ... || z^(m-1) * \vec(2)^n + let powers_of_2: Vec = util::exp_iter(Scalar::from_u64(2)).take(n).collect(); + let powers_of_z = util::exp_iter(z).take(m); + let concat_z_and_2 = + powers_of_z.flat_map(|exp_z| powers_of_2.iter().map(move |exp_2| exp_2 * exp_z)); + + let g = s.iter() + .map(|s_i| minus_z - a * s_i); + let h = s_inv + .zip(util::exp_iter(y.invert())) + .zip(concat_z_and_2) + .map(|((s_i_inv, exp_y_inv), z_and_2)| { + z + exp_y_inv * (zz * z_and_2 - b * s_i_inv) + }); + + let value_commitment_scalars = util::exp_iter(z) + .take(m) + .map(|z_exp| c * zz * z_exp); + + let basepoint_scalar = w * (self.t_x - a * b) + c * (delta(n, m, &y, &z) - self.t_x); + + // Challenge value for combining the complete range proof statement with other range proof statements. + let batch_challenge = Scalar::random(rng); + + Verification { + pedersen_base_scalars: ( + batch_challenge*basepoint_scalar, + batch_challenge*(-self.e_blinding - c * self.t_x_blinding) + ), + g_scalars: g.map(|s| batch_challenge*s ).collect(), + h_scalars: h.map(|s| batch_challenge*s ).collect(), + dynamic_base_scalars: iter::once(Scalar::one()) + .chain(iter::once(x)) + .chain(value_commitment_scalars) + .chain(iter::once(c * x)) + .chain(iter::once(c * x * x)) + .chain(x_sq.iter().cloned()) + .chain(x_inv_sq.iter().cloned()) + .map(|s| batch_challenge*s ) + .collect(), + dynamic_bases: iter::once(&self.A) + .chain(iter::once(&self.S)) + .chain(value_commitments.as_ref().iter()) + .chain(iter::once(&self.T_1)) + .chain(iter::once(&self.T_2)) + .chain(self.ipp_proof.L_vec.iter()) + .chain(self.ipp_proof.R_vec.iter()) + .cloned() + .collect(), + } + } +} + +/// Represents a deferred computation to verify a single rangeproof. 
+/// Multiple instances can be verified more efficient as a batch using +/// `RangeProof::verify_batch` function. +struct Verification { + /// Pair of scalars multiplying pedersen bases `B`, `B_blinding`. + pedersen_base_scalars: (Scalar, Scalar), + + /// List of scalars for `n*m` `G` bases. These are separated from `h_scalars` + /// so we can easily pad them when verifying proofs with different `m`s. + g_scalars: Vec, + + /// List of scalars for `n*m` `H` bases. These are separated from `g_scalars` + /// so we can easily pad them when verifying proofs with different `m`s. + h_scalars: Vec, + + /// List of scalars for any number of dynamic bases. + dynamic_base_scalars: Vec, + + /// List of dynamic bases for the corresponding scalars. + dynamic_bases: Vec, } +/// Compute +/// \\[ +/// \delta(y,z) = (z - z^{2}) \langle 1, {\mathbf{y}}^{nm} \rangle + z^{3} \langle \mathbf{1}, {\mathbf{2}}^{nm} \rangle +/// \\] +fn delta(n: usize, m: usize, y: &Scalar, z: &Scalar) -> Scalar { + let sum_y = util::sum_of_powers(y, n * m); + let sum_2 = util::sum_of_powers(&Scalar::from_u64(2), n); + let sum_z = util::sum_of_powers(z, m); + + (z - z * z) * sum_y - z * z * z * sum_2 * sum_z +} + #[cfg(test)] mod tests { use super::*; @@ -206,23 +429,21 @@ mod tests { use bincode; let mut rng = OsRng::new().unwrap(); - let transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); + let mut transcript = ProofTranscript::new(b"AggregatedRangeProofTest"); - let verifications = nm.iter() + let inputs = nm.iter() .map(|(n, m)| { let (p, vc) = singleparty_create_helper(*n, *m); - bincode::deserialize::(&p) - .unwrap() - .prepare_verification(&vc, &mut transcript.clone(), &mut rng, *n) - }) - .collect::>(); + let proof = bincode::deserialize::(&p).unwrap(); + (proof, vc) + }); - let max_n = verifications.iter().map(|v| v.n).max().unwrap_or(0); - let max_m = verifications.iter().map(|v| v.m).max().unwrap_or(0); + let max_n = nm.iter().map(|(n,_)| *n).max().unwrap_or(0); + let max_m = nm.iter().map(|(_,m)| *m).max().unwrap_or(0); let generators = Generators::new(PedersenGenerators::default(), max_n, max_m); - assert!(RangeProof::verify_batch(verifications, generators.all(), &mut rng).is_ok()); + assert!(RangeProof::verify_batch(inputs, max_n, generators.all(), &mut transcript, &mut rng).is_ok()); } @@ -450,4 +671,30 @@ mod tests { // XXX when we have error types, check finer info than "was error" assert!(maybe_share0.is_err()); } + + #[test] + fn test_delta() { + let mut rng = OsRng::new().unwrap(); + let y = Scalar::random(&mut rng); + let z = Scalar::random(&mut rng); + + // Choose n = 256 to ensure we overflow the group order during + // the computation, to check that that's done correctly + let n = 256; + + // code copied from previous implementation + let z2 = z * z; + let z3 = z2 * z; + let mut power_g = Scalar::zero(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = Scalar::one(); // start at 2^0 = 1 + for _ in 0..n { + power_g += (z - z2) * exp_y - z3 * exp_2; + + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + assert_eq!(power_g, delta(n, 1, &y, &z),); + } } diff --git a/src/range_proof/verification.rs b/src/range_proof/verification.rs deleted file mode 100644 index f7132b85..00000000 --- a/src/range_proof/verification.rs +++ /dev/null @@ -1,289 +0,0 @@ -#![allow(non_snake_case)] - -use rand::Rng; - -use std::iter; -use std::borrow::Borrow; - -use curve25519_dalek::ristretto; -use curve25519_dalek::ristretto::RistrettoPoint; -use 
curve25519_dalek::scalar::Scalar; -use curve25519_dalek::traits::IsIdentity; - -use generators::GeneratorsView; -use proof_transcript::ProofTranscript; -use range_proof::RangeProof; -use util; - -/// Represents a deferred computation to verify a single rangeproof. -/// Multiple instances can be verified more efficient as a batch using -/// `RangeProof::verify_batch` function. -pub struct Verification { - /// Number of commitments in the aggregated proof - pub m: usize, - - /// Size of the range in bits - pub n: usize, - - /// Pair of scalars multiplying pedersen bases `B`, `B_blinding`. - pedersen_base_scalars: (Scalar, Scalar), - - /// List of scalars for `n*m` `G` bases. These are separated from `h_scalars` - /// so we can easily pad them when verifying proofs with different `m`s. - g_scalars: Vec, - - /// List of scalars for `n*m` `H` bases. These are separated from `g_scalars` - /// so we can easily pad them when verifying proofs with different `m`s. - h_scalars: Vec, - - /// List of scalars for any number of dynamic bases. - dynamic_base_scalars: Vec, - - /// List of dynamic bases for the corresponding scalars. - dynamic_bases: Vec, - - /// Internal flag that prevents accidentally dropping - /// this struct without properly verifying it first. - verified: bool, -} - -impl Drop for Verification { - fn drop(&mut self) { - if !self.verified { - panic!( - "Deferred range proof Verification was not explicitly \ - verified using `RangeProof::verify_batch`!" - ); - } - } -} - -impl RangeProof { - /// Prepares a `Verification` struct - /// that can be combined with others in a batch. - pub fn prepare_verification( - &self, - value_commitments: &[RistrettoPoint], - transcript: &mut ProofTranscript, - rng: &mut R, - n: usize, - ) -> Verification { - // First, replay the "interactive" protocol using the proof - // data to recompute all challenges. - - let m = value_commitments.as_ref().len(); - - transcript.commit_u64(n as u64); - transcript.commit_u64(m as u64); - - for V in value_commitments.as_ref().iter() { - transcript.commit(V.borrow().compress().as_bytes()); - } - transcript.commit(self.A.compress().as_bytes()); - transcript.commit(self.S.compress().as_bytes()); - - let y = transcript.challenge_scalar(); - let z = transcript.challenge_scalar(); - let zz = z * z; - let minus_z = -z; - - transcript.commit(self.T_1.compress().as_bytes()); - transcript.commit(self.T_2.compress().as_bytes()); - - let x = transcript.challenge_scalar(); - - transcript.commit(self.t_x.as_bytes()); - transcript.commit(self.t_x_blinding.as_bytes()); - transcript.commit(self.e_blinding.as_bytes()); - - let w = transcript.challenge_scalar(); - - // Challenge value for batching statements to be verified - let c = Scalar::random(rng); - - let (x_sq, x_inv_sq, s) = self.ipp_proof.verification_scalars(transcript); - let s_inv = s.iter().rev(); - - let a = self.ipp_proof.a; - let b = self.ipp_proof.b; - - // Construct concat_z_and_2, an iterator of the values of - // z^0 * \vec(2)^n || z^1 * \vec(2)^n || ... 
|| z^(m-1) * \vec(2)^n - let powers_of_2: Vec = util::exp_iter(Scalar::from_u64(2)).take(n).collect(); - let powers_of_z = util::exp_iter(z).take(m); - let concat_z_and_2 = - powers_of_z.flat_map(|exp_z| powers_of_2.iter().map(move |exp_2| exp_2 * exp_z)); - - let g = s.iter().map(|s_i| minus_z - a * s_i); - let h = s_inv - .zip(util::exp_iter(y.invert())) - .zip(concat_z_and_2) - .map(|((s_i_inv, exp_y_inv), z_and_2)| { - z + exp_y_inv * (zz * z_and_2 - b * s_i_inv) - }); - - let value_commitment_scalars = util::exp_iter(z).take(m).map(|z_exp| c * zz * z_exp); - let basepoint_scalar = w * (self.t_x - a * b) + c * (delta(n, m, &y, &z) - self.t_x); - - Verification { - m, - n, - pedersen_base_scalars: (basepoint_scalar, -self.e_blinding - c * self.t_x_blinding), - g_scalars: g.collect(), - h_scalars: h.collect(), - dynamic_base_scalars: iter::once(Scalar::one()) - .chain(iter::once(x)) - .chain(value_commitment_scalars) - .chain(iter::once(c * x)) - .chain(iter::once(c * x * x)) - .chain(x_sq.iter().cloned()) - .chain(x_inv_sq.iter().cloned()) - .collect(), - dynamic_bases: iter::once(&self.A) - .chain(iter::once(&self.S)) - .chain(value_commitments.iter()) - .chain(iter::once(&self.T_1)) - .chain(iter::once(&self.T_2)) - .chain(self.ipp_proof.L_vec.iter()) - .chain(self.ipp_proof.R_vec.iter()) - .cloned() - .collect(), - verified: false, - } - } - - /// Verifies multiple range proofs at once. - /// If any range proof is invalid, the whole batch is invalid. - /// Proofs may use different ranges (`n`) or different number of aggregated commitments (`m`). - /// You must provide big enough view into generators (`gens`) that covers - /// the biggest proof - pub fn verify_batch>( - mut batch: B, - gens: GeneratorsView, - rng: &mut R, - ) -> Result<(), &'static str> { - let batch = batch.as_mut(); - - // we will special-case the first item to avoid unnecessary multiplication, - // so lets check that we have at least one item. - if batch.len() == 0 { - return Ok(()); - } - - // Make sure we have enough static generators - let n = batch.iter().map(|v| v.n).max().unwrap_or(0); - let m = batch.iter().map(|v| v.m).max().unwrap_or(0); - if gens.G.len() < (n * m) { - return Err( - "The generators view does not have enough generators for the largest proof", - ); - } - - // First statement is used without a random factor - batch[0].verified = true; - let mut pedersen_base_scalars: (Scalar, Scalar) = batch[0].pedersen_base_scalars; - let mut g_scalars: Vec = batch[0].g_scalars.clone(); - let mut h_scalars: Vec = batch[0].h_scalars.clone(); - - // pad static scalars to the largest proof - g_scalars.resize(n * m, Scalar::zero()); - h_scalars.resize(n * m, Scalar::zero()); - - let mut dynamic_base_scalars: Vec = batch[0].dynamic_base_scalars.clone(); - let mut dynamic_bases: Vec = batch[0].dynamic_bases.clone(); - - // Other statements are added with a random factor per statement - for verification in &mut batch[1..] 
{ - verification.verified = true; - let batch_challenge = Scalar::random(rng); - - pedersen_base_scalars.0 += batch_challenge * verification.pedersen_base_scalars.0; - pedersen_base_scalars.1 += batch_challenge * verification.pedersen_base_scalars.1; - - // Note: this loop may be shorter than the total amount of scalars if `m < max({m})` - for (i, s) in verification.g_scalars.iter().enumerate() { - g_scalars[i] += batch_challenge * s; - } - for (i, s) in verification.h_scalars.iter().enumerate() { - h_scalars[i] += batch_challenge * s; - } - - dynamic_base_scalars = dynamic_base_scalars - .iter() - .cloned() - .chain(verification.dynamic_base_scalars.iter().map(|s| { - batch_challenge * s - })) - .collect(); - - dynamic_bases = dynamic_bases - .iter() - .chain(verification.dynamic_bases.iter()) - .cloned() - .collect(); - } - - let mega_check = ristretto::vartime::multiscalar_mul( - iter::once(&pedersen_base_scalars.0) - .chain(iter::once(&pedersen_base_scalars.1)) - .chain(g_scalars.iter()) - .chain(h_scalars.iter()) - .chain(dynamic_base_scalars.iter()), - iter::once(&gens.pedersen_generators.B) - .chain(iter::once(&gens.pedersen_generators.B_blinding)) - .chain(gens.G.iter()) - .chain(gens.H.iter()) - .chain(dynamic_bases.iter()), - ); - - if mega_check.is_identity() { - Ok(()) - } else { - Err("Batch verification failed") - } - } -} - -/// Compute -/// \\[ -/// \delta(y,z) = (z - z^{2}) \langle 1, {\mathbf{y}}^{nm} \rangle + z^{3} \langle \mathbf{1}, {\mathbf{2}}^{nm} \rangle -/// \\] -fn delta(n: usize, m: usize, y: &Scalar, z: &Scalar) -> Scalar { - let sum_y = util::sum_of_powers(y, n * m); - let sum_2 = util::sum_of_powers(&Scalar::from_u64(2), n); - let sum_z = util::sum_of_powers(z, m); - - (z - z * z) * sum_y - z * z * z * sum_2 * sum_z -} - -#[cfg(test)] -mod tests { - use super::*; - use rand::OsRng; - - #[test] - fn test_delta() { - let mut rng = OsRng::new().unwrap(); - let y = Scalar::random(&mut rng); - let z = Scalar::random(&mut rng); - - // Choose n = 256 to ensure we overflow the group order during - // the computation, to check that that's done correctly - let n = 256; - - // code copied from previous implementation - let z2 = z * z; - let z3 = z2 * z; - let mut power_g = Scalar::zero(); - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - for _ in 0..n { - power_g += (z - z2) * exp_y - z3 * exp_2; - - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - - assert_eq!(power_g, delta(n, 1, &y, &z),); - } -} From 647a4216948da396d477cffb35bd4785bd3ed1c9 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Tue, 8 May 2018 14:17:23 -0700 Subject: [PATCH 182/186] move test_delta back --- src/range_proof/mod.rs | 52 +++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index 0082a697..a2680372 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -375,6 +375,32 @@ mod tests { use generators::PedersenGenerators; + #[test] + fn test_delta() { + let mut rng = OsRng::new().unwrap(); + let y = Scalar::random(&mut rng); + let z = Scalar::random(&mut rng); + + // Choose n = 256 to ensure we overflow the group order during + // the computation, to check that that's done correctly + let n = 256; + + // code copied from previous implementation + let z2 = z * z; + let z3 = z2 * z; + let mut power_g = Scalar::zero(); + let mut exp_y = Scalar::one(); // start at y^0 = 1 + let mut exp_2 = 
Scalar::one(); // start at 2^0 = 1 + for _ in 0..n { + power_g += (z - z2) * exp_y - z3 * exp_2; + + exp_y = exp_y * y; // y^i -> y^(i+1) + exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) + } + + assert_eq!(power_g, delta(n, 1, &y, &z),); + } + /// Given a bitsize `n`, test the following: /// /// 1. Generate `m` random values and create a proof they are all in range; @@ -671,30 +697,4 @@ mod tests { // XXX when we have error types, check finer info than "was error" assert!(maybe_share0.is_err()); } - - #[test] - fn test_delta() { - let mut rng = OsRng::new().unwrap(); - let y = Scalar::random(&mut rng); - let z = Scalar::random(&mut rng); - - // Choose n = 256 to ensure we overflow the group order during - // the computation, to check that that's done correctly - let n = 256; - - // code copied from previous implementation - let z2 = z * z; - let z3 = z2 * z; - let mut power_g = Scalar::zero(); - let mut exp_y = Scalar::one(); // start at y^0 = 1 - let mut exp_2 = Scalar::one(); // start at 2^0 = 1 - for _ in 0..n { - power_g += (z - z2) * exp_y - z3 * exp_2; - - exp_y = exp_y * y; // y^i -> y^(i+1) - exp_2 = exp_2 + exp_2; // 2^i -> 2^(i+1) - } - - assert_eq!(power_g, delta(n, 1, &y, &z),); - } } From b7ceab5e813f9ff81b8e6ca7da0469acf9721e75 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Tue, 8 May 2018 14:42:36 -0700 Subject: [PATCH 183/186] debugging batch logic --- src/range_proof/mod.rs | 43 ++++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index a2680372..94b37129 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -139,8 +139,7 @@ impl RangeProof { n: usize, ) -> Result<(), &'static str> { RangeProof::verify_batch( - iter::once((self, value_commitments)), - n, + iter::once((self, value_commitments, n)), gens, transcript, rng, @@ -153,29 +152,32 @@ impl RangeProof { /// You must provide big enough view into generators (`gens`) that covers /// the biggest proof pub fn verify_batch<'a,'b,I,R,P,V>( - proofs: I, - n: usize, + proofs: I, gens: GeneratorsView, // must have enough points to cover max(m*n) transcript: &mut ProofTranscript, rng: &mut R ) -> Result<(), &'static str> where R: Rng, - I: IntoIterator, + I: IntoIterator, P: Borrow, V: AsRef<[RistrettoPoint]> { - let mut m: usize = 0; + println!("Verifying batch!"); + let mut nm: usize = 0; let mut dyn_bases_count:usize = 0; - let batch = proofs.into_iter().map(|(p, vcs)| { - let m_curr = vcs.as_ref().len(); + let batch = proofs.into_iter().map(|(p, vcs, n)| { + let m = vcs.as_ref().len(); let v = p.borrow().prepare_verification(n, vcs, &mut transcript.clone(), rng); - dyn_bases_count += m_curr /*V*/ + 4 /*A,S,T1,T2*/ + 2*p.borrow().ipp_proof.L_vec.len(); - m = m.max(m_curr); + dyn_bases_count += m /*V*/ + 4 /*A,S,T1,T2*/ + 2*p.borrow().ipp_proof.L_vec.len() /*{L,R}*/; + println!("Current nm = {:?}, n,m = {:?},{:?}", nm, n, m); + nm = nm.max(n*m); v - }).collect::>(); + }).collect::>(); // we need to collect here so that nm and dyn_bases_count are computed. 
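Because proofs in one batch may have different `n*m`, the combined `g_scalars`/`h_scalars` are sized for the largest statement; a smaller statement only touches a prefix of them, and the untouched tail stays zero, which contributes nothing to the final multiscalar multiplication. A minimal sketch of that padded accumulation over plain scalar vectors (the helper name and shape are illustrative, not this crate's API):

    use curve25519_dalek::scalar::Scalar;

    // Sum weighted per-proof scalar vectors of different lengths into one
    // vector sized for the longest proof; unused tail entries remain zero.
    fn accumulate_padded(per_proof: &[(Scalar, Vec<Scalar>)]) -> Vec<Scalar> {
        let max_len = per_proof.iter().map(|pair| pair.1.len()).max().unwrap_or(0);
        let mut total = vec![Scalar::zero(); max_len];
        for pair in per_proof {
            for (i, s) in pair.1.iter().enumerate() {
                total[i] += pair.0 * s; // weight * scalar; shorter inputs leave the tail at zero
            }
        }
        total
    }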
- if gens.G.len() < (n * m) { + println!("Batch size = {:?}", batch.len()); + + if gens.G.len() < nm { return Err( "The generators view does not have enough generators for the largest proof", ); @@ -183,19 +185,23 @@ impl RangeProof { // First statement is used without a random factor let mut pedersen_base_scalars: (Scalar, Scalar) = (Scalar::zero(), Scalar::zero()); - let mut g_scalars: Vec = iter::repeat(Scalar::zero()).take(n*m).collect(); - let mut h_scalars: Vec = iter::repeat(Scalar::zero()).take(n*m).collect(); + let mut g_scalars: Vec = iter::repeat(Scalar::zero()).take(nm).collect(); + let mut h_scalars: Vec = iter::repeat(Scalar::zero()).take(nm).collect(); let mut dynamic_base_scalars: Vec = Vec::with_capacity(dyn_bases_count); let mut dynamic_bases: Vec = Vec::with_capacity(dyn_bases_count); - // Other statements are added with a random factor per statement + println!("Static scalars = {:?}", nm); + println!("Dynamic scalars = {:?}", dyn_bases_count); + + // All statements are added up. Each scalar in each statement + // already has a challenge pre-multiplied in `prepare_verification`. for verification in batch { pedersen_base_scalars.0 += verification.pedersen_base_scalars.0; pedersen_base_scalars.1 += verification.pedersen_base_scalars.1; - // Note: this loop may be shorter than the total amount of scalars if `m < max({m})` + // Note: these loops may be shorter than the total amount of scalars if `n*m < max({n*m})` for (i, s) in verification.g_scalars.iter().enumerate() { g_scalars[i] += s; } @@ -229,6 +235,7 @@ impl RangeProof { /// Prepares a `Verification` struct /// that can be combined with others in a batch. + /// Note: all scalars are pre-multiplied by a random challenge. fn prepare_verification( &self, n: usize, @@ -461,7 +468,7 @@ mod tests { .map(|(n, m)| { let (p, vc) = singleparty_create_helper(*n, *m); let proof = bincode::deserialize::(&p).unwrap(); - (proof, vc) + (proof, vc, *n) }); let max_n = nm.iter().map(|(n,_)| *n).max().unwrap_or(0); @@ -469,7 +476,7 @@ mod tests { let generators = Generators::new(PedersenGenerators::default(), max_n, max_m); - assert!(RangeProof::verify_batch(inputs, max_n, generators.all(), &mut transcript, &mut rng).is_ok()); + assert!(RangeProof::verify_batch(inputs, generators.all(), &mut transcript, &mut rng).is_ok()); } From d33dc231f24f98de7463f0472a56535d94609ab3 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Tue, 8 May 2018 15:04:26 -0700 Subject: [PATCH 184/186] bug fix: take only necessary number of generator points --- src/range_proof/mod.rs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index 94b37129..4c34015a 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -163,20 +163,16 @@ impl RangeProof { P: Borrow, V: AsRef<[RistrettoPoint]> { - println!("Verifying batch!"); let mut nm: usize = 0; let mut dyn_bases_count:usize = 0; let batch = proofs.into_iter().map(|(p, vcs, n)| { let m = vcs.as_ref().len(); let v = p.borrow().prepare_verification(n, vcs, &mut transcript.clone(), rng); dyn_bases_count += m /*V*/ + 4 /*A,S,T1,T2*/ + 2*p.borrow().ipp_proof.L_vec.len() /*{L,R}*/; - println!("Current nm = {:?}, n,m = {:?},{:?}", nm, n, m); nm = nm.max(n*m); v }).collect::>(); // we need to collect here so that nm and dyn_bases_count are computed. 
- println!("Batch size = {:?}", batch.len()); - if gens.G.len() < nm { return Err( "The generators view does not have enough generators for the largest proof", @@ -191,9 +187,6 @@ impl RangeProof { let mut dynamic_base_scalars: Vec = Vec::with_capacity(dyn_bases_count); let mut dynamic_bases: Vec = Vec::with_capacity(dyn_bases_count); - println!("Static scalars = {:?}", nm); - println!("Dynamic scalars = {:?}", dyn_bases_count); - // All statements are added up. Each scalar in each statement // already has a challenge pre-multiplied in `prepare_verification`. for verification in batch { @@ -221,8 +214,8 @@ impl RangeProof { .chain(dynamic_base_scalars.iter()), iter::once(&gens.pedersen_generators.B) .chain(iter::once(&gens.pedersen_generators.B_blinding)) - .chain(gens.G.iter()) - .chain(gens.H.iter()) + .chain(gens.G.iter().take(nm)) + .chain(gens.H.iter().take(nm)) .chain(dynamic_bases.iter()), ); @@ -589,6 +582,11 @@ mod tests { batch_verify_helper(&[(64, 1), (32, 2), (16, 4)]); } + #[test] + fn batch_verify_mvp_failure() { + batch_verify_helper(&[(4,1),(2,2)]); + } + #[test] fn batch_verify_n_differ_m_differ_total_256() { batch_verify_helper(&[(16, 1), (32, 2), (64, 4)]); From 7e610022f42ee5a8120e073dddf3a049e2bf3881 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Tue, 8 May 2018 15:07:28 -0700 Subject: [PATCH 185/186] remove obsolete test --- src/range_proof/mod.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index 4c34015a..5dcf612a 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -464,10 +464,11 @@ mod tests { (proof, vc, *n) }); - let max_n = nm.iter().map(|(n,_)| *n).max().unwrap_or(0); - let max_m = nm.iter().map(|(_,m)| *m).max().unwrap_or(0); + let max_nm = nm.iter().map(|(n,m)| n * m).max().unwrap_or(0); - let generators = Generators::new(PedersenGenerators::default(), max_n, max_m); + // hackish split of `n*m` into `n*m, 1` because we do not want + // to compute more 8 generators for the case such as ((4,1),(2,2)). + let generators = Generators::new(PedersenGenerators::default(), max_nm, 1); assert!(RangeProof::verify_batch(inputs, generators.all(), &mut transcript, &mut rng).is_ok()); } @@ -582,11 +583,6 @@ mod tests { batch_verify_helper(&[(64, 1), (32, 2), (16, 4)]); } - #[test] - fn batch_verify_mvp_failure() { - batch_verify_helper(&[(4,1),(2,2)]); - } - #[test] fn batch_verify_n_differ_m_differ_total_256() { batch_verify_helper(&[(16, 1), (32, 2), (64, 4)]); From 7295a16750ed03709b9025edeeae86167e977857 Mon Sep 17 00:00:00 2001 From: Oleg Andreev Date: Tue, 8 May 2018 15:17:31 -0700 Subject: [PATCH 186/186] remove unnecessary vec allocations --- src/range_proof/mod.rs | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/src/range_proof/mod.rs b/src/range_proof/mod.rs index 5dcf612a..0ec4dc1a 100644 --- a/src/range_proof/mod.rs +++ b/src/range_proof/mod.rs @@ -164,14 +164,12 @@ impl RangeProof { V: AsRef<[RistrettoPoint]> { let mut nm: usize = 0; - let mut dyn_bases_count:usize = 0; let batch = proofs.into_iter().map(|(p, vcs, n)| { let m = vcs.as_ref().len(); let v = p.borrow().prepare_verification(n, vcs, &mut transcript.clone(), rng); - dyn_bases_count += m /*V*/ + 4 /*A,S,T1,T2*/ + 2*p.borrow().ipp_proof.L_vec.len() /*{L,R}*/; nm = nm.max(n*m); v - }).collect::>(); // we need to collect here so that nm and dyn_bases_count are computed. 
+ }).collect::>(); if gens.G.len() < nm { return Err( @@ -184,12 +182,16 @@ impl RangeProof { let mut g_scalars: Vec = iter::repeat(Scalar::zero()).take(nm).collect(); let mut h_scalars: Vec = iter::repeat(Scalar::zero()).take(nm).collect(); - let mut dynamic_base_scalars: Vec = Vec::with_capacity(dyn_bases_count); - let mut dynamic_bases: Vec = Vec::with_capacity(dyn_bases_count); + let dynamic_base_scalars = batch.iter().flat_map(|v| { + v.dynamic_base_scalars.iter() + }); + let dynamic_bases = batch.iter().flat_map(|v| { + v.dynamic_bases.iter() + }); // All statements are added up. Each scalar in each statement // already has a challenge pre-multiplied in `prepare_verification`. - for verification in batch { + for verification in &batch { pedersen_base_scalars.0 += verification.pedersen_base_scalars.0; pedersen_base_scalars.1 += verification.pedersen_base_scalars.1; @@ -201,9 +203,6 @@ impl RangeProof { for (i, s) in verification.h_scalars.iter().enumerate() { h_scalars[i] += s; } - - dynamic_base_scalars.extend(verification.dynamic_base_scalars); - dynamic_bases.extend(verification.dynamic_bases); } let mega_check = ristretto::vartime::multiscalar_mul( @@ -211,12 +210,12 @@ impl RangeProof { .chain(iter::once(&pedersen_base_scalars.1)) .chain(g_scalars.iter()) .chain(h_scalars.iter()) - .chain(dynamic_base_scalars.iter()), + .chain(dynamic_base_scalars), iter::once(&gens.pedersen_generators.B) .chain(iter::once(&gens.pedersen_generators.B_blinding)) .chain(gens.G.iter().take(nm)) .chain(gens.H.iter().take(nm)) - .chain(dynamic_bases.iter()), + .chain(dynamic_bases), ); if mega_check.is_identity() {