diff --git a/curve25519-dalek/benches/dalek_benchmarks.rs b/curve25519-dalek/benches/dalek_benchmarks.rs
index b8bdd6772..ea765c645 100644
--- a/curve25519-dalek/benches/dalek_benchmarks.rs
+++ b/curve25519-dalek/benches/dalek_benchmarks.rs
@@ -34,7 +34,7 @@ mod edwards_benches {
                     let mut rng = OsRng.unwrap_err();
                     let points: Vec<EdwardsPoint> =
                         (0..size).map(|_| EdwardsPoint::random(&mut rng)).collect();
-                    b.iter(|| EdwardsPoint::compress_batch(&points));
+                    b.iter(|| EdwardsPoint::compress_batch_alloc(&points));
                 },
             );
         }
diff --git a/curve25519-dalek/src/edwards.rs b/curve25519-dalek/src/edwards.rs
index f7d2e6906..8b823ad61 100644
--- a/curve25519-dalek/src/edwards.rs
+++ b/curve25519-dalek/src/edwards.rs
@@ -599,7 +599,7 @@ impl EdwardsPoint {
 
         // Compute the denominators in a batch
         let mut denominators = eds.iter().map(|p| &p.Z - &p.Y).collect::<Vec<_>>();
-        FieldElement::batch_invert(&mut denominators);
+        FieldElement::invert_batch_alloc(&mut denominators);
 
         // Now compute the Montgomery u coordinate for every point
         let mut ret = Vec::with_capacity(eds.len());
@@ -616,12 +616,24 @@
         self.to_affine().compress()
     }
 
+    /// Compress several `EdwardsPoint`s into `CompressedEdwardsY` format, using a batch inversion
+    /// for a significant speedup.
+    pub fn compress_batch<const N: usize>(inputs: &[EdwardsPoint; N]) -> [CompressedEdwardsY; N] {
+        let mut zs: [_; N] = core::array::from_fn(|i| inputs[i].Z);
+        FieldElement::invert_batch(&mut zs);
+
+        core::array::from_fn(|i| {
+            let x = &inputs[i].X * &zs[i];
+            let y = &inputs[i].Y * &zs[i];
+            AffinePoint { x, y }.compress()
+        })
+    }
     /// Compress several `EdwardsPoint`s into `CompressedEdwardsY` format, using a batch inversion
     /// for a significant speedup.
     #[cfg(feature = "alloc")]
-    pub fn compress_batch(inputs: &[EdwardsPoint]) -> Vec<CompressedEdwardsY> {
+    pub fn compress_batch_alloc(inputs: &[EdwardsPoint]) -> Vec<CompressedEdwardsY> {
         let mut zs = inputs.iter().map(|input| input.Z).collect::<Vec<_>>();
-        FieldElement::batch_invert(&mut zs);
+        FieldElement::invert_batch_alloc(&mut zs);
 
         inputs
             .iter()
@@ -2177,7 +2189,7 @@
 
         #[cfg(feature = "alloc")]
         {
-            let compressed = EdwardsPoint::compress_batch(&[EdwardsPoint::identity()]);
+            let compressed = EdwardsPoint::compress_batch_alloc(&[EdwardsPoint::identity()]);
             assert_eq!(&compressed, &[CompressedEdwardsY::identity()]);
         }
     }
@@ -2193,7 +2205,7 @@
             .map(|n| constants::ED25519_BASEPOINT_POINT * Scalar::from(n))
             .collect::<Vec<_>>();
         points.extend(core::iter::repeat_with(|| EdwardsPoint::random(&mut rng)).take(100));
-        let compressed = EdwardsPoint::compress_batch(&points);
+        let compressed = EdwardsPoint::compress_batch_alloc(&points);
 
         // Check that the batch-compressed points match the individually compressed ones
         for (point, compressed) in points.iter().zip(&compressed) {
diff --git a/curve25519-dalek/src/field.rs b/curve25519-dalek/src/field.rs
index a25a73780..714a599c0 100644
--- a/curve25519-dalek/src/field.rs
+++ b/curve25519-dalek/src/field.rs
@@ -203,17 +203,32 @@ impl FieldElement {
         (t19, t3)
     }
 
+    /// Given a slice of pub(crate)lic `FieldElements`, replace each with its inverse.
+    ///
+    /// When an input `FieldElement` is zero, its value is unchanged.
+    pub(crate) fn invert_batch<const N: usize>(inputs: &mut [FieldElement; N]) {
+        let mut scratch = [FieldElement::ONE; N];
+
+        Self::internal_invert_batch(inputs, &mut scratch);
+    }
+
     /// Given a slice of pub(crate)lic `FieldElements`, replace each with its inverse.
     ///
     /// When an input `FieldElement` is zero, its value is unchanged.
     #[cfg(feature = "alloc")]
-    pub(crate) fn batch_invert(inputs: &mut [FieldElement]) {
+    pub(crate) fn invert_batch_alloc(inputs: &mut [FieldElement]) {
+        let n = inputs.len();
+        let mut scratch = vec![FieldElement::ONE; n];
+
+        Self::internal_invert_batch(inputs, &mut scratch);
+    }
+
+    fn internal_invert_batch(inputs: &mut [FieldElement], scratch: &mut [FieldElement]) {
         // Montgomery’s Trick and Fast Implementation of Masked AES
         // Genelle, Prouff and Quisquater
         // Section 3.2
 
-        let n = inputs.len();
-        let mut scratch = vec![FieldElement::ONE; n];
+        debug_assert_eq!(inputs.len(), scratch.len());
 
         // Keep an accumulator of all of the previous products
         let mut acc = FieldElement::ONE;
@@ -234,12 +249,12 @@
 
         // Pass through the vector backwards to compute the inverses
         // in place
-        for (input, scratch) in inputs.iter_mut().rev().zip(scratch.into_iter().rev()) {
+        for (input, scratch) in inputs.iter_mut().rev().zip(scratch.iter_mut().rev()) {
             let tmp = &acc * input; // input <- acc * scratch, then acc <- tmp
 
             // Again, we skip zeros in a constant-time way
             let nz = !input.is_zero();
-            input.conditional_assign(&(&acc * &scratch), nz);
+            input.conditional_assign(&(&acc * scratch), nz);
             acc.conditional_assign(&tmp, nz);
         }
     }
@@ -553,7 +568,7 @@
 
     #[test]
     #[cfg(feature = "alloc")]
-    fn batch_invert_a_matches_nonbatched() {
+    fn invert_batch_a_matches_nonbatched() {
         let a = FieldElement::from_bytes(&A_BYTES);
         let ap58 = FieldElement::from_bytes(&AP58_BYTES);
         let asq = FieldElement::from_bytes(&ASQ_BYTES);
@@ -562,7 +577,7 @@
         let a2 = &a + &a;
         let a_list = vec![a, ap58, asq, ainv, a0, a2];
         let mut ainv_list = a_list.clone();
-        FieldElement::batch_invert(&mut ainv_list[..]);
+        FieldElement::invert_batch_alloc(&mut ainv_list[..]);
         for i in 0..6 {
             assert_eq!(a_list[i].invert(), ainv_list[i]);
         }
@@ -671,8 +686,8 @@
 
     #[test]
     #[cfg(feature = "alloc")]
-    fn batch_invert_empty() {
-        FieldElement::batch_invert(&mut []);
+    fn invert_batch_empty() {
+        FieldElement::invert_batch_alloc(&mut []);
     }
 
     // The following two consts were generated with the following sage script:
diff --git a/curve25519-dalek/src/ristretto.rs b/curve25519-dalek/src/ristretto.rs
index 8b867930d..9be7a8a88 100644
--- a/curve25519-dalek/src/ristretto.rs
+++ b/curve25519-dalek/src/ristretto.rs
@@ -606,7 +606,7 @@
 
         let mut invs: Vec<FieldElement> = states.iter().map(|state| state.efgh()).collect();
 
-        FieldElement::batch_invert(&mut invs[..]);
+        FieldElement::invert_batch_alloc(&mut invs[..]);
 
         states
             .iter()