6 changes: 5 additions & 1 deletion .config/rust-f3.dic
@@ -41,4 +41,8 @@ G1
G2
JSON
CBOR
TODO
TODO
coef_i
sig_i
pub_key_i
+
⚠️ Potential issue | 🟡 Minor

Remove the stray + character.

The + character does not belong in a spell-check dictionary file. Dictionary files typically contain one term per line without special characters.

Apply this diff to remove it:

-+
📝 Committable suggestion


Suggested change
+
# (line 48 removed; no content remains here)
🤖 Prompt for AI Agents
In .config/rust-f3.dic around line 48, remove the stray "+" character that was
accidentally added; open the file, delete the lone "+" on line 48 so the
dictionary contains only valid entries (one term per line), save the file, and
ensure no other stray punctuation remains.

2 changes: 2 additions & 0 deletions Cargo.toml
@@ -14,6 +14,7 @@ ahash = "0.8"
anyhow = "1"
base32 = "0.5.1"
base64 = "0.22"
blake2 = { git = "https://github.com/huitseeker/hashes.git", branch = "blake2x-pr", features = ["blake2x"] }
bls-signatures = { version = "0.15" }
bls12_381 = "0.8"
cid = { version = "0.10.1", features = ["std"] }
@@ -27,6 +28,7 @@ num-bigint = { version = "0.4.6", features = ["serde"] }
num-traits = "0.2.19"
parking_lot = "0.12"
rand = "0.8"
rayon = "1.10"
serde = { version = "1", features = ["derive"] }
serde_cbor = "0.11.2"
serde_json = { version = "1", features = ["raw_value"] }
2 changes: 2 additions & 0 deletions blssig/Cargo.toml
@@ -10,11 +10,13 @@ edition.workspace = true
rust-version.workspace = true

[dependencies]
blake2.workspace = true
bls-signatures.workspace = true
bls12_381.workspace = true
filecoin-f3-gpbft = { path = "../gpbft", version = "0.1.0" }
hashlink.workspace = true
parking_lot.workspace = true
rayon.workspace = true
thiserror.workspace = true

[dev-dependencies]
120 changes: 99 additions & 21 deletions blssig/src/bdn/mod.rs
@@ -2,16 +2,24 @@
// SPDX-License-Identifier: Apache-2.0, MIT

//! BDN (Boneh-Drijvers-Neven) signature aggregation scheme, for preventing rogue public-key attacks.
//! Such attacks allow an adversary to craft a rogue public key and then produce a verifiable
//! signature for an aggregate it did not legitimately sign. BDN prevents this by weighting each
//! signature in the aggregate with a coefficient derived from the full set of public keys.
//!
//! NOTE: currently uses standard BLS aggregation without coefficient weighting, hence returns incorrect values compared to go-f3.
//! See the papers:
//! `https://eprint.iacr.org/2018/483.pdf`
//! `https://crypto.stanford.edu/~dabo/pubs/papers/BLSmultisig.html`
//!
use crate::verifier::BLSError;
use bls_signatures::{PublicKey, Signature};
use bls12_381::{G1Projective, G2Affine, G2Projective};
use blake2::Blake2xs;
use blake2::digest::{ExtendableOutput, Update, XofReader};
use bls_signatures::{PublicKey, Serialize, Signature};
use bls12_381::{G1Projective, G2Projective, Scalar};
use rayon::prelude::*;

/// BDN aggregation context for managing signature and public key aggregation
pub struct BDNAggregation {
pub_keys: Vec<PublicKey>,
pub(crate) coefficients: Vec<Scalar>,
pub(crate) terms: Vec<PublicKey>,
}

impl BDNAggregation {
Expand All @@ -20,43 +28,113 @@ impl BDNAggregation {
return Err(BLSError::EmptyPublicKeys);
}

Ok(Self { pub_keys })
let coefficients = Self::calc_coefficients(&pub_keys)?;
let terms = Self::calc_terms(&pub_keys, &coefficients);

Ok(Self {
coefficients,
terms,
})
}

/// Aggregates signatures using standard BLS aggregation
/// TODO: Implement BDN aggregation scheme: https://github.com/ChainSafe/rust-f3/issues/29
pub fn aggregate_sigs(&self, sigs: Vec<Signature>) -> Result<Signature, BLSError> {
if sigs.len() != self.pub_keys.len() {
/// Aggregates signatures using BDN aggregation with coefficients.
/// Computes: sum((coef_i + 1) * sig_i) for signatures at the given indices
pub fn aggregate_sigs(
&self,
indices: &[u64],
sigs: &[Signature],
) -> Result<Signature, BLSError> {
if sigs.len() != indices.len() {
return Err(BLSError::LengthMismatch {
pub_keys: self.pub_keys.len(),
pub_keys: indices.len(),
sigs: sigs.len(),
});
}

// Standard BLS aggregation
let mut agg_point = G2Projective::identity();
for sig in sigs {
let sig: G2Affine = sig.into();
agg_point += sig;
for (sig, &idx) in sigs.iter().zip(indices.iter()) {
let idx = idx as usize;
if idx >= self.coefficients.len() {
return Err(BLSError::SignerIndexOutOfRange(idx));
}

let coef = self.coefficients[idx];
let sig_point: G2Projective = (*sig).into();
let sig_c = sig_point * coef;
let sig_c = sig_c + sig_point;

agg_point += sig_c;
}

// Convert back to Signature
let agg_sig: Signature = agg_point.into();
Ok(agg_sig)
}

/// Aggregates public keys using standard BLS aggregation
/// TODO: Implement BDN aggregation scheme: https://github.com/ChainSafe/rust-f3/issues/29
pub fn aggregate_pub_keys(&self) -> Result<PublicKey, BLSError> {
// Standard BLS aggregation
/// Aggregates the public keys at the given indices using BDN aggregation with coefficients.
/// Computes: sum((coef_i + 1) * pub_key_i)
pub fn aggregate_pub_keys(&self, indices: &[u64]) -> Result<PublicKey, BLSError> {
// Sum of pre-computed terms (which are already (coef_i + 1) * pub_key_i)
let mut agg_point = G1Projective::identity();
for pub_key in &self.pub_keys {
let pub_key_point: G1Projective = (*pub_key).into();
agg_point += pub_key_point;
for &idx in indices {
let idx = idx as usize;
if idx >= self.terms.len() {
return Err(BLSError::SignerIndexOutOfRange(idx));
}
let term_point: G1Projective = self.terms[idx].into();
agg_point += term_point;
}

// Convert back to PublicKey
let agg_pub_key: PublicKey = agg_point.into();
Ok(agg_pub_key)
}

pub fn calc_coefficients(pub_keys: &[PublicKey]) -> Result<Vec<Scalar>, BLSError> {
let mut hasher = Blake2xs::new(0xFFFF);

// Hash all public keys
for pub_key in pub_keys {
let bytes = pub_key.as_bytes();
hasher.update(&bytes);
}

// Read 16 bytes per public key
let mut reader = hasher.finalize_xof();
let mut output = vec![0u8; pub_keys.len() * 16];
reader.read(&mut output);

// Convert each consecutive 16-byte chunk to a scalar
let mut coefficients = Vec::with_capacity(pub_keys.len());
for i in 0..pub_keys.len() {
let chunk = &output[i * 16..(i + 1) * 16];

// Convert 16 bytes to 32 bytes, for scalar (pad with zeros)
let mut bytes_32 = [0u8; 32];
bytes_32[..16].copy_from_slice(chunk);

// BLS12-381 scalars expect a little-endian byte representation
let scalar = Scalar::from_bytes(&bytes_32);
if scalar.is_some().into() {
coefficients.push(scalar.unwrap());
} else {
return Err(BLSError::InvalidScalar);
}
}

Ok(coefficients)
}
Comment on lines +93 to +126

🛠️ Refactor suggestion | 🟠 Major

🧩 Analysis chain

Coefficient derivation: tighten XOF usage, domain separation, and scalar mapping.

  • Use exact out_len, not 0xFFFF.
  • Add a domain separation tag.
  • Consider including the message per BDN variant (compatibility dependent).
  • Derive scalars uniformly via from_bytes_wide with 64 bytes per coeff (128‑bit is OK but sub‑optimal).

Apply:

-    pub fn calc_coefficients(pub_keys: &[PublicKey]) -> Result<Vec<Scalar>, BLSError> {
-        let mut hasher = Blake2xs::new(0xFFFF);
+    pub fn calc_coefficients(pub_keys: &[PublicKey]) -> Result<Vec<Scalar>, BLSError> {
+        // 64 bytes per coefficient; uniform reduction via from_bytes_wide
+        let out_len = pub_keys.len() * 64;
+        let mut hasher = Blake2xs::new(out_len);
 
-        // Hash all public keys
+        // Domain separation + all public keys (compressed)
+        hasher.update(b"F3-BDN-COEFF-V1");
         for pub_key in pub_keys {
             let bytes = pub_key.as_bytes();
             hasher.update(&bytes);
         }
 
-        // Read 16 bytes per public key
         let mut reader = hasher.finalize_xof();
-        let mut output = vec![0u8; pub_keys.len() * 16];
+        let mut output = vec![0u8; out_len];
         reader.read(&mut output);
 
-        // Convert every consecutive 16 bytes chunk to a scalar
         let mut coefficients = Vec::with_capacity(pub_keys.len());
-        for i in 0..pub_keys.len() {
-            let chunk = &output[i * 16..(i + 1) * 16];
-
-            // Convert 16 bytes to 32 bytes, for scalar (pad with zeros)
-            let mut bytes_32 = [0u8; 32];
-            bytes_32[..16].copy_from_slice(chunk);
-
-            // BLS12-381 scalars expects little-endian byte representation
-            let scalar = Scalar::from_bytes(&bytes_32);
-            if scalar.is_some().into() {
-                coefficients.push(scalar.unwrap());
-            } else {
-                return Err(BLSError::InvalidScalar);
-            }
+        for i in 0..pub_keys.len() {
+            let chunk = &output[i * 64..(i + 1) * 64];
+            let mut wide = [0u8; 64];
+            wide.copy_from_slice(chunk);
+            let scalar = Scalar::from_bytes_wide(&wide); // uniform mod r
+            coefficients.push(scalar);
         }
 
         Ok(coefficients)
     }

Verification needed: does go-f3 include message in the coefficient hash (L vs L||msg)? Aligning is critical for cross‑impl compatibility.


🌐 Web query:

In go-f3's BDN multi-signature implementation, are coefficients derived from H(L) only, or H(L || msg)? Provide file/line refs if possible.

💡 Result:

Short answer: coefficients are derived from H(L) only (i.e., from the public-key list), not H(L || msg).

Evidence:

  • kyber's BDN docs/state that AggregateSignatures uses c = H(pk) (coefficient = hash of the public key / key list). [1]
  • go-f3 uses a BDN-style BLS implementation in its blssig module (BDN aggregation is used for multisig). [2][3]

References:

  1. kyber bdn package docs — AggregateSignatures: "aggregates the signatures using a coefficient for each one of them where c = H(pk)". (pkg: github.com/.../kyber/sign/bdn). [1]
  2. go-f3 repository — blssig module (uses BDN/BDN-style aggregation). (go-f3/blssig). [2]
  3. Filecoin / bls/BDN references in Filecoin docs (BLS aggregation / BDN multisig). [3]

If you want I can open the exact kyber bdn.go source and paste the exact file+line reference.


Tighten XOF usage and scalar mapping; maintain BDN compatibility

  • Use exact out_len = pub_keys.len() * 64 and initialize Blake2xs with that.
  • Prepend a clear domain-separation tag (e.g. b"F3-BDN-COEFF-V1") before hashing keys.
  • Read 64 bytes per key and derive scalars via Scalar::from_bytes_wide for uniform reduction.
  • Derive coefficients from H(L) only (no message) to match go-f3’s BDN (c = H(pk list) only).
🤖 Prompt for AI Agents
In blssig/src/bdn/mod.rs around lines 85-118, tighten XOF usage and scalar
mapping: initialize the Blake2xs XOF to produce out_len = pub_keys.len() * 64
bytes (not 0xFFFF), prepend a domain-separation tag (e.g. b"F3-BDN-COEFF-V1") to
the hasher before updating with each pubkey, finalize the XOF and read 64 bytes
per public key into the output buffer, and for each 64-byte chunk derive the
coefficient using Scalar::from_bytes_wide (instead of padding 16→32 and
from_bytes) so reduction is uniform; return InvalidScalar on any mapping failure
and ensure only H(L) (the hashed list with the domain tag) is used to derive
coefficients (no extra message).


pub fn calc_terms(pub_keys: &[PublicKey], coefficients: &[Scalar]) -> Vec<PublicKey> {
pub_keys
.par_iter()
.enumerate()
.map(|(i, pub_key)| {
let pub_key_point: G1Projective = (*pub_key).into();
let pub_c = pub_key_point * coefficients[i];
let term = pub_c + pub_key_point;
term.into()
})
.collect()
}
}
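For reference, a compact statement of what the aggregation above computes, as described in its doc comments (the (coef_i + 1) weighting follows the kyber-style BDN variant referenced in the review comment above): coefficients are derived from the hashed list of all public keys, and for a signer set S,

\[
c_i = \mathrm{Blake2Xs}(pk_1 \,\|\, \cdots \,\|\, pk_n)\big[\,16i \,..\, 16(i+1)\,\big] \bmod r, \qquad
\sigma_{\mathrm{agg}} = \sum_{i \in S} (c_i + 1)\,\sigma_i, \qquad
pk_{\mathrm{agg}} = \sum_{i \in S} (c_i + 1)\,pk_i .
\]

Verification then reduces to a single BLS check of sigma_agg against pk_agg and the payload (see verify_aggregate in blssig/src/verifier/mod.rs below).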
63 changes: 49 additions & 14 deletions blssig/src/verifier/mod.rs
@@ -6,6 +6,7 @@ use filecoin_f3_gpbft::PubKey;
use filecoin_f3_gpbft::api::Verifier;
use hashlink::LruCache;
use parking_lot::RwLock;
use std::sync::Arc;
use thiserror::Error;

use crate::bdn::BDNAggregation;
@@ -17,6 +18,8 @@
pub enum BLSError {
#[error("empty public keys provided")]
EmptyPublicKeys,
#[error("empty signers provided")]
EmptySigners,
#[error("empty signatures provided")]
EmptySignatures,
#[error("invalid public key length: expected {BLS_PUBLIC_KEY_LENGTH} bytes, got {0}")]
@@ -31,17 +34,24 @@ pub enum BLSError {
SignatureVerificationFailed,
#[error("mismatched number of public keys and signatures: {pub_keys} != {sigs}")]
LengthMismatch { pub_keys: usize, sigs: usize },
#[error("invalid scalar value")]
InvalidScalar,
#[error("signer index {0} is out of range")]
SignerIndexOutOfRange(usize),
}

/// BLS signature verifier using BDN aggregation scheme
///
/// This verifier implements the same scheme used by `go-f3/blssig`, with:
/// - BLS12_381 curve
/// - G1 for public keys, G2 for signatures
/// - G1 for public keys, G2 for signatures
/// - BDN aggregation for rogue-key attack prevention
pub struct BLSVerifier {
/// Cache for deserialized public key points to avoid expensive repeated operations
point_cache: RwLock<LruCache<Vec<u8>, PublicKey>>,
/// Cache for the current power table's BDN aggregation.
/// Only the current one is cached, since power tables rarely repeat after rotation.
bdn_cache: RwLock<Option<(Vec<PubKey>, Arc<BDNAggregation>)>>,
}

impl Default for BLSVerifier {
@@ -64,6 +74,7 @@ impl BLSVerifier {
Self {
// key size: 48, value size: 196, total estimated: 1.83 MiB
point_cache: RwLock::new(LruCache::new(MAX_POINT_CACHE_SIZE)),
bdn_cache: RwLock::new(None),
}
}

@@ -114,6 +125,34 @@
fn deserialize_signature(&self, sig: &[u8]) -> Result<Signature, BLSError> {
Signature::from_bytes(sig).map_err(BLSError::SignatureDeserialization)
}

/// Gets a cached BDN aggregation or creates and caches it
fn get_or_cache_bdn(&self, power_table: &[PubKey]) -> Result<Arc<BDNAggregation>, BLSError> {
// Check cache first
if let Some((cached_power_table, cached_bdn)) = self.bdn_cache.read().as_ref() {
if cached_power_table == power_table {
return Ok(cached_bdn.clone());
}
}

// Deserialize and create new BDN aggregation
let mut typed_pub_keys = vec![];
for pub_key in power_table {
if pub_key.0.len() != BLS_PUBLIC_KEY_LENGTH {
return Err(BLSError::InvalidPublicKeyLength(pub_key.0.len()));
}
typed_pub_keys.push(self.get_or_cache_public_key(&pub_key.0)?);
}

let bdn = Arc::new(BDNAggregation::new(typed_pub_keys)?);

// Cache it
self.bdn_cache
.write()
.replace((power_table.to_vec(), bdn.clone()));

Ok(bdn)
}
}

impl Verifier for BLSVerifier {
@@ -154,31 +193,27 @@
}

let bdn = BDNAggregation::new(typed_pub_keys)?;
let agg_sig = bdn.aggregate_sigs(typed_sigs)?;
let indices: Vec<u64> = (0..typed_sigs.len() as u64).collect();
let agg_sig = bdn.aggregate_sigs(&indices, &typed_sigs)?;
Ok(agg_sig.as_bytes())
}

fn verify_aggregate(
&self,
payload: &[u8],
agg_sig: &[u8],
signers: &[PubKey],
power_table: &[PubKey],
signer_indices: &[u64],
) -> Result<(), Self::Error> {
if signers.is_empty() {
if power_table.is_empty() {
return Err(BLSError::EmptyPublicKeys);
}

let mut typed_pub_keys = vec![];
for pub_key in signers {
if pub_key.0.len() != BLS_PUBLIC_KEY_LENGTH {
return Err(BLSError::InvalidPublicKeyLength(pub_key.0.len()));
}

typed_pub_keys.push(self.get_or_cache_public_key(&pub_key.0)?);
if signer_indices.is_empty() {
return Err(BLSError::EmptySigners);
}

let bdn = BDNAggregation::new(typed_pub_keys)?;
let agg_pub_key = bdn.aggregate_pub_keys()?;
let bdn = self.get_or_cache_bdn(power_table)?;
let agg_pub_key = bdn.aggregate_pub_keys(signer_indices)?;
let agg_pub_key_bytes = PubKey(agg_pub_key.as_bytes().to_vec());
self.verify_single(&agg_pub_key_bytes, payload, agg_sig)
}
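A minimal caller-side sketch of the updated verify_aggregate signature (hypothetical variable names; previously the method took the pre-filtered signer keys, now it takes the full power table plus the signers' indices into it):

// Hypothetical caller, assuming `verifier`, `payload`, `agg_sig` and `power_table` are in scope.
// Old call shape: verifier.verify_aggregate(payload, &agg_sig, &signer_keys)?;
let signer_indices: Vec<u64> = vec![0, 2, 4];
verifier.verify_aggregate(payload, &agg_sig, &power_table, &signer_indices)?;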
47 changes: 47 additions & 0 deletions blssig/src/verifier/tests.rs
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0, MIT

use super::BLSVerifier;
use crate::bdn::BDNAggregation;
use bls_signatures::{PrivateKey, Serialize};
use filecoin_f3_gpbft::PubKey;
use filecoin_f3_gpbft::api::Verifier;
@@ -68,3 +69,49 @@ fn test_invalid_signature() {
"corrupted signature should fail verification"
);
}

#[test]
fn test_aggregate_signature_verification() {
let verifier = BLSVerifier::new();
let message = b"consensus message";

// Generate 10 validators for the power table
let mut signers = Vec::new();
let mut power_table = Vec::new();
for _ in 0..10 {
let private_key = PrivateKey::generate(&mut rand::thread_rng());
let signer = BLSSigner::new(private_key);
power_table.push(signer.public_key().clone());
signers.push(signer);
}

// Only 5 validators sign (indices 0, 2, 4, 6, 8)
let signers_subset = vec![0u64, 2, 4, 6, 8];
let sigs: Vec<Vec<u8>> = signers_subset
.iter()
.map(|&i| signers[i as usize].sign(message))
.collect();

// Aggregate using BDN with full power table
let typed_pub_keys: Vec<_> = power_table
.iter()
.map(|pk| bls_signatures::PublicKey::from_bytes(&pk.0).unwrap())
.collect();
let typed_sigs: Vec<_> = sigs
.iter()
.map(|sig| bls_signatures::Signature::from_bytes(sig).unwrap())
.collect();

let bdn = BDNAggregation::new(typed_pub_keys).expect("BDN creation should succeed");
let agg_sig = bdn
.aggregate_sigs(&signers_subset, &typed_sigs)
.expect("aggregation should succeed");

// Verify aggregate using full power table and signer indices
let result =
verifier.verify_aggregate(message, &agg_sig.as_bytes(), &power_table, &signers_subset);
assert!(
result.is_ok(),
"aggregate signature verification should succeed"
);
}
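A complementary negative-path sketch (not part of this PR), under the same setup as the test above: indices that do not match the set that actually signed should fail verification.

// Hypothetical extension of test_aggregate_signature_verification above.
let wrong_indices = vec![1u64, 3, 5, 7, 9];
let result = verifier.verify_aggregate(message, &agg_sig.as_bytes(), &power_table, &wrong_indices);
assert!(
    result.is_err(),
    "aggregate verification with mismatched signer indices should fail"
);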