Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -176,6 +176,8 @@ jobs:
docs:
name: Check Rust doc
runs-on: ubuntu-latest
env:
RUSTDOCFLAGS: -D warnings

steps:
- uses: actions/checkout@v4.2.2
Expand Down
35 changes: 20 additions & 15 deletions .mergify.yml
Original file line number Diff line number Diff line change
@@ -1,28 +1,33 @@
queue_rules:
- name: main
allow_inplace_checks: True
allow_checks_interruption: True
speculative_checks: 1
batch_size: 2
# Wait for a few minutes to embark 2 tickets together in a merge train
batch_max_wait_time: "3 minutes"
conditions:
queue_conditions:
- base=main
- -draft
- label!=do-not-merge
merge_conditions:
# Mergify automatically applies status check, approval, and conversation rules,
# which are the same as the GitHub main branch protection rules
# https://docs.mergify.com/conditions/#about-branch-protection
- base=main
allow_inplace_checks: true
batch_size: 2
# Wait for a few minutes to embark 2 tickets together in a merge train
batch_max_wait_time: "3 minutes"
merge_method: squash

pull_request_rules:
- name: main queue triggered when CI passes with 1 review
conditions: []
actions:
queue:

priority_rules:
- name: Priority rule from queue `main`
conditions:
# This queue handles a PR if:
# - it targets main
# - is not in draft
# including automated dependabot PRs.
- base=main
- -draft
- label!=do-not-merge
actions:
queue:
name: main
method: squash
allow_checks_interruption: true

merge_queue:
max_parallel_checks: 1
2 changes: 1 addition & 1 deletion book/src/tutorial/refreshing-shares.md
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ The refreshed `KeyPackage` contents must be stored securely and the original
Applications should first ensure that all participants who refreshed their
`KeyPackages` were actually able to do so successfully, before deleting their old
`KeyPackages`. How this is done is up to the application; it might require
sucessfully generating a signature with all of those participants.
successfully generating a signature with all of those participants.
```

```admonish danger
Expand Down
8 changes: 4 additions & 4 deletions book/src/zcash/ywallet-demo.md
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,9 @@ cargo run --bin trusted-dealer -- -C redpallas

This will by default generate a 2-of-3 key shares. The public key package
will be written to `public-key-package.json`, while key packages will be
written to `key-package-1.json` through `-3`. You can change the threhsold,
written to `key-package-1.json` through `-3`. You can change the threshold,
number of shares and file names using the command line; append `-- -h`
to the commend above for the command line help.
to the command above for the command line help.

```admonish info
If you want to use DKG instead of Trusted Dealer, instead of the command above,
Expand Down Expand Up @@ -198,7 +198,7 @@ cargo run --bin participant -- -C redpallas --http --key-package key-package-1.j
```

(We are using "alice" again. There's nothing stopping a Coordinator from being a
Partcipant too!)
Participant too!)

### Participant 2 (bob)

Expand All @@ -212,7 +212,7 @@ cargo run --bin participant -- -C redpallas --http --key-package key-package-2.j
### Coordinator

Go back to the Coordinator CLI. The protocol should run and complete
succesfully. It will print the final FROST-generated signature. Hurrah! Copy it
successfully. It will print the final FROST-generated signature. Hurrah! Copy it
(just the hex value).

Go back to the signer and paste the signature. It will write the raw signed
Expand Down
2 changes: 1 addition & 1 deletion frost-core/src/identifier.rs
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ where
/// Deserialize an Identifier from a serialized buffer.
/// Returns an error if it attempts to deserialize zero.
pub fn deserialize(bytes: &[u8]) -> Result<Self, Error<C>> {
Ok(Self(SerializableScalar::deserialize(bytes)?))
Self::new(SerializableScalar::deserialize(bytes)?.0)
}
}

Expand Down
2 changes: 1 addition & 1 deletion frost-core/src/keys.rs
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,7 @@ where
Self(coefficients)
}

/// Returns serialized coefficent commitments
/// Returns serialized coefficient commitments
pub fn serialize(&self) -> Result<Vec<Vec<u8>>, Error<C>> {
self.0
.iter()
Expand Down
269 changes: 265 additions & 4 deletions frost-core/src/keys/refresh.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,18 @@ use alloc::collections::BTreeMap;
use alloc::vec::Vec;

use crate::{
keys::dkg::{compute_proof_of_knowledge, round1, round2},
keys::{
generate_coefficients, generate_secret_shares, validate_num_of_signers,
CoefficientCommitment, PublicKeyPackage, SigningKey, SigningShare, VerifyingShare,
evaluate_polynomial, generate_coefficients, generate_secret_polynomial,
generate_secret_shares, validate_num_of_signers, CoefficientCommitment, PublicKeyPackage,
SigningKey, SigningShare, VerifyingShare,
},
Ciphersuite, CryptoRng, Error, Field, Group, Identifier, RngCore,
Ciphersuite, CryptoRng, Error, Field, Group, Header, Identifier, RngCore,
};

use super::{KeyPackage, SecretShare, VerifiableSecretSharingCommitment};
use core::iter;

use super::{dkg::round1::Package, KeyPackage, SecretShare, VerifiableSecretSharingCommitment};

/// Generates new zero key shares and a public key package using a trusted
/// dealer Building a new public key package is done by taking the verifying
Expand Down Expand Up @@ -114,3 +118,260 @@ pub fn refresh_share<C: Ciphersuite>(

Ok(new_key_package)
}

/// Part 1 of refresh share with DKG. A refreshing key (the zero scalar) is
/// generated, and a round 1 secret package and broadcast package are derived
/// from it. The identity commitment (the commitment to the zero constant
/// term) is removed from the packages so it is never transmitted.
///
/// Returns the participant's round 1 secret package (to be kept locally) and
/// the round 1 package to send to the other participants, or an error if the
/// signer-count parameters are invalid.
pub fn refresh_dkg_part_1<C: Ciphersuite, R: RngCore + CryptoRng>(
    identifier: Identifier<C>,
    max_signers: u16,
    min_signers: u16,
    mut rng: R,
) -> Result<(round1::SecretPackage<C>, round1::Package<C>), Error<C>> {
    validate_num_of_signers::<C>(min_signers, max_signers)?;

    // Build refreshing shares. Sharing the zero scalar means the refreshed
    // shares will sum to the same group secret as before.
    let refreshing_key = SigningKey {
        scalar: <<C::Group as Group>::Field>::zero(),
    };

    // Round 1, Step 1
    let coefficients = generate_coefficients::<C, R>(min_signers as usize - 1, &mut rng);

    let (coefficients, commitment) =
        generate_secret_polynomial(&refreshing_key, max_signers, min_signers, coefficients)?;

    // Remove identity element from coefficients: the constant term is zero,
    // so its commitment is the group identity and carries no information.
    let mut coeff_comms = commitment.0;
    coeff_comms.remove(0);
    // `coeff_comms` is moved here; the previous `.clone()` was redundant.
    let commitment = VerifiableSecretSharingCommitment::new(coeff_comms);

    let proof_of_knowledge =
        compute_proof_of_knowledge(identifier, &coefficients, &commitment, &mut rng)?;

    let secret_package = round1::SecretPackage {
        identifier,
        coefficients: coefficients.clone(),
        commitment: commitment.clone(),
        min_signers,
        max_signers,
    };
    let package = round1::Package {
        header: Header::default(),
        commitment,
        proof_of_knowledge,
    };

    Ok((secret_package, package))
}

/// Part 2 of refresh share with DKG. The identity commitment needs to be
/// added back into the secret package and into each received round 1 package,
/// since it was stripped in part 1.
///
/// Takes this participant's round 1 secret package and the round 1 packages
/// received from every other participant. Returns the round 2 secret package
/// (to be kept locally) and a map of round 2 packages, one to send to each
/// other participant.
pub fn refresh_dkg_part2<C: Ciphersuite>(
    mut secret_package: round1::SecretPackage<C>,
    round1_packages: &BTreeMap<Identifier<C>, round1::Package<C>>,
) -> Result<
    (
        round2::SecretPackage<C>,
        BTreeMap<Identifier<C>, round2::Package<C>>,
    ),
    Error<C>,
> {
    // We must have received a package from every other participant.
    if round1_packages.len() != (secret_package.max_signers - 1) as usize {
        return Err(Error::IncorrectNumberOfPackages);
    }

    // The identity commitment needs to be added to the VSS commitment for secret package
    let identity_commitment: Vec<CoefficientCommitment<C>> =
        vec![CoefficientCommitment::new(C::Group::identity())];

    let refreshing_secret_share_commitments: Vec<CoefficientCommitment<C>> = identity_commitment
        .into_iter()
        .chain(secret_package.commitment.0.clone())
        .collect();

    secret_package.commitment =
        VerifiableSecretSharingCommitment::<C>::new(refreshing_secret_share_commitments);

    let mut round2_packages = BTreeMap::new();

    for (sender_identifier, round1_package) in round1_packages {
        // The identity commitment needs to be added to the VSS commitment for every round 1 package
        let identity_commitment: Vec<CoefficientCommitment<C>> =
            vec![CoefficientCommitment::new(C::Group::identity())];

        let refreshing_share_commitments: Vec<CoefficientCommitment<C>> = identity_commitment
            .into_iter()
            .chain(round1_package.commitment.0.clone())
            .collect();

        // With the identity restored, each commitment must have exactly
        // `min_signers` coefficients. (No clone needed just to read the
        // length; the previous `.clone().len()` copied the whole vector.)
        if refreshing_share_commitments.len() != secret_package.min_signers as usize {
            return Err(Error::IncorrectNumberOfCommitments);
        }

        let ell = *sender_identifier;

        // Round 1, Step 5
        // We don't need to verify the proof of knowledge

        // Round 2, Step 1
        //
        // > Each P_i securely sends to each other participant P_ℓ a secret share (ℓ, f_i(ℓ)),
        // > deleting f_i and each share afterward except for (i, f_i(i)),
        // > which they keep for themselves.
        let signing_share = SigningShare::from_coefficients(&secret_package.coefficients, ell);

        round2_packages.insert(
            ell,
            round2::Package {
                header: Header::default(),
                signing_share,
            },
        );
    }
    // f_i(i): our own share of our own refreshing polynomial, kept locally.
    let fii = evaluate_polynomial(secret_package.identifier, &secret_package.coefficients);

    Ok((
        round2::SecretPackage {
            identifier: secret_package.identifier,
            commitment: secret_package.commitment,
            secret_share: fii,
            min_signers: secret_package.min_signers,
            max_signers: secret_package.max_signers,
        },
        round2_packages,
    ))
}

/// This is the step that actually refreshes the shares. New public key packages
/// and key packages are created.
///
/// The refreshing (zero-secret) shares produced by the DKG rounds are verified
/// and summed, then added to the old signing share and old verifying shares.
/// The group verifying key is unchanged, since the refreshing polynomial's
/// constant term is zero.
pub fn refresh_dkg_shares<C: Ciphersuite>(
    round2_secret_package: &round2::SecretPackage<C>,
    round1_packages: &BTreeMap<Identifier<C>, round1::Package<C>>,
    round2_packages: &BTreeMap<Identifier<C>, round2::Package<C>>,
    old_pub_key_package: PublicKeyPackage<C>,
    old_key_package: KeyPackage<C>,
) -> Result<(KeyPackage<C>, PublicKeyPackage<C>), Error<C>> {
    // Add identity commitment back into round1_packages, since part 1
    // stripped it before transmission.
    let mut new_round_1_packages = BTreeMap::new();
    for (sender_identifier, round1_package) in round1_packages {
        // The identity commitment needs to be added to the VSS commitment for every round 1 package
        let identity_commitment: Vec<CoefficientCommitment<C>> =
            vec![CoefficientCommitment::new(C::Group::identity())];

        let refreshing_share_commitments: Vec<CoefficientCommitment<C>> = identity_commitment
            .into_iter()
            .chain(round1_package.commitment.0.clone())
            .collect();

        let new_commitments =
            VerifiableSecretSharingCommitment::<C>::new(refreshing_share_commitments);

        let new_round_1_package = Package {
            header: round1_package.header,
            commitment: new_commitments,
            proof_of_knowledge: round1_package.proof_of_knowledge,
        };

        new_round_1_packages.insert(*sender_identifier, new_round_1_package);
    }

    // Sanity checks: one round 1 package per other participant, and the
    // round 2 packages must come from exactly the same set of senders.
    if new_round_1_packages.len() != (round2_secret_package.max_signers - 1) as usize {
        return Err(Error::IncorrectNumberOfPackages);
    }
    if new_round_1_packages.len() != round2_packages.len() {
        return Err(Error::IncorrectNumberOfPackages);
    }
    if new_round_1_packages
        .keys()
        .any(|id| !round2_packages.contains_key(id))
    {
        return Err(Error::IncorrectPackage);
    }

    // Accumulator for the sum of all received refreshing shares f_ℓ(i).
    let mut signing_share = <<C::Group as Group>::Field>::zero();

    for (sender_identifier, round2_package) in round2_packages {
        // Round 2, Step 2
        //
        // > Each P_i verifies their shares by calculating:
        // > g^{f_ℓ(i)} ≟ ∏^{t−1}_{k=0} φ^{i^k mod q}_{ℓk}, aborting if the
        // > check fails.
        let ell = *sender_identifier;
        let f_ell_i = round2_package.signing_share;

        let commitment = &new_round_1_packages
            .get(&ell)
            .ok_or(Error::PackageNotFound)?
            .commitment;

        // The verification is exactly the same as the regular SecretShare verification;
        // however the required components are in different places.
        // Build a temporary SecretShare so that we can call verify().
        let secret_share = SecretShare {
            header: Header::default(),
            identifier: round2_secret_package.identifier,
            signing_share: f_ell_i,
            commitment: commitment.clone(),
        };

        // Verify the share. We don't need the result.
        let _ = secret_share.verify()?;

        // Round 2, Step 3
        //
        // > Each P_i calculates their long-lived private signing share by computing
        // > s_i = ∑^n_{ℓ=1} f_ℓ(i), stores s_i securely, and deletes each f_ℓ(i).
        signing_share = signing_share + f_ell_i.to_scalar();
    }

    // Include our own refreshing share f_i(i) from round 2.
    signing_share = signing_share + round2_secret_package.secret_share;

    // Build new signing share: old share plus the sum of the (zero-sharing)
    // refreshing shares, leaving the group secret unchanged.
    let old_signing_share = old_key_package.signing_share.to_scalar();
    signing_share = signing_share + old_signing_share;
    let signing_share = SigningShare::new(signing_share);

    // Round 2, Step 4
    //
    // > Each P_i calculates their public verification share Y_i = g^{s_i}.
    let verifying_share = signing_share.into();

    // Gather every participant's commitment (theirs plus our own) to derive
    // the verifying shares of the zero-secret sharing.
    let commitments: BTreeMap<_, _> = new_round_1_packages
        .iter()
        .map(|(id, package)| (*id, &package.commitment))
        .chain(iter::once((
            round2_secret_package.identifier,
            &round2_secret_package.commitment,
        )))
        .collect();

    let zero_shares_public_key_package = PublicKeyPackage::from_dkg_commitments(&commitments)?;

    // New verifying share for each participant = old verifying share plus the
    // verifying share of their zero-secret refreshing share.
    let mut new_verifying_shares = BTreeMap::new();

    for (identifier, verifying_share) in zero_shares_public_key_package.verifying_shares {
        let new_verifying_share = verifying_share.to_element()
            + old_pub_key_package
                .verifying_shares
                .get(&identifier)
                .ok_or(Error::UnknownIdentifier)?
                .to_element();
        new_verifying_shares.insert(identifier, VerifyingShare::new(new_verifying_share));
    }

    // The group verifying key is carried over unchanged from the old package.
    let public_key_package = PublicKeyPackage {
        header: old_pub_key_package.header,
        verifying_shares: new_verifying_shares,
        verifying_key: old_pub_key_package.verifying_key,
    };

    let key_package = KeyPackage {
        header: Header::default(),
        identifier: round2_secret_package.identifier,
        signing_share,
        verifying_share,
        verifying_key: public_key_package.verifying_key,
        min_signers: round2_secret_package.min_signers,
    };

    Ok((key_package, public_key_package))
}
Loading
Loading