
Commit 286ea1d

Move reading of stackerdb to refresh dkg call to make logic easier to follow
Signed-off-by: Jacinta Ferrant <[email protected]>
1 parent: d0c8871

2 files changed (+57 -36 lines)


stacks-signer/src/runloop.rs

Lines changed: 3 additions & 11 deletions
```diff
@@ -384,7 +384,9 @@ impl SignerRunLoop<Vec<OperationResult>, RunLoopCommand> for RunLoop {
                 continue;
             }
             if signer.approved_aggregate_public_key.is_none() {
-                if let Err(e) = signer.refresh_dkg(&self.stacks_client) {
+                if let Err(e) =
+                    signer.refresh_dkg(&self.stacks_client, res.clone(), current_reward_cycle)
+                {
                     error!("{signer}: failed to refresh DKG: {e}");
                 }
             }
@@ -415,16 +417,6 @@ impl SignerRunLoop<Vec<OperationResult>, RunLoopCommand> for RunLoop {
                     signer.commands.push_back(command.command);
                 }
             }
-            // Check if we missed any DKG messages due to a restart or being late to the party
-            // Note: We currently only check for DKG specific messages as we cannot rejoin a sign
-            // round due to a miner overwriting its own message slots (impossible to recover without every message)
-            if let Err(e) = signer.read_dkg_stackerdb_messages(
-                &self.stacks_client,
-                res.clone(),
-                current_reward_cycle,
-            ) {
-                error!("{signer}: failed to read stackerdb messages: {e}");
-            }
             // After processing event, run the next command for each signer
             signer.process_next_command(&self.stacks_client, current_reward_cycle);
         }
```
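The effect on the run loop is that the stackerdb catch-up read is no longer a separate, unconditional step; it now happens inside `refresh_dkg`, behind the same `approved_aggregate_public_key.is_none()` guard. A minimal, self-contained sketch of the new call shape, using stand-in types (`MockSigner`, the `u64` key, and the `String` error are illustrative assumptions, not the crate's real API):

```rust
use std::sync::mpsc::{channel, Sender};

// Stand-in types; the real code uses StacksClient, OperationResult, and
// ClientError from the stacks-signer crate.
struct StacksClient;
struct OperationResult;

struct MockSigner {
    approved_aggregate_public_key: Option<u64>,
}

impl MockSigner {
    // After this commit the stackerdb catch-up read happens inside
    // refresh_dkg rather than as a second step in the run loop.
    fn refresh_dkg(
        &mut self,
        _stacks_client: &StacksClient,
        _res: Sender<Vec<OperationResult>>,
        _current_reward_cycle: u64,
    ) -> Result<(), String> {
        // 1. Read the approved aggregate key from the contract; stop if set.
        // 2. Read any missed DKG messages from stackerdb.
        // 3. Decide whether a DKG command still needs to be queued.
        Ok(())
    }
}

fn main() {
    let (res, _rx) = channel::<Vec<OperationResult>>();
    let stacks_client = StacksClient;
    let current_reward_cycle = 42;
    let mut signer = MockSigner {
        approved_aggregate_public_key: None,
    };
    // The single call site that replaces the old refresh + read pair.
    if signer.approved_aggregate_public_key.is_none() {
        if let Err(e) = signer.refresh_dkg(&stacks_client, res.clone(), current_reward_cycle) {
            eprintln!("failed to refresh DKG: {e}");
        }
    }
}
```

Folding the read into `refresh_dkg` also means it only runs while no key is approved yet, instead of on every pass through the loop.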

stacks-signer/src/signer.rs

Lines changed: 54 additions & 25 deletions
```diff
@@ -241,7 +241,7 @@ impl Signer {
         if self.state != State::Uninitialized {
             // We should only read stackerdb if we are uninitialized
             return Ok(());
-        };
+        }
         let ordered_packets = self
             .stackerdb
             .get_dkg_packets(&self.signer_slot_ids)?
```
```diff
@@ -1403,14 +1403,44 @@ impl Signer {
         }
     }

-    /// Refresh DKG value and queue DKG command if necessary
-    pub fn refresh_dkg(&mut self, stacks_client: &StacksClient) -> Result<(), ClientError> {
-        // First check if we should queue DKG based on contract vote state and stackerdb transactions
-        let should_queue = self.should_queue_dkg(stacks_client)?;
-        // Before queueing the command, check one last time if DKG has been
-        // approved. It could have happened after the last call to
-        // `get_approved_aggregate_key` but before the theshold check in
-        // `should_queue_dkg`.
+    /// Refresh DKG and queue it if required
+    pub fn refresh_dkg(
+        &mut self,
+        stacks_client: &StacksClient,
+        res: Sender<Vec<OperationResult>>,
+        current_reward_cycle: u64,
+    ) -> Result<(), ClientError> {
+        // First attempt to retrieve the aggregate key from the contract.
+        self.update_approved_aggregate_key(stacks_client)?;
+        if self.approved_aggregate_public_key.is_some() {
+            return Ok(());
+        }
+        // Check stackerdb for any missed DKG messages to catch up our state.
+        self.read_dkg_stackerdb_messages(&stacks_client, res, current_reward_cycle)?;
+        // Check if we should still queue DKG
+        if !self.should_queue_dkg(stacks_client)? {
+            return Ok(());
+        }
+        // Because there could be a slight delay in reading pending transactions and a key being approved by the contract,
+        // check one last time if the approved key was set since we finished the should queue dkg call
+        self.update_approved_aggregate_key(stacks_client)?;
+        if self.approved_aggregate_public_key.is_some() {
+            return Ok(());
+        }
+        if self.commands.front() != Some(&Command::Dkg) {
+            info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command...");
+            self.commands.push_front(Command::Dkg);
+        } else {
+            debug!("{self}: DKG command already queued...");
+        }
+        Ok(())
+    }
+
+    /// Overwrites the approved aggregate key to the value in the contract, updating state accordingly
+    pub fn update_approved_aggregate_key(
+        &mut self,
+        stacks_client: &StacksClient,
+    ) -> Result<(), ClientError> {
         let old_dkg = self.approved_aggregate_public_key;
         self.approved_aggregate_public_key =
             stacks_client.get_approved_aggregate_key(self.reward_cycle)?;
```
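Note the check-then-recheck shape in the new `refresh_dkg`: the aggregate key is read once up front and again after `should_queue_dkg`, because a vote can be approved on-chain in the window between the first read and the queueing decision. A compressed, self-contained sketch of that control flow (stub helper bodies and the `u64` key type are assumptions for illustration):

```rust
// Compressed control flow of the new refresh_dkg; helper bodies are stubs.
struct Signer {
    approved_aggregate_public_key: Option<u64>,
}

impl Signer {
    fn update_approved_aggregate_key(&mut self) {
        // Real code: read the approved key from the contract.
    }
    fn read_dkg_stackerdb_messages(&mut self) {
        // Real code: catch up on DKG packets missed during a restart.
    }
    fn should_queue_dkg(&self) -> bool {
        true
    }
    fn queue_dkg(&mut self) {
        println!("queueing DKG command");
    }

    fn refresh_dkg(&mut self) {
        // First read: if a key is already approved there is nothing to do.
        self.update_approved_aggregate_key();
        if self.approved_aggregate_public_key.is_some() {
            return;
        }
        // Catch up on any missed DKG messages.
        self.read_dkg_stackerdb_messages();
        if !self.should_queue_dkg() {
            return;
        }
        // Second read: the vote may have completed while we were deciding,
        // so re-check before queueing a fresh DKG round.
        self.update_approved_aggregate_key();
        if self.approved_aggregate_public_key.is_none() {
            self.queue_dkg();
        }
    }
}

fn main() {
    let mut signer = Signer {
        approved_aggregate_public_key: None,
    };
    signer.refresh_dkg();
}
```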
```diff
@@ -1430,22 +1460,21 @@ impl Signer {
                     self.approved_aggregate_public_key
                 );
             }
-            if let State::OperationInProgress(Operation::Dkg) = self.state {
-                debug!(
-                    "{self}: DKG has already been set. Aborting DKG operation {}.",
-                    self.coordinator.current_dkg_id
-                );
-                self.finish_operation();
-            } else if self.state == State::Uninitialized {
-                // If we successfully load the DKG value, we are fully initialized
-                self.state = State::Idle;
-            }
-        } else if should_queue {
-            if self.commands.front() != Some(&Command::Dkg) {
-                info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command...");
-                self.commands.push_front(Command::Dkg);
-            } else {
-                debug!("{self}: DKG command already queued...");
+            match self.state {
+                State::OperationInProgress(Operation::Dkg) => {
+                    debug!(
+                        "{self}: DKG has already been set. Aborting DKG operation {}.",
+                        self.coordinator.current_dkg_id
+                    );
+                    self.finish_operation();
+                }
+                State::Uninitialized => {
+                    // If we successfully load the DKG value, we are fully initialized
+                    self.state = State::Idle;
+                }
+                _ => {
+                    // do nothing
+                }
             }
         }
         Ok(())
```
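The last hunk folds the old `if let` / `else if` chain into a `match` on the signer state, making the do-nothing case explicit. A small sketch of the idiom with stand-in enums (the enum shapes mirror the diff; `on_key_approved` is a hypothetical helper name):

```rust
// Stand-in enums mirroring the shapes in the diff; the real code matches
// on the signer's own State and Operation types.
enum Operation {
    Dkg,
    Sign,
}

enum State {
    Uninitialized,
    Idle,
    OperationInProgress(Operation),
}

// Hypothetical helper: what happens once an approved key is loaded.
fn on_key_approved(state: &mut State) {
    match state {
        State::OperationInProgress(Operation::Dkg) => {
            // A key was approved while DKG was in flight: abort the round.
            println!("aborting in-flight DKG");
            *state = State::Idle;
        }
        State::Uninitialized => {
            // Successfully loading the key completes initialization.
            *state = State::Idle;
        }
        _ => {
            // Idle, or a non-DKG operation in progress: nothing to do.
        }
    }
}

fn main() {
    let mut state = State::OperationInProgress(Operation::Dkg);
    on_key_approved(&mut state);
    assert!(matches!(state, State::Idle));
}
```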
