diff --git a/Cargo.lock b/Cargo.lock index a7746263e9c2d..bb531babf3ef8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5276,7 +5276,7 @@ dependencies = [ "anyhow", "cumulus-test-runtime", "cumulus-zombienet-sdk-helpers", - "env_logger 0.11.3", + "env_logger 0.11.8", "futures", "log", "parity-scale-codec", @@ -6187,14 +6187,14 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.3" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" dependencies = [ "anstream", "anstyle", "env_filter", - "humantime", + "jiff", "log", ] @@ -8840,6 +8840,30 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "jiff" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49" +dependencies = [ + "jiff-static", + "log", + "portable-atomic", + "portable-atomic-util", + "serde", +] + +[[package]] +name = "jiff-static" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" +dependencies = [ + "proc-macro2 1.0.95", + "quote 1.0.40", + "syn 2.0.98", +] + [[package]] name = "jni" version = "0.21.1" @@ -13338,7 +13362,7 @@ version = "0.1.0" dependencies = [ "anyhow", "clap", - "env_logger 0.11.3", + "env_logger 0.11.8", "futures", "git2", "hex", @@ -13625,7 +13649,7 @@ name = "pallet-staking-async" version = "0.1.0" dependencies = [ "anyhow", - "env_logger 0.11.3", + "env_logger 0.11.8", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -15219,6 +15243,7 @@ name = "polkadot-availability-distribution" version = "7.0.0" dependencies = [ "assert_matches", + "async-trait", "fatality", "futures", "futures-timer", @@ -15235,6 +15260,7 @@ dependencies = [ 
"rand 0.8.5", "rstest", "sc-network", + "sc-network-types", "schnellru", "sp-core 28.0.0", "sp-keyring", @@ -15903,6 +15929,27 @@ dependencies = [ "tracing-gum", ] +[[package]] +name = "polkadot-node-core-rewards-statistics-collector" +version = "6.0.0" +dependencies = [ + "assert_matches", + "fatality", + "futures", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "sp-application-crypto", + "sp-authority-discovery", + "sp-core 28.0.0", + "sp-keystore", + "sp-tracing 16.0.0", + "thiserror 1.0.65", + "tracing-gum", +] + [[package]] name = "polkadot-node-core-runtime-api" version = "7.0.0" @@ -17052,6 +17099,7 @@ dependencies = [ "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", + "polkadot-node-core-rewards-statistics-collector", "polkadot-node-core-runtime-api", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -17190,6 +17238,7 @@ dependencies = [ "polkadot-node-core-approval-voting-parallel", "polkadot-node-core-av-store", "polkadot-node-core-dispute-coordinator", + "polkadot-node-core-rewards-statistics-collector", "polkadot-node-metrics", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -17397,7 +17446,7 @@ dependencies = [ "anyhow", "asset-hub-westend-runtime", "cumulus-zombienet-sdk-helpers", - "env_logger 0.11.3", + "env_logger 0.11.8", "ethabi-decode", "futures", "log", @@ -17603,6 +17652,15 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + [[package]] name = "portpicker" version = "0.1.1" @@ -21137,7 +21195,7 @@ 
dependencies = [ "chrono", "criterion", "cumulus-zombienet-sdk-helpers", - "env_logger 0.11.3", + "env_logger 0.11.8", "futures", "futures-timer", "indexmap 2.9.0", @@ -25571,7 +25629,7 @@ name = "template-zombienet-tests" version = "0.0.0" dependencies = [ "anyhow", - "env_logger 0.11.3", + "env_logger 0.11.8", "tokio", "zombienet-sdk", ] @@ -25649,7 +25707,7 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e33b98a582ea0be1168eba097538ee8dd4bbe0f2b01b22ac92ea30054e5be7b" dependencies = [ - "env_logger 0.11.3", + "env_logger 0.11.8", "test-log-macros", "tracing-subscriber 0.3.18", ] diff --git a/Cargo.toml b/Cargo.toml index afae7745fd78d..16f53ad770c5d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -179,6 +179,7 @@ members = [ "polkadot/node/core/dispute-coordinator", "polkadot/node/core/parachains-inherent", "polkadot/node/core/prospective-parachains", + "polkadot/node/core/rewards-statistics-collector", "polkadot/node/core/provisioner", "polkadot/node/core/pvf", "polkadot/node/core/pvf-checker", @@ -1139,6 +1140,7 @@ polkadot-node-core-chain-selection = { path = "polkadot/node/core/chain-selectio polkadot-node-core-dispute-coordinator = { path = "polkadot/node/core/dispute-coordinator", default-features = false } polkadot-node-core-parachains-inherent = { path = "polkadot/node/core/parachains-inherent", default-features = false } polkadot-node-core-prospective-parachains = { path = "polkadot/node/core/prospective-parachains", default-features = false } +polkadot-node-core-rewards-statistics-collector = { path = "polkadot/node/core/rewards-statistics-collector", default-features = false } polkadot-node-core-provisioner = { path = "polkadot/node/core/provisioner", default-features = false } polkadot-node-core-pvf = { path = "polkadot/node/core/pvf", default-features = false } polkadot-node-core-pvf-checker = { path = "polkadot/node/core/pvf-checker", default-features = false } diff --git 
a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index b989f81efd5dc..02957c803314f 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -419,6 +419,7 @@ fn build_polkadot_full_node( keep_finalized_for: None, invulnerable_ah_collators: HashSet::new(), collator_protocol_hold_off: None, + verbose_approval_metrics: false, }; let (relay_chain_full_node, paranode_req_receiver) = match config.network.network_backend { diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index acc2782caa78d..dacac6518a271 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -305,6 +305,18 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { .await?) } + async fn submit_approval_statistics( + &self, + at: Hash, + payload: polkadot_primitives::vstaging::ApprovalStatistics, + signature: polkadot_primitives::ValidatorSignature, + ) -> Result<(), sp_api::ApiError> { + Ok(self + .rpc_client + .parachain_host_submit_approval_statistics(at, payload, signature) + .await?) 
+ } + async fn pvfs_require_precheck( &self, at: Hash, diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 80858a665cfaf..cdb35353cac83 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -54,6 +54,7 @@ use sp_version::RuntimeVersion; use crate::{metrics::RelaychainRpcMetrics, reconnecting_ws_client::ReconnectingWebsocketWorker}; pub use url::Url; +use cumulus_primitives_core::relay_chain::vstaging::ApprovalStatistics; const LOG_TARGET: &str = "relay-chain-rpc-client"; const NOTIFICATION_CHANNEL_SIZE_LIMIT: usize = 20; @@ -261,6 +262,21 @@ impl RelayChainRpcClient { .await } + /// Submits approval voting rewards statistics into the transaction pool. + pub async fn parachain_host_submit_approval_statistics( + &self, + at: RelayHash, + payload: ApprovalStatistics, + signature: ValidatorSignature, + ) -> Result<(), RelayChainError> { + self.call_remote_runtime_function( + "ParachainHost_submit_approval_statistics", + at, + Some((payload, signature)), + ) + .await + } + /// Get system health information pub async fn system_health(&self) -> Result { self.request("system_health", rpc_params![]).await diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index da25b8df0a0b3..daa39afc5901f 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -552,3 +552,12 @@ pub async fn wait_for_runtime_upgrade( Err(anyhow!("Did not find a runtime upgrade")) } + +pub fn report_label_with_attributes(label: &str, attributes: Vec<(&str, &str)>) -> String { + let mut attrs: Vec = vec![]; + for (k, v) in attributes { + attrs.push(format!("{}=\"{}\"", k, v)); + } + let final_attrs = attrs.join(","); + format!("{label}{{{final_attrs}}}") +} \ No newline at end of file diff --git 
a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index fa8595cc7c57c..0d664a3b46212 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -166,6 +166,11 @@ pub struct RunCmd { /// **Dangerous!** Do not touch unless explicitly advised to. #[arg(long, hide = true)] pub collator_protocol_hold_off: Option, + + /// Enable or disable per validator collected approvals metrics + /// to be published to prometheus. If not specified, set to false. + #[arg(long)] + pub verbose_approval_metrics: Option, } #[allow(missing_docs)] diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index b9366d07f6c54..2d30e50a77cd9 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -278,6 +278,10 @@ where telemetry_worker_handle: None, node_version, secure_validator_mode, + verbose_approval_metrics: cli + .run + .verbose_approval_metrics + .unwrap_or(false), workers_path: cli.run.workers_path, workers_names: None, overseer_gen, diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs index 70ec6bff440b7..db32cfbe3531e 100644 --- a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -83,6 +83,8 @@ fn main() -> Result<(), String> { ("Sent to peers", 63995.2200, 0.01), ])); messages.extend(average_usage.check_cpu_usage(&[("approval-voting-parallel", 12.3817, 0.1)])); + messages.extend(average_usage.check_cpu_usage(&[("rewards-statistics-collector", 12.3817, 0.1)])); + if messages.is_empty() { Ok(()) diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index a37bb53f79ffa..a6b712f619d34 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -66,7 +66,7 @@ use crate::{ }; use 
polkadot_node_primitives::approval::time::{slot_number_to_tick, Tick}; - +use polkadot_node_subsystem::messages::RewardsStatisticsCollectorMessage; use super::{State, LOG_TARGET}; #[derive(Debug)] @@ -337,7 +337,8 @@ pub struct BlockImportedCandidates { pub(crate) async fn handle_new_head< Sender: SubsystemSender + SubsystemSender - + SubsystemSender, + + SubsystemSender + + SubsystemSender, AVSender: SubsystemSender, B: Backend, >( diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 41c461ba37059..e4c35af049eb7 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -95,7 +95,7 @@ use persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry}; use polkadot_node_primitives::approval::time::{ slot_number_to_tick, Clock, ClockExt, DelayedApprovalTimer, SystemClock, Tick, }; - +use polkadot_node_subsystem::messages::RewardsStatisticsCollectorMessage; mod approval_checking; pub mod approval_db; mod backend; @@ -1249,6 +1249,7 @@ async fn run< Sender: SubsystemSender + SubsystemSender + SubsystemSender + + SubsystemSender + SubsystemSender + SubsystemSender + SubsystemSender @@ -1484,6 +1485,7 @@ pub async fn start_approval_worker< + SubsystemSender + SubsystemSender + SubsystemSender + + SubsystemSender + SubsystemSender + SubsystemSender + Clone, @@ -1563,6 +1565,7 @@ async fn handle_actions< + SubsystemSender + SubsystemSender + SubsystemSender + + SubsystemSender + Clone, ADSender: SubsystemSender, >( @@ -2029,6 +2032,7 @@ async fn handle_from_overseer< Sender: SubsystemSender + SubsystemSender + SubsystemSender + + SubsystemSender + Clone, ADSender: SubsystemSender, >( @@ -2609,11 +2613,10 @@ fn schedule_wakeup_action( block_hash: Hash, block_number: BlockNumber, candidate_hash: CandidateHash, - block_tick: Tick, + approval_status: &ApprovalStatus, tick_now: Tick, - required_tranches: RequiredTranches, ) -> Option { - let maybe_action = match 
required_tranches { + let maybe_action = match approval_status.required_tranches { _ if approval_entry.is_approved() => None, RequiredTranches::All => None, RequiredTranches::Exact { next_no_show, last_assignment_tick, .. } => { @@ -2652,7 +2655,7 @@ fn schedule_wakeup_action( // Apply the clock drift to these tranches. min_prefer_some(next_announced, our_untriggered) - .map(|t| t as Tick + block_tick + clock_drift) + .map(|t| t as Tick + approval_status.block_tick + clock_drift) }; min_prefer_some(next_non_empty_tranche, next_no_show).map(|tick| { @@ -2667,14 +2670,14 @@ fn schedule_wakeup_action( tick, ?candidate_hash, ?block_hash, - block_tick, + approval_status.block_tick, "Scheduling next wakeup.", ), None => gum::trace!( target: LOG_TARGET, ?candidate_hash, ?block_hash, - block_tick, + approval_status.block_tick, "No wakeup needed.", ), Some(_) => {}, // unreachable @@ -2852,9 +2855,8 @@ where block_entry.block_hash(), block_entry.block_number(), *assigned_candidate_hash, - status.block_tick, + &status, tick_now, - status.required_tranches, )); } @@ -2906,7 +2908,8 @@ async fn import_approval( wakeups: &Wakeups, ) -> SubsystemResult<(Vec, ApprovalCheckResult)> where - Sender: SubsystemSender, + Sender: SubsystemSender + + SubsystemSender, { macro_rules! 
respond_early { ($e: expr) => {{ @@ -3059,7 +3062,8 @@ async fn advance_approval_state( wakeups: &Wakeups, ) -> Vec where - Sender: SubsystemSender, + Sender: SubsystemSender + + SubsystemSender, { let validator_index = transition.validator_index(); @@ -3173,7 +3177,7 @@ where return Vec::new() }; - { + let newly_approved = { let approval_entry = candidate_entry .approval_entry_mut(&block_hash) .expect("Approval entry just fetched; qed"); @@ -3184,17 +3188,18 @@ where if is_approved { approval_entry.mark_approved(); } + if newly_approved { state.record_no_shows(session_index, para_id.into(), &status.no_show_validators); } + actions.extend(schedule_wakeup_action( &approval_entry, block_hash, block_number, candidate_hash, - status.block_tick, + &status, tick_now, - status.required_tranches, )); if is_approved && transition.is_remote_approval() { @@ -3234,6 +3239,27 @@ where } } } + + if newly_approved { + gum::info!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + "Candidate newly approved, collecting useful approvals..." + ); + + collect_useful_approvals(sender, &status, block_hash, &candidate_entry); + + if status.no_show_validators.len() > 0 { + _ = sender + .try_send_message(RewardsStatisticsCollectorMessage::NoShows( + candidate_entry.candidate.hash(), + block_hash, + status.no_show_validators, + )); + } + } + // We have no need to write the candidate entry if all of the following // is true: // @@ -3247,7 +3273,9 @@ where // In all other cases, we need to write the candidate entry. 
db.write_candidate_entry(candidate_entry); } - } + + newly_approved + }; actions } @@ -3286,7 +3314,7 @@ fn should_trigger_assignment( } } -async fn process_wakeup>( +async fn process_wakeup( sender: &mut Sender, state: &mut State, db: &mut OverlayedBackend<'_, impl Backend>, @@ -3295,7 +3323,11 @@ async fn process_wakeup>( candidate_hash: CandidateHash, metrics: &Metrics, wakeups: &Wakeups, -) -> SubsystemResult> { +) -> SubsystemResult> +where + Sender: SubsystemSender + + SubsystemSender +{ let block_entry = db.load_block_entry(&relay_block)?; let candidate_entry = db.load_candidate_entry(&candidate_hash)?; @@ -3703,7 +3735,8 @@ async fn launch_approval< // have been done. #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] async fn issue_approval< - Sender: SubsystemSender, + Sender: SubsystemSender + + SubsystemSender, ADSender: SubsystemSender, >( sender: &mut Sender, @@ -4075,3 +4108,76 @@ fn compute_delayed_approval_sending_tick( metrics.on_delayed_approval(sign_no_later_than.checked_sub(tick_now).unwrap_or_default()); sign_no_later_than } + +// collect all the approvals required to approve the +// candidate, ignoring any other approval that belongs +// to not required tranches +fn collect_useful_approvals( + sender: &mut Sender, + status: &ApprovalStatus, + block_hash: Hash, + candidate_entry: &CandidateEntry, +) +where + Sender: SubsystemSender +{ + let candidate_hash = candidate_entry.candidate.hash(); + let candidate_approvals = candidate_entry.approvals(); + + let approval_entry = match candidate_entry.approval_entry(&block_hash) { + Some(approval_entry) => approval_entry, + None => { + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + "approval entry not found, cannot collect useful approvals." 
+ ); + return + }, + }; + + let collected_useful_approvals: Vec = match status.required_tranches { + RequiredTranches::All => { + candidate_approvals.iter_ones().map(|idx| ValidatorIndex(idx as _)).collect() + }, + RequiredTranches::Exact {needed, ..} => { + let mut assigned_mask = approval_entry.assignments_up_to(needed); + assigned_mask &= candidate_approvals; + assigned_mask.iter_ones().map(|idx| ValidatorIndex(idx as _)).collect() + }, + RequiredTranches::Pending {..} => { + gum::warn!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + "approval status required tranches still pending when collecting useful approvals" + ); + return + }, + }; + + if !collected_useful_approvals.is_empty() { + let useful_approvals = collected_useful_approvals.len(); + gum::debug!( + target: LOG_TARGET, + ?block_hash, + ?candidate_hash, + ?useful_approvals, + "collected useful approvals" + ); + + _ = sender.try_send_message(RewardsStatisticsCollectorMessage::CandidateApproved( + candidate_hash, + block_hash, + collected_useful_approvals, + )).map_err(|_| { + gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + ?block_hash, + "Failed to send approvals to statistics subsystem", + ); + }); + } +} \ No newline at end of file diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 71e55a42c9fdc..feab19d8715f8 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -72,6 +72,7 @@ use super::{ use polkadot_primitives_test_helpers::{ dummy_candidate_receipt_v2, dummy_candidate_receipt_v2_bad_sig, }; +use sp_core::blake2_512_into; const SLOT_DURATION_MILLIS: u64 = 5000; @@ -650,6 +651,26 @@ fn make_candidate(para_id: ParaId, hash: &Hash) -> CandidateReceipt { r } +struct ExpectApprovalsStatsCollected { + candidate_hash: CandidateHash, + block_hash: Hash, + validators: Vec, + no_shows: Option<(SessionIndex, Vec)>, +} + +impl ExpectApprovalsStatsCollected { + fn 
new(candidate_hash: CandidateHash, block_hash: Hash, validators: Vec) -> Self { + Self { + candidate_hash, block_hash, validators, no_shows: None, + } + } + + fn with_no_shows(&mut self, no_shows: (SessionIndex, Vec)) -> &mut Self { + self.no_shows = Some(no_shows); + self + } +} + async fn import_approval( overseer: &mut VirtualOverseer, block_hash: Hash, @@ -658,6 +679,7 @@ async fn import_approval( candidate_hash: CandidateHash, session_index: SessionIndex, expect_chain_approved: bool, + expected_approvals_stats_collected: Option, signature_opt: Option, ) -> oneshot::Receiver { let signature = signature_opt.unwrap_or(sign_approval( @@ -681,6 +703,22 @@ async fn import_approval( }, ) .await; + + if let Some(expected_stats_collected) = expected_approvals_stats_collected { + assert_matches!( + overseer_recv(overseer).await, + AllMessages::RewardsStatisticsCollector( + RewardsStatisticsCollectorMessage::CandidateApproved( + c_hash, b_hash, validators, + ) + ) => { + assert_eq!(b_hash, expected_stats_collected.block_hash); + assert_eq!(c_hash, expected_stats_collected.candidate_hash); + assert_eq!(validators, expected_stats_collected.validators); + } + ); + } + if expect_chain_approved { assert_matches!( overseer_recv(overseer).await, @@ -1277,6 +1315,7 @@ fn subsystem_rejects_approval_if_no_candidate_entry() { session_index, false, None, + None, ) .await; @@ -1318,6 +1357,7 @@ fn subsystem_rejects_approval_if_no_block_entry() { session_index, false, None, + None, ) .await; @@ -1383,6 +1423,7 @@ fn subsystem_rejects_approval_before_assignment() { session_index, false, None, + None, ) .await; @@ -1638,6 +1679,7 @@ fn subsystem_accepts_and_imports_approval_after_assignment() { candidate_hash, session_index, true, + Some(ExpectApprovalsStatsCollected::new(candidate_hash, block_hash, vec![validator])), None, ) .await; @@ -1729,6 +1771,7 @@ fn subsystem_second_approval_import_only_schedules_wakeups() { session_index, false, None, + None, ) .await; @@ -1746,6 +1789,7 @@ fn 
subsystem_second_approval_import_only_schedules_wakeups() { session_index, false, None, + None, ) .await; @@ -1963,6 +2007,7 @@ fn test_approvals_on_fork_are_always_considered_after_no_show( 1, false, None, + None, ) .await; @@ -2356,6 +2401,7 @@ fn import_checked_approval_updates_entries_and_schedules() { candidate_hash, session_index, false, + None, Some(sig_a), ) .await; @@ -2383,6 +2429,7 @@ fn import_checked_approval_updates_entries_and_schedules() { candidate_hash, session_index, true, + Some(ExpectApprovalsStatsCollected::new(candidate_hash, block_hash, vec![validator_index_a, validator_index_b])), Some(sig_b), ) .await; @@ -2511,6 +2558,15 @@ fn subsystem_import_checked_approval_sets_one_block_bit_at_a_time() { } else { sign_approval(Sr25519Keyring::Bob, *candidate_hash, session_index) }; + + let expected_stats_collected = if i == 1 { + Some(ExpectApprovalsStatsCollected::new(candidate_hash1, block_hash, vec![validator1, validator2])) + } else if i == 3 { + Some(ExpectApprovalsStatsCollected::new(candidate_hash2, block_hash, vec![validator1, validator2])) + } else { + None + }; + let rx = import_approval( &mut virtual_overseer, block_hash, @@ -2519,6 +2575,7 @@ fn subsystem_import_checked_approval_sets_one_block_bit_at_a_time() { *candidate_hash, session_index, expect_block_approved, + expected_stats_collected, Some(signature), ) .await; @@ -2791,6 +2848,7 @@ fn approved_ancestor_test( candidate_hash, i as u32 + 1, true, + Some(ExpectApprovalsStatsCollected::new(candidate_hash, *block_hash, vec![validator])), None, ) .await; @@ -3362,8 +3420,20 @@ where } let n_validators = validators.len(); + let to_collect = min((n_validators/3) + 1, approvals_to_import.len()); + let validators_collected = approvals_to_import[0..to_collect] + .iter() + .map(|vidx| ValidatorIndex(vidx.clone())) + .collect::>(); + for (i, &validator_index) in approvals_to_import.iter().enumerate() { let expect_chain_approved = 3 * (i + 1) > n_validators; + let 
expect_approvals_stats_collected = if expect_chain_approved { + Some(ExpectApprovalsStatsCollected::new(candidate_hash, block_hash, validators_collected.clone())) + } else { + None + }; + let rx = import_approval( &mut virtual_overseer, block_hash, @@ -3372,6 +3442,7 @@ where candidate_hash, 1, expect_chain_approved, + expect_approvals_stats_collected, Some(sign_approval(validators[validator_index as usize], candidate_hash, 1)), ) .await; @@ -3731,6 +3802,7 @@ fn pre_covers_dont_stall_approval() { candidate_hash, session_index, false, + None, Some(sig_b), ) .await; @@ -3746,6 +3818,7 @@ fn pre_covers_dont_stall_approval() { candidate_hash, session_index, false, + None, Some(sig_c), ) .await; @@ -3782,6 +3855,28 @@ fn pre_covers_dont_stall_approval() { assert_eq!(clock.inner.lock().next_wakeup(), Some(31)); clock.inner.lock().set_tick(31); + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RewardsStatisticsCollector(RewardsStatisticsCollectorMessage::CandidateApproved( + c_hash, b_hash, validators, + )) => { + assert_eq!(b_hash, block_hash); + assert_eq!(c_hash, candidate_hash); + assert_eq!(validators, vec![validator_index_b, validator_index_c]); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RewardsStatisticsCollector(RewardsStatisticsCollectorMessage::NoShows( + c_hash, b_hash, validators + )) => { + assert_eq!(b_hash, block_hash); + assert_eq!(c_hash, candidate_hash); + assert_eq!(validators, vec![validator_index_a]); + } + ); + assert_matches!( overseer_recv(&mut virtual_overseer).await, AllMessages::ChainSelection(ChainSelectionMessage::Approved(b_hash)) => { @@ -3903,6 +3998,7 @@ fn waits_until_approving_assignments_are_old_enough() { candidate_hash, session_index, false, + None, Some(sig_a), ) .await; @@ -3919,6 +4015,7 @@ fn waits_until_approving_assignments_are_old_enough() { candidate_hash, session_index, false, + None, Some(sig_b), ) .await; @@ -3943,6 +4040,21 @@ fn 
waits_until_approving_assignments_are_old_enough() { // Sleep to ensure we get a consistent read on the database. futures_timer::Delay::new(Duration::from_millis(100)).await; + let _ = + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RewardsStatisticsCollector( + RewardsStatisticsCollectorMessage::CandidateApproved( + c_hash, b_hash, validators + ) + ) => { + assert_eq!(b_hash, block_hash); + assert_eq!(c_hash, candidate_hash); + assert_eq!(validators, vec![validator_index_a, validator_index_b]); + } + ); + assert_matches!( overseer_recv(&mut virtual_overseer).await, AllMessages::ChainSelection(ChainSelectionMessage::Approved(b_hash)) => { diff --git a/polkadot/node/core/rewards-statistics-collector/Cargo.toml b/polkadot/node/core/rewards-statistics-collector/Cargo.toml new file mode 100644 index 0000000000000..f8c2d8aa635d2 --- /dev/null +++ b/polkadot/node/core/rewards-statistics-collector/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "polkadot-node-core-rewards-statistics-collector" +version = "6.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +description = "The Statistics Collector subsystem. Collects Approval Voting and Approvals Distributions stats." 
+homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +fatality = { workspace = true } +futures = { workspace = true } +gum = { workspace = true, default-features = true } +thiserror = { workspace = true } + +polkadot-node-subsystem = { workspace = true, default-features = true } +polkadot-node-subsystem-util = { workspace = true, default-features = true } +polkadot-primitives = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } +sp-keystore = { workspace = true, default-features = true } + +[dev-dependencies] +assert_matches = { workspace = true } +polkadot-node-subsystem-test-helpers = { workspace = true } +polkadot-primitives = { workspace = true, features = ["test"] } +sp-authority-discovery = { workspace = true, default-features = true } +sp-application-crypto = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-tracing = { workspace = true } \ No newline at end of file diff --git a/polkadot/node/core/rewards-statistics-collector/src/approval_voting_metrics.rs b/polkadot/node/core/rewards-statistics-collector/src/approval_voting_metrics.rs new file mode 100644 index 0000000000000..f8392d5a9a5f9 --- /dev/null +++ b/polkadot/node/core/rewards-statistics-collector/src/approval_voting_metrics.rs @@ -0,0 +1,64 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use std::collections::{HashMap, HashSet}; +use polkadot_primitives::{CandidateHash, Hash, SessionIndex, ValidatorIndex}; +use crate::metrics::Metrics; +use crate::View; + +#[derive(Debug, Clone, Default, Eq, PartialEq)] +pub struct ApprovalsStats { + pub votes: HashSet, + pub no_shows: HashSet, +} + +impl ApprovalsStats { + pub fn new(votes: HashSet, no_shows: HashSet) -> Self { + Self { votes, no_shows } + } +} + +pub fn handle_candidate_approved( + view: &mut View, + block_hash: Hash, + candidate_hash: CandidateHash, + approvals: Vec, +) { + if let Some(relay_view) = view.per_relay.get_mut(&block_hash) { + relay_view.approvals_stats + .entry(candidate_hash) + .and_modify(|a: &mut ApprovalsStats| { + a.votes.extend(approvals.iter()) + }) + .or_insert_with(|| { + ApprovalsStats::new(HashSet::from_iter(approvals), HashSet::new()) + }); + } +} + +pub fn handle_observed_no_shows( + view: &mut View, + block_hash: Hash, + candidate_hash: CandidateHash, + no_show_validators: Vec, +) { + if let Some(relay_view) = view.per_relay.get_mut(&block_hash) { + relay_view.approvals_stats + .entry(candidate_hash) + .and_modify(|a: &mut ApprovalsStats| a.no_shows.extend(no_show_validators.iter())) + .or_insert(ApprovalsStats::new(HashSet::new(), HashSet::from_iter(no_show_validators))); + } +} \ No newline at end of file diff --git a/polkadot/node/core/rewards-statistics-collector/src/availability_distribution_metrics.rs b/polkadot/node/core/rewards-statistics-collector/src/availability_distribution_metrics.rs new file mode 100644 index 0000000000000..2a57c200ad4d6 --- /dev/null +++ b/polkadot/node/core/rewards-statistics-collector/src/availability_distribution_metrics.rs @@ -0,0 +1,127 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use std::collections::{HashMap, HashSet}; +use std::collections::hash_map::Entry; +use std::ops::Add; +use gum::CandidateHash; +use polkadot_primitives::{AuthorityDiscoveryId, SessionIndex, ValidatorIndex}; +use crate::{PerSessionView, View}; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct AvailabilityChunks { + pub downloads_per_candidate: HashMap>, + pub uploads_per_candidate: HashMap>, +} + +impl AvailabilityChunks { + pub fn new() -> Self { + Self { + downloads_per_candidate: Default::default(), + uploads_per_candidate: Default::default(), + } + } + + pub fn note_candidate_chunk_downloaded( + &mut self, + candidate_hash: CandidateHash, + validator_index: ValidatorIndex, + count: u64, + ) { + let validator_downloads = self.downloads_per_candidate + .entry(candidate_hash) + .or_default() + .entry(validator_index); + + match validator_downloads { + Entry::Occupied(mut validator_downloads) => { + *validator_downloads.get_mut() += count; + } + Entry::Vacant(entry) => { entry.insert(count); } + } + } + + pub fn note_candidate_chunk_uploaded( + &mut self, + candidate_hash: CandidateHash, + validator_index: ValidatorIndex, + count: u64, + ) { + let validator_uploads = self.uploads_per_candidate + .entry(candidate_hash) + .or_default() + .entry(validator_index); + + match validator_uploads { + Entry::Occupied(mut validator_uploads) => { + 
*validator_uploads.get_mut() += count; + } + Entry::Vacant(entry) => { entry.insert(count); } + } + } +} + +// whenever chunks are acquired throughout availability +// recovery we collect the metrics about what validator +// provided and the amount of chunks +pub fn handle_chunks_downloaded( + view: &mut View, + session_index: SessionIndex, + candidate_hash: CandidateHash, + downloads: HashMap, +) { + let av_chunks = view.availability_chunks + .entry(session_index) + .or_insert(AvailabilityChunks::new()); + + for (validator_index, download_count) in downloads { + av_chunks.note_candidate_chunk_downloaded(candidate_hash, validator_index, download_count) + } +} + +// handle_chunk_uploaded receive the authority ids of the peer +// it just uploaded the candidate hash, to collect this statistic +// it needs to find the validator index that is bounded to any of the +// authority id, from the oldest to newest session. +pub fn handle_chunk_uploaded( + view: &mut View, + candidate_hash: CandidateHash, + authority_ids: HashSet, +) { + let mut sessions: Vec<(&SessionIndex, &PerSessionView)> = view.per_session.iter().collect(); + sessions.sort_by(|(a, _), (b, _)| a.partial_cmp(&b).unwrap()); + + for (session_idx, session_view) in sessions { + // Find the first authority with a matching validator index + if let Some(validator_idx) = authority_ids + .iter() + .find_map(|id| session_view.authorities_lookup.get(id).map(|v| v)) + { + let av_chunks = view.availability_chunks.entry(*session_idx); + match av_chunks { + Entry::Occupied(mut entry) => { + entry.get_mut() + .note_candidate_chunk_uploaded(candidate_hash, *validator_idx, 1); + } + Entry::Vacant(entry) => { + entry.insert(AvailabilityChunks::new()) + .note_candidate_chunk_uploaded(candidate_hash, *validator_idx, 1); + } + } + break; + } + } +} \ No newline at end of file diff --git a/polkadot/node/core/rewards-statistics-collector/src/error.rs b/polkadot/node/core/rewards-statistics-collector/src/error.rs new file mode 
100644 index 0000000000000..8e9184603427f --- /dev/null +++ b/polkadot/node/core/rewards-statistics-collector/src/error.rs @@ -0,0 +1,63 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Error types. + +use futures::channel::oneshot; + +use polkadot_node_subsystem::{ChainApiError, RuntimeApiError, SubsystemError}; +use polkadot_node_subsystem_util::runtime; + +use crate::LOG_TARGET; +use fatality::Nested; + +#[allow(missing_docs)] +#[fatality::fatality(splitable)] +pub enum Error { + #[fatal] + #[error("Receiving message from overseer failed: {0}")] + SubsystemReceive(#[source] SubsystemError), + + #[error("Sending message to overseer failed: {0}")] + OverseerCommunication(#[source] oneshot::Canceled), + + #[error("Failed to request runtime data: {0}")] + RuntimeApiCallError(#[source] RuntimeApiError), + + #[error("Failed to request chain api data: {0}")] + ChainApiCallError(#[source] ChainApiError), +} + +/// General `Result` type. +pub type Result = std::result::Result; +/// Result for non-fatal only failures. +pub type JfyiErrorResult = std::result::Result; +/// Result for fatal only failures. +pub type FatalResult = std::result::Result; + +/// Utility for eating top level errors and log them. +/// +/// We basically always want to try and continue on error. 
This utility function is meant to +/// consume top-level errors by simply logging them +pub fn log_error(result: Result<()>, ctx: &'static str) -> FatalResult<()> { + match result.into_nested()? { + Ok(()) => Ok(()), + Err(jfyi) => { + gum::debug!(target: LOG_TARGET, error = ?jfyi, ctx); + Ok(()) + }, + } +} diff --git a/polkadot/node/core/rewards-statistics-collector/src/lib.rs b/polkadot/node/core/rewards-statistics-collector/src/lib.rs new file mode 100644 index 0000000000000..1cba03e719e63 --- /dev/null +++ b/polkadot/node/core/rewards-statistics-collector/src/lib.rs @@ -0,0 +1,721 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implementation of the Consensus Statistics Collector subsystem. +//! This component monitors and manages metrics related to parachain candidate approvals, +//! including approval votes, distribution of approval chunks, chunk downloads, and chunk uploads. +//! +//! Its primary responsibility is to collect and track data reflecting each node’s perspective +//! on the approval work carried out by all session validators. 
+ + +use std::collections::{HashMap, HashSet, VecDeque}; +use std::collections::hash_map::Entry; +use std::task::Context; +use futures::{channel::oneshot, prelude::*}; +use gum::CandidateHash; +use sp_keystore::KeystorePtr; +use polkadot_node_subsystem::{ + errors::RuntimeApiError as RuntimeApiSubsystemError, + messages::{ChainApiMessage, RewardsStatisticsCollectorMessage, RuntimeApiMessage, RuntimeApiRequest}, + overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, SubsystemSender +}; +use polkadot_primitives::{AuthorityDiscoveryId, BlockNumber, Hash, Header, SessionIndex, ValidatorId, ValidatorIndex, well_known_keys::relay_dispatch_queue_remaining_capacity, SessionInfo}; +use polkadot_node_primitives::{approval::{ + time::Tick, + v1::DelayTranche +}, SessionWindowSize, DISPUTE_WINDOW}; +use crate::{ + error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, +}; + +mod error; +#[cfg(test)] +mod tests; +mod approval_voting_metrics; +mod availability_distribution_metrics; +pub mod metrics; + +use approval_voting_metrics::ApprovalsStats; +use polkadot_node_subsystem::RuntimeApiError::{Execution, NotSupported}; +use polkadot_node_subsystem_util::{request_candidate_events, request_session_index_for_child, request_session_info}; +use polkadot_primitives::vstaging::{ApprovalStatistics, ApprovalStatisticsTallyLine}; +use crate::approval_voting_metrics::{handle_candidate_approved, handle_observed_no_shows}; +use crate::availability_distribution_metrics::{handle_chunk_uploaded, handle_chunks_downloaded, AvailabilityChunks}; +use self::metrics::Metrics; + +const MAX_SESSIONS_TO_KEEP: SessionWindowSize = DISPUTE_WINDOW; +const LOG_TARGET: &str = "parachain::rewards-statistics-collector"; + +#[derive(Default)] +pub struct Config { + pub verbose_approval_metrics: bool +} + +struct PerRelayView { + session_index: SessionIndex, + parent_hash: Option, + children: HashSet, + approvals_stats: HashMap, +} + +impl PerRelayView { + 
fn new(parent_hash: Option, session_index: SessionIndex) -> Self { + PerRelayView{ + session_index: session_index, + parent_hash: parent_hash, + children: HashSet::new(), + approvals_stats: HashMap::new(), + } + } + + fn link_child(&mut self, hash: Hash) { + self.children.insert(hash); + } +} + +#[derive(Debug, Eq, PartialEq, Clone, Default)] +struct PerValidatorTally { + no_shows: u32, + approvals: u32, +} + +impl PerValidatorTally { + fn increment_noshow(&mut self) { + self.no_shows += 1; + } + + fn increment_approval(&mut self) { + self.approvals += 1; + } +} + +#[derive(Debug, Eq, PartialEq, Clone)] +pub struct PerSessionView { + credentials: Option, + authorities_lookup: HashMap, + validators_tallies: HashMap, +} + +impl PerSessionView { + fn new( + authorities_lookup: HashMap, + credentials: Option, + ) -> Self { + Self { + authorities_lookup, + credentials, + validators_tallies: HashMap::new(), + } + } +} + +/// A struct that holds the credentials required to sign the PVF check statements. These credentials +/// are implicitly to pinned to a session where our node acts as a validator. +#[derive(Debug, Eq, PartialEq, Clone)] +struct SigningCredentials { + /// The validator public key. + validator_key: ValidatorId, + /// The validator index in the current session. 
+ validator_index: ValidatorIndex, +} + +/// View holds the subsystem internal state +struct View { + /// roots contains the only unfinalized relay hashes + /// is used when finalization happens to prune unneeded forks + roots: HashSet, + /// per_relay holds collected approvals statistics for + /// all the candidates under the given unfinalized relay hash + per_relay: HashMap, + /// per_session holds session information (authorities lookup) + /// and approvals tallies which is the aggregation of collected + /// approvals statistics under finalized blocks + per_session: HashMap, + /// availability_chunks holds collected upload and download chunks + /// statistics per validator + availability_chunks: HashMap, + current_session: Option, + recent_block: Option<(BlockNumber, Hash)>, +} + +impl View { + fn new() -> Self { + return View{ + roots: HashSet::new(), + per_relay: HashMap::new(), + per_session: HashMap::new(), + availability_chunks: HashMap::new(), + current_session: None, + recent_block: None, + }; + } + + // add_node includes a new activated block + // in the unfinalized blocks mapping, it also + // links the including block with its parent + // if its parent is present in the mapping + // otherwise the including block will be added + // in the roots set. + fn add_node( + &mut self, + activated_hash: Hash, + activated_header: Option
, + session_index: SessionIndex, + ) { + if let Some(h) = activated_header { + let parent_hash = h.parent_hash; + let parent_hash = match self.per_relay.get_mut(&parent_hash) { + Some(per_relay_view) => { + per_relay_view.link_child(activated_hash); + Some(parent_hash) + }, + None => { + _ = self.roots.insert(activated_hash); + None + }, + }; + + self.per_relay.insert(activated_hash, PerRelayView::new(parent_hash, session_index)); + } else { + self.roots.insert(activated_hash); + self.per_relay.insert(activated_hash, PerRelayView::new(None, session_index)); + } + } +} + +/// The statistics collector subsystem. +pub struct RewardsStatisticsCollector { + keystore: KeystorePtr, + metrics: Metrics, + config: Config +} + +impl RewardsStatisticsCollector { + /// Create a new instance of the `RewardsStatisticsCollector`. + pub fn new(keystore: KeystorePtr, metrics: Metrics, config: Config) -> Self { + Self { + metrics, + config, + keystore, + } + } +} + +#[overseer::subsystem(RewardsStatisticsCollector, error = SubsystemError, prefix = self::overseer)] +impl RewardsStatisticsCollector +where + Context: Send + Sync, +{ + fn start(self, ctx: Context) -> SpawnedSubsystem { + SpawnedSubsystem { + future: run(ctx, self.keystore, (self.metrics, self.config.verbose_approval_metrics)) + .map_err(|e| SubsystemError::with_origin("statistics-parachains", e)) + .boxed(), + name: "rewards-statistics-collector-subsystem", + } + } +} + +#[overseer::contextbounds(RewardsStatisticsCollector, prefix = self::overseer)] +async fn run(mut ctx: Context, keystore: KeystorePtr, metrics: (Metrics, bool)) -> FatalResult<()> { + let mut view = View::new(); + loop { + error::log_error( + run_iteration(&mut ctx, &mut view, &keystore, (&metrics.0, metrics.1)).await, + "Encountered issue during run iteration", + )?; + } +} + +#[overseer::contextbounds(RewardsStatisticsCollector, prefix = self::overseer)] +pub(crate) async fn run_iteration( + ctx: &mut Context, + view: &mut View, + keystore: 
&KeystorePtr, + metrics: (&Metrics, bool), +) -> Result<()> { + let per_validator_metrics = metrics.1; + let mut sender = ctx.sender().clone(); + + loop { + match ctx.recv().await.map_err(FatalError::SubsystemReceive)? { + FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), + FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { + if let Some(activated) = update.activated { + let ActivationInfo { + activated_header, + session_index, + new_session_info, + recent_block, + } = extract_activated_leaf_info( + &mut sender, + view, + keystore, + activated.hash, + activated.number, + ).await?; + + let relay_hash = activated.hash; + view.recent_block = Some(recent_block); + + view.add_node( + relay_hash, + activated_header, + session_index, + ); + + if let Some((session_info, credentials)) = new_session_info { + let mut authority_lookup = HashMap::new(); + for (i, ad) in session_info.discovery_keys.iter().cloned().enumerate() { + authority_lookup.insert(ad, ValidatorIndex(i as _)); + } + + view.per_session.insert(session_index, PerSessionView::new(authority_lookup, credentials)); + } + } + }, + FromOrchestra::Signal(OverseerSignal::BlockFinalized(fin_block_hash, _)) => { + // when a block is finalized it performs: + // 1. Pruning unneeded forks + // 2. Collected statistics that belongs to the finalized chain + // 3. 
After collection of finalized statistics then remove finalized + // nodes from the mapping leaving only the unfinalized blocks after finalization + let finalized_hashes = prune_unfinalised_forks(view, fin_block_hash); + + // so we revert it and check from the oldest to the newest + for hash in finalized_hashes.iter().rev() { + if let Some((session_idx, approvals_stats)) = view + .per_relay + .remove(hash) + .map(|rb_view| (rb_view.session_index, rb_view.approvals_stats)) + { + if let Some(session_view) = view.per_session.get_mut(&session_idx) { + metrics.0.record_approvals_stats( + session_idx, + approvals_stats.clone(), + per_validator_metrics, + ); + + for stats in approvals_stats.values() { + // Increment no-show tallies + for &validator_idx in &stats.no_shows { + session_view + .validators_tallies + .entry(validator_idx) + .or_default() + .increment_noshow(); + } + + // Increment approval tallies + for &validator_idx in &stats.votes { + session_view + .validators_tallies + .entry(validator_idx) + .or_default() + .increment_approval(); + } + } + } + } + } + + log_session_view_general_stats(view); + prune_and_submit_finalized_session_stats( + ctx.sender(), + keystore, + view, + fin_block_hash, + metrics.0, + ).await?; + } + FromOrchestra::Communication { msg } => { + match msg { + RewardsStatisticsCollectorMessage::ChunksDownloaded( + session_index, + candidate_hash, + downloads, + )=> { + handle_chunks_downloaded( + view, + session_index, + candidate_hash, + downloads, + ) + }, + RewardsStatisticsCollectorMessage::ChunkUploaded( + candidate_hash, + authority_ids, + ) => { + handle_chunk_uploaded( + view, + candidate_hash, + authority_ids, + ) + }, + RewardsStatisticsCollectorMessage::CandidateApproved( + candidate_hash, + block_hash, + approvals, + ) => { + handle_candidate_approved( + view, + block_hash, + candidate_hash, + approvals, + ); + } + RewardsStatisticsCollectorMessage::NoShows( + candidate_hash, + block_hash, + no_show_validators, + ) => { + 
handle_observed_no_shows( + view, + block_hash, + candidate_hash, + no_show_validators, + ); + }, + } + }, + } + } +} + +struct ActivationInfo { + activated_header: Option
, + recent_block: (BlockNumber, Hash), + session_index: SessionIndex, + new_session_info: Option<(SessionInfo, Option)>, +} + +async fn extract_activated_leaf_info( + sender: &mut impl overseer::RewardsStatisticsCollectorSenderTrait, + view: &mut View, + keystore: &KeystorePtr, + relay_hash: Hash, + relay_number: BlockNumber, +) -> Result { + let recent_block = match view.recent_block { + Some((recent_block_num, recent_block_hash)) if relay_number < recent_block_num => { + // the existing recent block is not worse than the new activation, so leave it. + (recent_block_num, recent_block_hash) + }, + _ => (relay_number, relay_hash), + }; + + let (tx, rx) = oneshot::channel(); + sender.send_message(ChainApiMessage::BlockHeader(relay_hash, tx)).await; + let header = rx + .map_err(JfyiError::OverseerCommunication) + .await? + .map_err(JfyiError::ChainApiCallError)?; + + let session_idx = request_session_index_for_child(relay_hash, sender) + .await + .await + .map_err(JfyiError::OverseerCommunication)? + .map_err(JfyiError::RuntimeApiCallError)?; + + let new_session_info = if !view.per_session.contains_key(&session_idx) { + let session_info = request_session_info(relay_hash, session_idx, sender) + .await + .await + .map_err(JfyiError::OverseerCommunication)? 
+ .map_err(JfyiError::RuntimeApiCallError)?; + + let (tx, rx) = oneshot::channel(); + let validators = runtime_api_request( + sender, + relay_hash, + RuntimeApiRequest::Validators(tx), + rx, + ).await?; + + let signing_credentials = polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore) + .map(|(validator_key, validator_index)| + SigningCredentials { validator_key, validator_index }); + + if let Some(session_info) = session_info { + Some((session_info, signing_credentials)) + } else { + None + } + } else { + None + }; + + Ok(ActivationInfo { + activated_header: header, + recent_block, + session_index: session_idx, + new_session_info, + }) +} + +// prune_unfinalised_forks will remove all the relay chain blocks +// that are not in the finalized chain and its de pendants children using the latest finalized block as reference +// and will return a list of finalized hashes +fn prune_unfinalised_forks(view: &mut View, fin_block_hash: Hash) -> Vec { + // since we want to reward only valid approvals, we retain + // only finalized chain blocks and its descendants + // identify the finalized chain so we don't prune + let rb_view = match view.per_relay.get_mut(&fin_block_hash) { + Some(per_relay_view) => per_relay_view, + None => return Vec::new(), + }; + + let mut removal_stack = Vec::new(); + let mut retain_relay_hashes = Vec::new(); + retain_relay_hashes.push(fin_block_hash); + + let mut current_block_hash = fin_block_hash; + let mut current_parent_hash = rb_view.parent_hash; + while let Some(parent_hash) = current_parent_hash { + match view.per_relay.get_mut(&parent_hash) { + Some(parent_view) => { + retain_relay_hashes.push(parent_hash.clone()); + + if parent_view.children.len() > 1 { + let filtered_set = parent_view.children + .iter() + .filter(|&child_hash| !child_hash.eq(¤t_block_hash)) + .cloned() // Clone the elements to own them in the new HashSet + .collect::>(); + + removal_stack.extend(filtered_set); + + // unlink all the other children 
keeping only + // the one that belongs to the finalized chain + parent_view.children = HashSet::from_iter(vec![current_block_hash.clone()]); + } + current_block_hash = parent_hash; + current_parent_hash = parent_view.parent_hash; + }, + None => break + }; + } + + // update the roots to be the children of the latest finalized block + if let Some(finalized_hash) = retain_relay_hashes.first() { + if let Some(rb_view) = view.per_relay.get(finalized_hash) { + view.roots = rb_view.children.clone(); + } + } + + let mut to_prune = HashSet::new(); + let mut queue: VecDeque = VecDeque::from(removal_stack); + while let Some(hash) = queue.pop_front() { + _ = to_prune.insert(hash); + + if let Some(r_view) = view.per_relay.get(&hash) { + for child in &r_view.children { + queue.push_back(child.clone()); + } + } + } + + for rb_hash in to_prune { + view.per_relay.remove(&rb_hash); + } + + retain_relay_hashes +} + +// prune_and_submit_finalized_session_stats avoid the per_session mapping to grow +// indefinitely by removing sessions stored for more than MAX_SESSIONS_TO_KEEP (2) +// finalized sessions. +async fn prune_and_submit_finalized_session_stats( + sender: &mut impl overseer::RewardsStatisticsCollectorSenderTrait, + keystore: &KeystorePtr, + view: &mut View, + finalized_hash: Hash, + metrics: &Metrics, +) -> Result<()> { + let recent_block_hash = match view.recent_block { + Some((_, block_hash)) => block_hash, + None => { + gum::debug!( + target: LOG_TARGET, + ?finalized_hash, + "recent block does not exist or got erased, cannot submit finalized session statistics" + ); + return Ok(()); + }, + }; + + let finalized_session = request_session_index_for_child(finalized_hash, sender) + .await + .await + .map_err(JfyiError::OverseerCommunication)? 
+ .map_err(JfyiError::RuntimeApiCallError)?; + + match view.current_session { + Some(current_session) if current_session < finalized_session => { + // the previous session was finalized + for (session_idx, session_view) in view + .per_session + .iter() + .filter(|stored_session_idx| stored_session_idx.0 < &finalized_session) { + + if let Some(ref credentials) = session_view.credentials { + sign_and_submit_approvals_tallies( + sender, + recent_block_hash, + session_idx, + keystore, + credentials, + metrics, + session_view.validators_tallies.clone(), + ).await; + } + } + + if let Some(wipe_before) = current_session.checked_sub(MAX_SESSIONS_TO_KEEP.get()) { + view.per_session.retain(|stored_session_index, _| *stored_session_index > wipe_before); + } + + view.current_session = Some(finalized_session); + } + None => view.current_session = Some(finalized_session), + _ => {} + }; + + Ok(()) +} + +fn log_session_view_general_stats(view: &View) { + for (session_index, session_view) in &view.per_session { + let session_tally = session_view + .validators_tallies + .values() + .map(|tally| (tally.approvals, tally.no_shows)) + .fold((0, 0), |acc, (approvals, noshows)| (acc.0 + approvals, acc.1 + noshows)); + + gum::debug!( + target: LOG_TARGET, + session_idx = ?session_index, + approvals = ?session_tally.0, + noshows = ?session_tally.1, + "session collected statistics" + ); + } +} + +async fn sign_and_submit_approvals_tallies( + sender: &mut impl SubsystemSender, + relay_parent: Hash, + session_index: &SessionIndex, + keystore: &KeystorePtr, + credentials: &SigningCredentials, + metrics: &Metrics, + tallies: HashMap, +) { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + "submitting {} approvals tallies for session {}", + tallies.len(), + session_index, + ); + + let mut validators_indexes = tallies.keys().collect::>(); + validators_indexes.sort(); + + let mut approvals_tallies: Vec = Vec::with_capacity(tallies.len()); + for validator_index in validators_indexes { + let 
current_tally = tallies.get(validator_index).unwrap(); + approvals_tallies.push(ApprovalStatisticsTallyLine { + validator_index: validator_index.clone(), + approvals_usage: current_tally.approvals, + no_shows: current_tally.no_shows, + }); + } + + let payload = ApprovalStatistics(session_index.clone(), credentials.validator_index, approvals_tallies); + + let signature = match polkadot_node_subsystem_util::sign( + keystore, + &credentials.validator_key, + &payload.signing_payload(), + ) { + Ok(Some(signature)) => signature, + Ok(None) => { + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + validator_index = ?credentials.validator_index, + "private key for signing is not available", + ); + return + }, + Err(e) => { + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + validator_index = ?credentials.validator_index, + "error signing the statement: {:?}", + e, + ); + return + }, + }; + + let (tx, rx) = oneshot::channel(); + let runtime_req = runtime_api_request( + sender, + relay_parent, + RuntimeApiRequest::SubmitApprovalStatistics(payload, signature, tx), + rx, + ).await; + + match runtime_req { + Ok(()) => { + metrics.on_approvals_submitted(); + }, + Err(e) => { + gum::warn!( + target: LOG_TARGET, + "error occurred during submitting a approvals rewards tallies: {:?}", + e, + ); + }, + } +} + +#[derive(Debug)] +pub(crate) enum RuntimeRequestError { + NotSupported, + ApiError, + CommunicationError, +} + +async fn runtime_api_request( + sender: &mut impl SubsystemSender, + relay_parent: Hash, + request: RuntimeApiRequest, + receiver: oneshot::Receiver>, +) -> std::result::Result { + sender + .send_message(RuntimeApiMessage::Request(relay_parent, request).into()) + .await; + + receiver + .map_err(JfyiError::OverseerCommunication) + .await? 
+ .map_err(JfyiError::RuntimeApiCallError) +} diff --git a/polkadot/node/core/rewards-statistics-collector/src/metrics.rs b/polkadot/node/core/rewards-statistics-collector/src/metrics.rs new file mode 100644 index 0000000000000..68408317c7ae4 --- /dev/null +++ b/polkadot/node/core/rewards-statistics-collector/src/metrics.rs @@ -0,0 +1,134 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use std::collections::HashMap; +use gum::CandidateHash; +use polkadot_node_subsystem::prometheus::Opts; +use polkadot_node_subsystem_util::metrics::{ + self, + prometheus::{self, Gauge, GaugeVec, U64}, +}; +use polkadot_primitives::SessionIndex; +use crate::approval_voting_metrics::ApprovalsStats; + +#[derive(Clone)] +pub(crate) struct MetricsInner { + approvals_usage_per_session: prometheus::CounterVec, + no_shows_per_session: prometheus::CounterVec, + + approvals_per_session_per_validator: prometheus::CounterVec, + no_shows_per_session_per_validator: prometheus::CounterVec, + + approvals_stats_submittion_total: prometheus::Counter, +} + + +/// Candidate backing metrics. 
+#[derive(Default, Clone)] +pub struct Metrics (pub(crate) Option); + +impl Metrics { + pub fn record_approvals_stats( + &self, + session: SessionIndex, + approval_stats: HashMap, + per_validator_metrics: bool, + ) { + self.0.as_ref().map(|metrics| { + for stats in approval_stats.values() { + metrics.approvals_usage_per_session.with_label_values( + &[session.to_string().as_str()]).inc_by(stats.votes.len() as u64); + + metrics.no_shows_per_session.with_label_values( + &[session.to_string().as_str()]).inc_by(stats.no_shows.len() as u64); + + if per_validator_metrics { + for validator in &stats.votes { + metrics.approvals_per_session_per_validator.with_label_values( + &[session.to_string().as_str(), validator.0.to_string().as_str()]).inc() + } + + for validator in &stats.no_shows { + metrics.no_shows_per_session_per_validator.with_label_values( + &[session.to_string().as_str(), validator.0.to_string().as_str()]).inc() + } + } + } + }); + } + + pub fn on_approvals_submitted(&self) { + self.0.as_ref().map(|metrics| { + metrics.approvals_stats_submittion_total.inc(); + }); + } +} + +impl metrics::Metrics for Metrics { + fn try_register(registry: &prometheus::Registry) -> Result { + let metrics = MetricsInner { + approvals_per_session_per_validator: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_rewards_statistics_collector_approvals_per_session_per_validator", + "Total number of useful approvals a given validator provided on a session.", + ), + vec!["session", "validator_idx"].as_ref(), + )?, + registry, + )?, + no_shows_per_session_per_validator: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_rewards_statistics_collector_no_shows_per_session_per_validator", + "Total number a given validator no showed on a session.", + ), + vec!["session", "validator_idx"].as_ref(), + )?, + registry, + )?, + approvals_usage_per_session: prometheus::register( + 
prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_rewards_statistics_collector_approvals_per_session", + "Total number of useful approvals on a session.", + ), + vec!["session"].as_ref(), + )?, + registry, + )?, + no_shows_per_session: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_rewards_statistics_collector_no_shows_per_session", + "Total number of no-shows on a session.", + ), + vec!["session"].as_ref(), + )?, + registry, + )?, + approvals_stats_submittion_total: prometheus::register( + prometheus::Counter::new( + "polkadot_parachain_rewards_statistics_collector_submittion_started", + "The number of rewards tallies submitted" + )?, + registry + )?, + }; + + Ok(Metrics(Some(metrics))) + } +} diff --git a/polkadot/node/core/rewards-statistics-collector/src/tests.rs b/polkadot/node/core/rewards-statistics-collector/src/tests.rs new file mode 100644 index 0000000000000..f3748af216396 --- /dev/null +++ b/polkadot/node/core/rewards-statistics-collector/src/tests.rs @@ -0,0 +1,814 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use std::ptr::hash; +use super::*; +use assert_matches::assert_matches; +use overseer::FromOrchestra; +use polkadot_primitives::{AssignmentId, GroupIndex, SessionIndex, SessionInfo}; +use polkadot_node_subsystem::messages::{AllMessages, ChainApiResponseChannel, RewardsStatisticsCollectorMessage, RuntimeApiMessage, RuntimeApiRequest}; + +type VirtualOverseer = + polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; +use polkadot_node_subsystem::{ActivatedLeaf}; +use polkadot_node_subsystem_test_helpers as test_helpers; +use polkadot_primitives::{Hash, Header}; +use sp_application_crypto::Pair as PairT; +use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; +use test_helpers::mock::new_leaf; + +async fn activate_leaf( + virtual_overseer: &mut VirtualOverseer, + activated: ActivatedLeaf, + leaf_header: Header, + session_index: SessionIndex, + session_info: Option, +) { + let activated_leaf_hash = activated.hash; + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + activated, + )))) + .await; + + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ChainApi( + ChainApiMessage::BlockHeader(relay_hash, tx) + ) if relay_hash == activated_leaf_hash => { + tx.send(Ok(Some(leaf_header))).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == activated_leaf_hash => { + tx.send(Ok(session_index)).unwrap(); + } + ); + + if let Some(session_info) = session_info { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionInfo(req_session, tx)) + ) if req_session == session_index => { + tx.send(Ok(Some(session_info))).unwrap(); + } + ); + } +} + +async fn finalize_block( + virtual_overseer: &mut VirtualOverseer, + finalized: (Hash, BlockNumber), + 
session_index: SessionIndex, +) { + let fin_block_hash = finalized.0; + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::BlockFinalized(fin_block_hash, finalized.1))) + .await; +} + +async fn candidate_approved( + virtual_overseer: &mut VirtualOverseer, + candidate_hash: CandidateHash, + rb_hash: Hash, + approvals: Vec, +) { + let msg = FromOrchestra::Communication { + msg: RewardsStatisticsCollectorMessage::CandidateApproved( + candidate_hash.clone(), + rb_hash.clone(), + approvals, + ), + }; + virtual_overseer.send(msg).await; +} + +async fn no_shows( + virtual_overseer: &mut VirtualOverseer, + candidate_hash: CandidateHash, + rb_hash: Hash, + no_shows: Vec, +) { + let msg = FromOrchestra::Communication { + msg: RewardsStatisticsCollectorMessage::NoShows( + candidate_hash.clone(), + rb_hash.clone(), + no_shows, + ), + }; + virtual_overseer.send(msg).await; +} + +macro_rules! approvals_stats_assertion { + ($fn_name:ident, $field:ident) => { + fn $fn_name( + view: &View, + rb_hash: Hash, + candidate_hash: CandidateHash, + expected_votes: Vec, + ) { + let stats_for = view.per_relay.get(&rb_hash).unwrap(); + let approvals_for = stats_for.approvals_stats.get(&candidate_hash).unwrap(); + let collected = approvals_for + .$field + .clone() + .into_iter() + .collect::>(); + + assert_eq!(collected.len(), expected_votes.len()); + for item in collected { + assert!(expected_votes.contains(&item)); + } + } + }; +} + +approvals_stats_assertion!(assert_votes, votes); +approvals_stats_assertion!(assert_no_shows, no_shows); + +fn test_harness>( + view: &mut View, + test: impl FnOnce(VirtualOverseer) -> T, +) { + sp_tracing::init_for_tests(); + + let pool = sp_core::testing::TaskExecutor::new(); + + let (mut context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone()); + + let subsystem = async move { + if let Err(e) = run_iteration(&mut context, view, &Metrics(None)).await { + panic!("{:?}", e); + } + + view + }; + + let 
test_fut = test(virtual_overseer); + + futures::pin_mut!(test_fut); + futures::pin_mut!(subsystem); + let (_, view) = futures::executor::block_on(future::join( + async move { + let mut virtual_overseer = test_fut.await; + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + }, + subsystem, + )); +} + +#[test] +fn single_candidate_approved() { + let validator_idx = ValidatorIndex(2); + let candidate_hash: CandidateHash = CandidateHash( + Hash::from_low_u64_be(111)); + + let rb_hash = Hash::from_low_u64_be(132); + let leaf = new_leaf( + rb_hash.clone(), + 1, + ); + + let mut view = View::new(); + test_harness(&mut view, |mut virtual_overseer| async move { + activate_leaf( + &mut virtual_overseer, + leaf.clone(), + default_header(), + 1, + Some(default_session_info(1)), + ).await; + + candidate_approved(&mut virtual_overseer, candidate_hash.clone(), rb_hash, vec![validator_idx.clone()]).await; + virtual_overseer + }); + + assert_eq!(view.per_relay.len(), 1); + let stats_for = view.per_relay.get(&rb_hash).unwrap(); + let approvals_for = stats_for.approvals_stats.get(&candidate_hash).unwrap(); + + let expected_votes = vec![validator_idx]; + let collected_votes= approvals_for + .clone() + .votes + .into_iter() + .collect::>(); + + assert_eq!(expected_votes, collected_votes); +} + +#[test] +fn candidate_approved_for_different_forks() { + let validator_idx0 = ValidatorIndex(0); + let validator_idx1 = ValidatorIndex(1); + + let candidate_hash: CandidateHash = CandidateHash( + Hash::from_low_u64_be(111)); + + let rb_hash_fork_0 = Hash::from_low_u64_be(132); + let rb_hash_fork_1 = Hash::from_low_u64_be(231); + + let mut view = View::new(); + test_harness(&mut view, |mut virtual_overseer| async move { + let leaf0 = new_leaf( + rb_hash_fork_0.clone(), + 1, + ); + + let leaf1 = new_leaf( + rb_hash_fork_1.clone(), + 1, + ); + + activate_leaf( + &mut virtual_overseer, + leaf0.clone(), + default_header(), + 1, + Some(default_session_info(1)), + ).await; + 
+ activate_leaf( + &mut virtual_overseer, + leaf1.clone(), + default_header(), + 1, + None, + ).await; + + candidate_approved( + &mut virtual_overseer, + candidate_hash, + rb_hash_fork_0, + vec![validator_idx1], + ).await; + + candidate_approved( + &mut virtual_overseer, + candidate_hash, + rb_hash_fork_1, + vec![validator_idx0], + ).await; + + virtual_overseer + }); + + assert_eq!(view.per_relay.len(), 2); + + let expected_fork_0 = vec![validator_idx1]; + assert_votes(&view, rb_hash_fork_0, candidate_hash.clone(), expected_fork_0); + + let expected_fork_1 = vec![validator_idx0]; + assert_votes(&view, rb_hash_fork_1, candidate_hash.clone(), expected_fork_1); +} + +#[test] +fn candidate_approval_stats_with_no_shows() { + let approvals_from = vec![ValidatorIndex(0), ValidatorIndex(3)]; + let no_show_validators = vec![ValidatorIndex(1), ValidatorIndex(2)]; + + let rb_hash = Hash::from_low_u64_be(111); + let candidate_hash: CandidateHash = CandidateHash(Hash::from_low_u64_be(132)); + + let mut view = View::new(); + test_harness(&mut view, |mut virtual_overseer| async move { + let leaf1 = new_leaf(rb_hash.clone(), 1); + activate_leaf( + &mut virtual_overseer, + leaf1.clone(), + default_header(), + 1, + Some(default_session_info(1)), + ).await; + + candidate_approved( + &mut virtual_overseer, + candidate_hash, + rb_hash, + approvals_from, + ).await; + + no_shows( + &mut virtual_overseer, + candidate_hash, + rb_hash, + no_show_validators + ).await; + + virtual_overseer + }); + + assert_eq!(view.per_relay.len(), 1); + let expected_validators = vec![ValidatorIndex(0), ValidatorIndex(3)]; + assert_votes(&view, rb_hash, candidate_hash.clone(), expected_validators); +} + +#[test] +fn note_chunks_downloaded() { + let candidate_hash = CandidateHash(Hash::from_low_u64_be(132)); + let session_idx: SessionIndex = 2 ; + let chunk_downloads = vec![ + (ValidatorIndex(0), 10u64), + (ValidatorIndex(1), 2), + ]; + + let mut view = View::new(); + test_harness(&mut view, |mut 
virtual_overseer| async move { + virtual_overseer.send(FromOrchestra::Communication { + msg: RewardsStatisticsCollectorMessage::ChunksDownloaded( + session_idx, candidate_hash.clone(), HashMap::from_iter(chunk_downloads.clone().into_iter()), + ), + }).await; + + // should increment only validator 0 + let second_round_of_downloads = vec![ + (ValidatorIndex(0), 5u64) + ]; + virtual_overseer.send(FromOrchestra::Communication { + msg: RewardsStatisticsCollectorMessage::ChunksDownloaded( + session_idx, candidate_hash.clone(), HashMap::from_iter(second_round_of_downloads.into_iter()), + ), + }).await; + + virtual_overseer + }); + + assert_eq!(view.availability_chunks.len(), 1); + let ac = view.availability_chunks.get(&session_idx).unwrap(); + + assert_eq!(ac.downloads_per_candidate.len(), 1); + let amt_per_validator = ac.downloads_per_candidate + .get(&candidate_hash) + .unwrap(); + + let expected = vec![ + (ValidatorIndex(0), 15u64), + (ValidatorIndex(1), 2), + ]; + + for (vidx, expected_count) in expected { + let count = amt_per_validator.get(&vidx).unwrap(); + assert_eq!(*count, expected_count); + } +} + +fn default_header() -> Header { + Header { + parent_hash: Hash::zero(), + number: 1, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + } +} + +fn header_with_number_and_parent(block_number: BlockNumber, parent_hash: Hash) -> Header { + let mut header = default_header(); + header.number = block_number; + header.parent_hash = parent_hash; + header +} + +fn default_session_info(session_idx: SessionIndex) -> SessionInfo { + SessionInfo { + active_validator_indices: vec![], + random_seed: Default::default(), + dispute_period: session_idx, + validators: Default::default(), + discovery_keys: vec![], + assignment_keys: vec![], + validator_groups: Default::default(), + n_cores: 0, + zeroth_delay_tranche_width: 0, + relay_vrf_modulo_samples: 0, + n_delay_tranches: 0, + no_show_slots: 0, + needed_approvals: 0, + } +} + +#[test] +fn 
note_chunks_uploaded_to_active_validator() { + let activated_leaf_hash = Hash::from_low_u64_be(111); + let leaf1 = new_leaf(activated_leaf_hash.clone(), 1); + let leaf1_header = default_header(); + let session_index: SessionIndex = 2; + let mut session_info: SessionInfo = default_session_info(session_index); + + let validator_idx_pair = AuthorityDiscoveryPair::generate(); + let validator_idx_auth_id: AuthorityDiscoveryId = validator_idx_pair.0.public().into(); + + session_info.discovery_keys = vec![ + validator_idx_auth_id.clone(), + ]; + + let candidate_hash: CandidateHash = CandidateHash(Hash::from_low_u64_be(132)); + + let mut view = View::new(); + test_harness(&mut view, |mut virtual_overseer| async move { + activate_leaf( + &mut virtual_overseer, + leaf1, + leaf1_header, + session_index, + Some(session_info), + ).await; + + virtual_overseer.send(FromOrchestra::Communication { + msg: RewardsStatisticsCollectorMessage::ChunkUploaded( + candidate_hash.clone(), HashSet::from_iter(vec![validator_idx_auth_id.clone()]), + ), + }).await; + + virtual_overseer + }); + + let validator_idx_auth_id: AuthorityDiscoveryId = validator_idx_pair.0.public().into(); + + // assert that the leaf was activated and the session info is present + let expected_view = PerSessionView::new( + HashMap::from_iter(vec![(validator_idx_auth_id.clone(), ValidatorIndex(0))])); + + assert_eq!(view.per_session.len(),1); + assert_eq!(view.per_session.get(&2).unwrap().clone(), expected_view); + + assert_matches!(view.availability_chunks.len(), 1); + + let mut expected_av_chunks = AvailabilityChunks::new(); + expected_av_chunks.note_candidate_chunk_uploaded( + candidate_hash, ValidatorIndex(0), 1); + + assert_matches!(view.availability_chunks.get(&2).unwrap(), expected_av_chunks); +} + +#[test] +fn prune_unfinalized_forks() { + // testing pruning capabilities + // the pruning happens when a session is finalized + // means that all the collected data for the finalized session + // should be kept and 
the collected data that belongs to unfinalized + // should be pruned + + // Building a "chain" with the following relay blocks (all in the same session) + // A -> B + // A -> C -> D + + let hash_a = Hash::from_slice(&[00; 32]); + let hash_b = Hash::from_slice(&[01; 32]); + let hash_c = Hash::from_slice(&[02; 32]); + let hash_d = Hash::from_slice(&[03; 32]); + let session_zero: SessionIndex = 0; + + let candidate_hash_a: CandidateHash = CandidateHash(Hash::from_low_u64_be(100)); + let candidate_hash_b: CandidateHash = CandidateHash(Hash::from_low_u64_be(200)); + let candidate_hash_c: CandidateHash = CandidateHash(Hash::from_low_u64_be(300)); + let candidate_hash_d: CandidateHash = CandidateHash(Hash::from_low_u64_be(400)); + + let mut view = View::new(); + test_harness(&mut view, |mut virtual_overseer| async move { + let leaf_a = new_leaf(hash_a.clone(), 1); + let leaf_a_header = default_header(); + + activate_leaf( + &mut virtual_overseer, + leaf_a, + leaf_a_header, + session_zero, + Some(default_session_info(session_zero)), + ).await; + + candidate_approved(&mut virtual_overseer, candidate_hash_a, hash_a, + vec![ValidatorIndex(2), ValidatorIndex(3)]).await; + no_shows(&mut virtual_overseer, candidate_hash_a, hash_a, + vec![ValidatorIndex(0), ValidatorIndex(1)]).await; + + let leaf_b = new_leaf(hash_b.clone(), 2); + let leaf_b_header = header_with_number_and_parent(2, hash_a.clone()); + + activate_leaf( + &mut virtual_overseer, + leaf_b, + leaf_b_header, + session_zero, + None, + ).await; + + candidate_approved(&mut virtual_overseer, candidate_hash_b, hash_b, + vec![ValidatorIndex(0), ValidatorIndex(1)]).await; + + let leaf_c = new_leaf(hash_c.clone(), 2); + let leaf_c_header = header_with_number_and_parent(2, hash_a.clone()); + + activate_leaf( + &mut virtual_overseer, + leaf_c, + leaf_c_header, + session_zero, + None, + ).await; + + candidate_approved(&mut virtual_overseer, candidate_hash_c, hash_c, + vec![ValidatorIndex(0), ValidatorIndex(1), 
ValidatorIndex(2)]).await; + + let leaf_d = new_leaf(hash_d.clone(), 3); + let leaf_d_header = header_with_number_and_parent(3, hash_c.clone()); + + activate_leaf( + &mut virtual_overseer, + leaf_d, + leaf_d_header, + session_zero, + None, + ).await; + + candidate_approved(&mut virtual_overseer, candidate_hash_d, hash_d, + vec![ValidatorIndex(0), ValidatorIndex(1)]).await; + + virtual_overseer + }); + + let expect = vec![ + // relay node A should have 2 children (B, C) + (hash_a.clone(), (None, vec![hash_b.clone(), hash_c.clone()])), + + // relay node B should link to A and have no children + (hash_b.clone(), (Some(hash_a.clone()), vec![])), + + // relay node C should link to A and have 1 child (D) + (hash_c.clone(), (Some(hash_a.clone()), vec![hash_d.clone()])), + + // relay node D should link to C and have no children + (hash_d.clone(), (Some(hash_c.clone()), vec![])), + ]; + + // relay node A should be the root + assert_roots_and_relay_views( + &view, + vec![hash_a], + expect.clone(), + ); + + // Finalizing block C should prune the current unfinalized mapping + // and aggregate data of the finalized chain on the per session view + // the collected data for block D should remain untouched + test_harness(&mut view, |mut virtual_overseer| async move { + finalize_block( + &mut virtual_overseer, + (hash_c.clone(), 2), + session_zero).await; + + virtual_overseer + }); + + let expect = vec![ + // relay node D should link to C and have no children + (hash_d.clone(), (Some(hash_c.clone()), vec![])), + ]; + + assert_roots_and_relay_views( + &view, + vec![hash_d], + expect.clone(), + ); + + // check if the data was aggregated correctly for the session view + // it should aggregate approvals and no-shows collected on blocks + // A and C. 
+ // Data collected on block B should be discarded + // Data collected on block D should remain in the mapping as it was not finalized or pruned + let expected_tallies = HashMap::from_iter(vec![ + ( + ValidatorIndex(0), + PerValidatorTally { + no_shows: 1, + approvals: 1, + }, + ), + ( + ValidatorIndex(1), + PerValidatorTally { + no_shows: 1, + approvals: 1, + }, + ), + ( + ValidatorIndex(2), + PerValidatorTally { + no_shows: 0, + approvals: 2, + }, + ), + ( + ValidatorIndex(3), + PerValidatorTally { + no_shows: 0, + approvals: 1, + }, + ), + ]); + + assert_per_session_tallies(&view.per_session, 0, expected_tallies); + // creating more 3 relay block (E, F, G), all in session 1 + // D -> E -> F + // -> G + + let hash_e = Hash::from_slice(&[04; 32]); + let hash_f = Hash::from_slice(&[05; 32]); + let hash_g = Hash::from_slice(&[06; 32]); + + let candidate_hash_e = CandidateHash(Hash::from_low_u64_be(0xEE0011)); + let session_one: SessionIndex = 1; + + test_harness(&mut view, |mut virtual_overseer| async move { + let leaf_e = new_leaf(hash_e.clone(), 4); + let leaf_e_header = header_with_number_and_parent(4, hash_d.clone()); + + activate_leaf( + &mut virtual_overseer, + leaf_e, + leaf_e_header, + session_one, + Some(default_session_info(session_one)), + ).await; + + candidate_approved(&mut virtual_overseer, candidate_hash_e, hash_e, + vec![ValidatorIndex(3), ValidatorIndex(1), ValidatorIndex(0)]).await; + no_shows(&mut virtual_overseer, candidate_hash_e, hash_e, + vec![ValidatorIndex(2)]).await; + + let leaf_f = new_leaf(hash_f.clone(), 5); + let leaf_f_header = header_with_number_and_parent(5, hash_e.clone()); + + activate_leaf( + &mut virtual_overseer, + leaf_f, + leaf_f_header, + session_one, + None, + ).await; + + let leaf_g = new_leaf(hash_g.clone(), 5); + let leaf_g_header = header_with_number_and_parent(5, hash_e.clone()); + + activate_leaf( + &mut virtual_overseer, + leaf_g, + leaf_g_header, + session_one, + None, + ).await; + + // finalizing relay block E + 
finalize_block( + &mut virtual_overseer, + (hash_e.clone(), 4), + session_one).await; + + virtual_overseer + }); + + // Finalizing block E triggers the pruning mechanism + // now it should aggregate collected data from block D and E + // keeping only blocks F and E on the mapping + let expect = vec![ + // relay node F should link to E and have no children + (hash_f.clone(), (Some(hash_e), vec![])), + + // relay node G should link to E and have no children + (hash_g.clone(), (Some(hash_e), vec![])), + ]; + + // relay node A should be the root + assert_roots_and_relay_views( + &view, + vec![hash_f, hash_g], + expect.clone(), + ); + + let expected_tallies = HashMap::from_iter(vec![ + ( + ValidatorIndex(0), + PerValidatorTally { + no_shows: 1, + // validator 0 approvals increased from 1 to 2 + // as block D with more collected approvals + // was finalized + approvals: 2, + }, + ), + ( + ValidatorIndex(1), + PerValidatorTally { + no_shows: 1, + approvals: 2, + }, + ), + ( + ValidatorIndex(2), + PerValidatorTally { + no_shows: 0, + approvals: 2, + }, + ), + ( + ValidatorIndex(3), + PerValidatorTally { + no_shows: 0, + approvals: 1, + }, + ), + ]); + + assert_per_session_tallies(&view.per_session, 0, expected_tallies); + + let expected_tallies = HashMap::from_iter(vec![ + ( + ValidatorIndex(0), + PerValidatorTally { + no_shows: 0, + approvals: 1, + }, + ), + ( + ValidatorIndex(1), + PerValidatorTally { + no_shows: 0, + approvals: 1, + }, + ), + ( + ValidatorIndex(2), + PerValidatorTally { + no_shows: 1, + approvals: 0, + }, + ), + ( + ValidatorIndex(3), + PerValidatorTally { + no_shows: 0, + approvals: 1, + }, + ), + ]); + + assert_per_session_tallies(&view.per_session, 1, expected_tallies); +} + +fn assert_roots_and_relay_views( + view: &View, + roots: Vec, + relay_views: Vec<(Hash, (Option, Vec))>, +) { + assert_eq!(view.roots, HashSet::from_iter(roots)); + assert_eq!(view.per_relay.len(), relay_views.len()); + + for (rb_hash, checks) in relay_views.into_iter() { + let 
rb_view = view.per_relay.get(&rb_hash).unwrap(); + assert_eq!(rb_view.parent_hash, checks.0); + assert_eq!(rb_view.children.len(), checks.1.len()); + + for child in checks.1.iter() { + assert!(rb_view.children.contains(child)); + } + } +} + +fn assert_per_session_tallies( + per_session_view: &HashMap, + session_idx: SessionIndex, + expected_tallies: HashMap, +) { + let session_view = per_session_view + .get(&session_idx) + .expect("session index should exist in the view"); + + assert_eq!(session_view.validators_tallies.len(), expected_tallies.len()); + for (validator_index, expected_tally) in expected_tallies.iter() { + assert_eq!( + session_view.validators_tallies.get(validator_index), + Some(expected_tally), + "unexpected value for validator index {:?}", validator_index + ); + } +} \ No newline at end of file diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 9c09ea3f22a9e..2f4ad8bdea101 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -662,6 +662,7 @@ pub(crate) enum RequestResult { // This is a request with side-effects and no result, hence (). 
#[allow(dead_code)] SubmitPvfCheckStatement(()), + SubmitApprovalStatistics(()), ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option), Version(Hash, u32), Disputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState)>), diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index 5b7be703ae6b8..48d8461e036aa 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -156,6 +156,7 @@ where PvfsRequirePrecheck(relay_parent, pvfs) => self.requests_cache.cache_pvfs_require_precheck(relay_parent, pvfs), SubmitPvfCheckStatement(()) => {}, + SubmitApprovalStatistics(()) => {}, ValidationCodeHash(relay_parent, para_id, assumption, hash) => self .requests_cache .cache_validation_code_hash((relay_parent, para_id, assumption), hash), @@ -309,6 +310,10 @@ where // This request is side-effecting and thus cannot be cached. Some(request) }, + request @ Request::SubmitApprovalStatistics(_, _, _) => { + // This request is side-effecting and thus cannot be cached. 
+ Some(request) + }, Request::ValidationCodeHash(para, assumption, sender) => query!(validation_code_hash(para, assumption), sender) .map(|sender| Request::ValidationCodeHash(para, assumption, sender)), @@ -619,6 +624,15 @@ where result = () ) }, + Request::SubmitApprovalStatistics(payload, signature, sender) => { + query!( + SubmitApprovalStatistics, + submit_approval_statistics(payload, signature), + ver = 2, + sender, + result = () + ) + }, Request::PvfsRequirePrecheck(sender) => { query!(PvfsRequirePrecheck, pvfs_require_precheck(), ver = 2, sender) }, diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index 93565628e6ed2..12ae0439dbf86 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -30,10 +30,12 @@ polkadot-node-subsystem-util = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } sc-network = { workspace = true, default-features = true } +sc-network-types = { workspace = true, default-features = true } schnellru = { workspace = true } sp-core = { features = ["std"], workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } thiserror = { workspace = true } +async-trait = "0.1.88" [dev-dependencies] assert_matches = { workspace = true } diff --git a/polkadot/node/network/availability-distribution/src/lib.rs b/polkadot/node/network/availability-distribution/src/lib.rs index 438453814978c..ae2663e552c53 100644 --- a/polkadot/node/network/availability-distribution/src/lib.rs +++ b/polkadot/node/network/availability-distribution/src/lib.rs @@ -46,6 +46,7 @@ use responder::{run_chunk_receivers, run_pov_receiver}; mod metrics; /// Prometheus `Metrics` for availability distribution. 
pub use metrics::Metrics; +use polkadot_node_network_protocol::authority_discovery::AuthorityDiscovery; #[cfg(test)] mod tests; @@ -53,9 +54,10 @@ mod tests; const LOG_TARGET: &'static str = "parachain::availability-distribution"; /// The availability distribution subsystem. -pub struct AvailabilityDistributionSubsystem { +pub struct AvailabilityDistributionSubsystem { /// Easy and efficient runtime access for this subsystem. runtime: RuntimeInfo, + authority_discovery_service: AD, /// Receivers to receive messages from. recvs: IncomingRequestReceivers, /// Mapping of the req-response protocols to the full protocol names. @@ -75,7 +77,9 @@ pub struct IncomingRequestReceivers { } #[overseer::subsystem(AvailabilityDistribution, error=SubsystemError, prefix=self::overseer)] -impl AvailabilityDistributionSubsystem { +impl AvailabilityDistributionSubsystem + where AD: AuthorityDiscovery + Clone + Sync, +{ fn start(self, ctx: Context) -> SpawnedSubsystem { let future = self .run(ctx) @@ -87,21 +91,25 @@ impl AvailabilityDistributionSubsystem { } #[overseer::contextbounds(AvailabilityDistribution, prefix = self::overseer)] -impl AvailabilityDistributionSubsystem { +impl AvailabilityDistributionSubsystem +where + AD: AuthorityDiscovery + Clone + Sync +{ /// Create a new instance of the availability distribution. pub fn new( keystore: KeystorePtr, recvs: IncomingRequestReceivers, req_protocol_names: ReqProtocolNames, + authority_discovery_service: AD, metrics: Metrics, ) -> Self { let runtime = RuntimeInfo::new(Some(keystore)); - Self { runtime, recvs, req_protocol_names, metrics } + Self { runtime, authority_discovery_service, recvs, req_protocol_names, metrics } } /// Start processing work as passed on from the Overseer. 
async fn run(self, mut ctx: Context) -> std::result::Result<(), FatalError> { - let Self { mut runtime, recvs, metrics, req_protocol_names } = self; + let Self { mut runtime, mut authority_discovery_service, recvs, metrics, req_protocol_names } = self; let IncomingRequestReceivers { pov_req_receiver, @@ -123,6 +131,7 @@ impl AvailabilityDistributionSubsystem { "chunk-receiver", run_chunk_receivers( sender, + authority_discovery_service, chunk_req_v1_receiver, chunk_req_v2_receiver, metrics.clone(), @@ -167,14 +176,14 @@ impl AvailabilityDistributionSubsystem { FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { msg: - AvailabilityDistributionMessage::FetchPoV { - relay_parent, - from_validator, - para_id, - candidate_hash, - pov_hash, - tx, - }, + AvailabilityDistributionMessage::FetchPoV { + relay_parent, + from_validator, + para_id, + candidate_hash, + pov_hash, + tx, + }, } => { log_error( pov_requester::fetch_pov( @@ -188,7 +197,7 @@ impl AvailabilityDistributionSubsystem { tx, metrics.clone(), ) - .await, + .await, "pov_requester::fetch_pov", &mut warn_freq, )?; diff --git a/polkadot/node/network/availability-distribution/src/responder.rs b/polkadot/node/network/availability-distribution/src/responder.rs index 6512fcb7f656a..9ac0b22cbae87 100644 --- a/polkadot/node/network/availability-distribution/src/responder.rs +++ b/polkadot/node/network/availability-distribution/src/responder.rs @@ -25,9 +25,11 @@ use fatality::Nested; use polkadot_node_network_protocol::{ request_response::{v1, v2, IncomingRequest, IncomingRequestReceiver, IsRequest}, UnifiedReputationChange as Rep, + authority_discovery::AuthorityDiscovery, }; use polkadot_node_primitives::{AvailableData, ErasureChunk}; use polkadot_node_subsystem::{messages::AvailabilityStoreMessage, SubsystemSender}; +use polkadot_node_subsystem::messages::RewardsStatisticsCollectorMessage; use polkadot_primitives::{CandidateHash, ValidatorIndex}; use crate::{ @@ -67,13 
+69,16 @@ pub async fn run_pov_receiver( } /// Receiver task to be forked as a separate task to handle chunk requests. -pub async fn run_chunk_receivers( +pub async fn run_chunk_receivers( mut sender: Sender, + mut authority_discovery: AD, mut receiver_v1: IncomingRequestReceiver, mut receiver_v2: IncomingRequestReceiver, metrics: Metrics, ) where - Sender: SubsystemSender, + Sender: SubsystemSender + + SubsystemSender, + AD: AuthorityDiscovery + Clone + Sync { let make_resp_v1 = |chunk: Option| match chunk { None => v1::ChunkFetchingResponse::NoSuchChunk, @@ -89,7 +94,7 @@ pub async fn run_chunk_receivers( select! { res = receiver_v1.recv(|| vec![COST_INVALID_REQUEST]).fuse() => match res.into_nested() { Ok(Ok(msg)) => { - answer_chunk_request_log(&mut sender, msg, make_resp_v1, &metrics).await; + answer_chunk_request_log(&mut sender, &mut authority_discovery, msg, make_resp_v1, &metrics).await; }, Err(fatal) => { gum::debug!( @@ -109,7 +114,7 @@ pub async fn run_chunk_receivers( }, res = receiver_v2.recv(|| vec![COST_INVALID_REQUEST]).fuse() => match res.into_nested() { Ok(Ok(msg)) => { - answer_chunk_request_log(&mut sender, msg.into(), make_resp_v2, &metrics).await; + answer_chunk_request_log(&mut sender, &mut authority_discovery, msg.into(), make_resp_v2, &metrics).await; }, Err(fatal) => { gum::debug!( @@ -158,20 +163,25 @@ pub async fn answer_pov_request_log( /// Variant of `answer_chunk_request` that does Prometheus metric and logging on errors. /// /// Any errors of `answer_request` will simply be logged. 
-pub async fn answer_chunk_request_log( +pub async fn answer_chunk_request_log( sender: &mut Sender, + authority_discovery: &mut AD, req: IncomingRequest, make_response: MakeResp, metrics: &Metrics, ) where + AD: AuthorityDiscovery , Req: IsRequest + Decode + Encode + Into, Req::Response: Encode, - Sender: SubsystemSender, + Sender: SubsystemSender + + SubsystemSender, MakeResp: Fn(Option) -> Req::Response, { - let res = answer_chunk_request(sender, req, make_response).await; + let res = answer_chunk_request(sender, authority_discovery, req, make_response).await; match res { - Ok(result) => metrics.on_served_chunk(if result { SUCCEEDED } else { NOT_FOUND }), + Ok(result) => { + metrics.on_served_chunk(if result { SUCCEEDED } else { NOT_FOUND }) + }, Err(err) => { gum::warn!( target: LOG_TARGET, @@ -212,13 +222,16 @@ where /// Answer an incoming chunk request by querying the av store. /// /// Returns: `Ok(true)` if chunk was found and served. -pub async fn answer_chunk_request( +pub async fn answer_chunk_request( sender: &mut Sender, + authority_discovery: &mut AD, req: IncomingRequest, make_response: MakeResp, ) -> Result where - Sender: SubsystemSender, + AD: AuthorityDiscovery, + Sender: SubsystemSender + + SubsystemSender, Req: IsRequest + Decode + Encode + Into, Req::Response: Encode, MakeResp: Fn(Option) -> Req::Response, @@ -231,6 +244,14 @@ where let result = chunk.is_some(); + if result { + let authority_ids = authority_discovery.get_authority_ids_by_peer_id(req.peer).await; + if let Some(authority_ids) = authority_ids { + _ = sender.try_send_message( + RewardsStatisticsCollectorMessage::ChunkUploaded(payload.candidate_hash, authority_ids)); + } + } + gum::trace!( target: LOG_TARGET, hash = ?payload.candidate_hash, diff --git a/polkadot/node/network/availability-distribution/src/tests/mock.rs b/polkadot/node/network/availability-distribution/src/tests/mock.rs index 0380bc7b7e12c..0a3fa0157929c 100644 --- 
a/polkadot/node/network/availability-distribution/src/tests/mock.rs +++ b/polkadot/node/network/availability-distribution/src/tests/mock.rs @@ -16,21 +16,26 @@ //! Helper functions and tools to generate mock data useful for testing this subsystem. +use std::collections::HashSet; use std::sync::Arc; +use async_trait::async_trait; use sp_keyring::Sr25519Keyring; use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks}; use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV, Proof}; +use polkadot_node_network_protocol::authority_discovery::AuthorityDiscovery; use polkadot_primitives::{ CandidateCommitments, CandidateHash, ChunkIndex, CommittedCandidateReceiptV2, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, OccupiedCore, PersistedValidationData, SessionInfo, - ValidatorIndex, + ValidatorIndex, AuthorityDiscoveryId }; use polkadot_primitives_test_helpers::{ dummy_collator, dummy_collator_signature, dummy_hash, dummy_validation_code, CandidateDescriptor, CommittedCandidateReceipt, }; +use sc_network::Multiaddr; +use sc_network_types::PeerId; /// Create dummy session info with two validator groups. 
pub fn make_session_info() -> SessionInfo { @@ -164,3 +169,23 @@ pub fn get_valid_chunk_data( .expect("There really should be enough chunks."); (root, chunk) } + +#[derive(Debug, Clone)] +pub struct MockEmptyAuthorityDiscovery; + +impl MockEmptyAuthorityDiscovery { + pub fn new() -> Self { + MockEmptyAuthorityDiscovery {} + } +} + +#[async_trait] +impl AuthorityDiscovery for MockEmptyAuthorityDiscovery { + async fn get_addresses_by_authority_id(&mut self, authority: AuthorityDiscoveryId) -> Option> { + None + } + + async fn get_authority_ids_by_peer_id(&mut self, peer_id: PeerId) -> Option> { + None + } +} \ No newline at end of file diff --git a/polkadot/node/network/availability-distribution/src/tests/mod.rs b/polkadot/node/network/availability-distribution/src/tests/mod.rs index 078220607c37f..8792122be0eea 100644 --- a/polkadot/node/network/availability-distribution/src/tests/mod.rs +++ b/polkadot/node/network/availability-distribution/src/tests/mod.rs @@ -61,6 +61,7 @@ fn test_harness>( keystore, IncomingRequestReceivers { pov_req_receiver, chunk_req_v1_receiver, chunk_req_v2_receiver }, req_protocol_names, + mock::MockEmptyAuthorityDiscovery, Default::default(), ); let subsystem = subsystem.run(context); diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index 0f7d961e9f3ed..69601be1154ca 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -528,13 +528,13 @@ async fn handle_recover( let session_info = session_info.clone(); let n_validators = session_info.validators.len(); - launch_recovery_task( state, ctx, response_sender, recovery_strategies, RecoveryParams { + session_index: session_index, validator_authority_keys: session_info.discovery_keys.clone(), n_validators, threshold: recovery_threshold(n_validators)?, diff --git a/polkadot/node/network/availability-recovery/src/task/mod.rs 
b/polkadot/node/network/availability-recovery/src/task/mod.rs index 0a8b52411afee..ba4f131067c78 100644 --- a/polkadot/node/network/availability-recovery/src/task/mod.rs +++ b/polkadot/node/network/availability-recovery/src/task/mod.rs @@ -32,16 +32,20 @@ use crate::{metrics::Metrics, ErasureTask, PostRecoveryCheck, LOG_TARGET}; use codec::Encode; use polkadot_node_primitives::AvailableData; -use polkadot_node_subsystem::{messages::AvailabilityStoreMessage, overseer, RecoveryError}; -use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash}; +use polkadot_node_subsystem::{messages::AvailabilityStoreMessage, overseer, RecoveryError, Subsystem, SubsystemSender}; +use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash, SessionIndex}; use sc_network::ProtocolName; use futures::channel::{mpsc, oneshot}; use std::collections::VecDeque; +use polkadot_node_subsystem::messages::RewardsStatisticsCollectorMessage; /// Recovery parameters common to all strategies in a `RecoveryTask`. #[derive(Clone)] pub struct RecoveryParams { + /// Session index where the validators belong to + pub session_index: SessionIndex, + /// Discovery ids of `validators`. pub validator_authority_keys: Vec, @@ -96,7 +100,8 @@ pub struct RecoveryTask { impl RecoveryTask where - Sender: overseer::AvailabilityRecoverySenderTrait, + Sender: overseer::AvailabilityRecoverySenderTrait + + SubsystemSender, { /// Instantiate a new recovery task. 
pub fn new( @@ -174,10 +179,16 @@ where self.params.metrics.on_recovery_invalid(strategy_type), _ => self.params.metrics.on_recovery_failed(strategy_type), } + _ = self.state.get_download_chunks_metrics(); return Err(err) }, Ok(data) => { self.params.metrics.on_recovery_succeeded(strategy_type, data.encoded_size()); + _ = self.sender.try_send_message( + RewardsStatisticsCollectorMessage::ChunksDownloaded( + self.params.session_index, + self.params.candidate_hash, + self.state.get_download_chunks_metrics())); return Ok(data) }, } diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs b/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs index 6b34538b62662..e618c7f19b3ec 100644 --- a/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs +++ b/polkadot/node/network/availability-recovery/src/task/strategy/chunks.rs @@ -33,6 +33,7 @@ use polkadot_primitives::ValidatorIndex; use futures::{channel::oneshot, SinkExt}; use rand::seq::SliceRandom; use std::collections::VecDeque; +use polkadot_node_subsystem::messages::RewardsStatisticsCollectorMessage; /// Parameters specific to the `FetchChunks` strategy. 
pub struct FetchChunksParams { diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/full.rs b/polkadot/node/network/availability-recovery/src/task/strategy/full.rs index 1d7fbe8ea3c8d..fd4d29ff33f49 100644 --- a/polkadot/node/network/availability-recovery/src/task/strategy/full.rs +++ b/polkadot/node/network/availability-recovery/src/task/strategy/full.rs @@ -62,7 +62,7 @@ impl RecoveryStrategy async fn run( mut self: Box, - _: &mut State, + state: &mut State, sender: &mut Sender, common_params: &RecoveryParams, ) -> Result { @@ -126,6 +126,7 @@ impl RecoveryStrategy ); common_params.metrics.on_full_request_succeeded(); + state.note_received_available_data(validator_index); return Ok(data) }, None => { diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs b/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs index 1403277c8a95b..27d3e2575c89c 100644 --- a/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs +++ b/polkadot/node/network/availability-recovery/src/task/strategy/mod.rs @@ -45,9 +45,10 @@ use polkadot_node_subsystem::{ use polkadot_primitives::{AuthorityDiscoveryId, BlakeTwo256, ChunkIndex, HashT, ValidatorIndex}; use sc_network::{IfDisconnected, OutboundFailure, ProtocolName, RequestFailure}; use std::{ - collections::{BTreeMap, HashMap, VecDeque}, + collections::{BTreeMap, HashMap, VecDeque, hash_map::Entry}, time::Duration, }; +use std::ops::Add; // How many parallel chunk fetching requests should be running at once. const N_PARALLEL: usize = 50; @@ -200,6 +201,11 @@ struct Chunk { validator_index: ValidatorIndex, } +enum ReceivedAvailableData { + Chunk(Chunk), + Full, +} + /// Intermediate/common data that must be passed between `RecoveryStrategy`s belonging to the /// same `RecoveryTask`. pub struct State { @@ -211,17 +217,36 @@ pub struct State { /// A record of errors returned when requesting a chunk from a validator. 
recorded_errors: HashMap<(AuthorityDiscoveryId, ValidatorIndex), ErrorRecord>, + + // a counter of received available data including individual chunks and full available data + received_available_data_by: HashMap, } impl State { pub fn new() -> Self { - Self { received_chunks: BTreeMap::new(), recorded_errors: HashMap::new() } + Self { + received_chunks: BTreeMap::new(), + recorded_errors: HashMap::new(), + received_available_data_by: HashMap::new(), + } } fn insert_chunk(&mut self, chunk_index: ChunkIndex, chunk: Chunk) { self.received_chunks.insert(chunk_index, chunk); } + // increase the counter of received available data of the given validator index + fn note_received_available_data(&mut self, sender: ValidatorIndex) { + let mut counter = self.received_available_data_by.entry(sender).or_default(); + *counter += 1; + } + + // drain the record of chunks received per validator returning + // all the contained data + pub fn get_download_chunks_metrics(&mut self) -> HashMap { + self.received_available_data_by.drain().collect() + } + fn chunk_count(&self) -> usize { self.received_chunks.len() } @@ -467,6 +492,8 @@ impl State { ) -> (usize, usize) { let metrics = ¶ms.metrics; + let mut received_chunks: HashMap = HashMap::new(); + let mut total_received_responses = 0; let mut error_count = 0; @@ -506,6 +533,7 @@ impl State { chunk.index, Chunk { chunk: chunk.chunk, validator_index }, ); + self.note_received_available_data(validator_index); } else { metrics.on_chunk_request_invalid(strategy_type); error_count += 1; @@ -669,6 +697,7 @@ mod tests { let (erasure_task_tx, _erasure_task_rx) = mpsc::channel(10); Self { + session_index: SessionIndex(0), validator_authority_keys: validator_authority_id(&validators), n_validators: validators.len(), threshold: recovery_threshold(validators.len()).unwrap(), diff --git a/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs b/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs index 
8b8cff549912e..f0fb21fe7ca81 100644 --- a/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs +++ b/polkadot/node/network/availability-recovery/src/task/strategy/systematic.rs @@ -31,6 +31,7 @@ use polkadot_node_subsystem::{overseer, RecoveryError}; use polkadot_primitives::{ChunkIndex, ValidatorIndex}; use std::collections::VecDeque; +use polkadot_node_subsystem::messages::RewardsStatisticsCollectorMessage; /// Parameters needed for fetching systematic chunks. pub struct FetchSystematicChunksParams { diff --git a/polkadot/node/overseer/src/dummy.rs b/polkadot/node/overseer/src/dummy.rs index d618c0c7ca953..4a228bb9b729a 100644 --- a/polkadot/node/overseer/src/dummy.rs +++ b/polkadot/node/overseer/src/dummy.rs @@ -89,6 +89,7 @@ pub fn dummy_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, + DummySubsystem, >, SubsystemError, > @@ -133,6 +134,7 @@ pub fn one_for_all_overseer_builder( Sub, Sub, Sub, + Sub, >, SubsystemError, > @@ -163,7 +165,8 @@ where + Subsystem, SubsystemError> + Subsystem, SubsystemError> + Subsystem, SubsystemError> - + Subsystem, SubsystemError>, + + Subsystem, SubsystemError> + + Subsystem, SubsystemError>, { let metrics = ::register(registry)?; @@ -192,6 +195,7 @@ where .dispute_distribution(subsystem.clone()) .chain_selection(subsystem.clone()) .prospective_parachains(subsystem.clone()) + .rewards_statistics_collector(subsystem.clone()) .activation_external_listeners(Default::default()) .active_leaves(Default::default()) .spawner(SpawnGlue(spawner)) diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index 83709e73027b9..eb5c860b6bc68 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -85,6 +85,7 @@ use polkadot_node_subsystem_types::messages::{ DisputeCoordinatorMessage, DisputeDistributionMessage, GossipSupportMessage, NetworkBridgeRxMessage, NetworkBridgeTxMessage, ProspectiveParachainsMessage, ProvisionerMessage, 
RuntimeApiMessage, StatementDistributionMessage, + RewardsStatisticsCollectorMessage, }; pub use polkadot_node_subsystem_types::{ @@ -518,6 +519,7 @@ pub struct Overseer { #[subsystem(AvailabilityDistributionMessage, sends: [ AvailabilityStoreMessage, ChainApiMessage, + RewardsStatisticsCollectorMessage, RuntimeApiMessage, NetworkBridgeTxMessage, ])] @@ -527,6 +529,7 @@ pub struct Overseer { NetworkBridgeTxMessage, RuntimeApiMessage, AvailabilityStoreMessage, + RewardsStatisticsCollectorMessage, ])] availability_recovery: AvailabilityRecovery, @@ -607,6 +610,7 @@ pub struct Overseer { CandidateValidationMessage, ChainApiMessage, ChainSelectionMessage, + RewardsStatisticsCollectorMessage, DisputeCoordinatorMessage, RuntimeApiMessage, ])] @@ -616,6 +620,7 @@ pub struct Overseer { CandidateValidationMessage, ChainApiMessage, ChainSelectionMessage, + RewardsStatisticsCollectorMessage, DisputeCoordinatorMessage, RuntimeApiMessage, NetworkBridgeTxMessage, @@ -659,6 +664,12 @@ pub struct Overseer { ])] prospective_parachains: ProspectiveParachains, + #[subsystem(RewardsStatisticsCollectorMessage, sends: [ + RuntimeApiMessage, + ChainApiMessage, + ])] + rewards_statistics_collector: RewardsStatisticsCollector, + /// External listeners waiting for a hash to be in the active-leave set. 
pub activation_external_listeners: HashMap>>>, diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 50bb0dc698669..b6de3790b7e86 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -127,6 +127,7 @@ polkadot-node-core-chain-api = { optional = true, workspace = true, default-feat polkadot-node-core-chain-selection = { optional = true, workspace = true, default-features = true } polkadot-node-core-dispute-coordinator = { optional = true, workspace = true, default-features = true } polkadot-node-core-prospective-parachains = { optional = true, workspace = true, default-features = true } +polkadot-node-core-rewards-statistics-collector = { optional = true, workspace = true, default-features = true } polkadot-node-core-provisioner = { optional = true, workspace = true, default-features = true } polkadot-node-core-pvf = { optional = true, workspace = true, default-features = true } polkadot-node-core-pvf-checker = { optional = true, workspace = true, default-features = true } @@ -169,6 +170,7 @@ full-node = [ "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-prospective-parachains", + "polkadot-node-core-rewards-statistics-collector", "polkadot-node-core-provisioner", "polkadot-node-core-pvf", "polkadot-node-core-pvf-checker", diff --git a/polkadot/node/service/src/builder/mod.rs b/polkadot/node/service/src/builder/mod.rs index 9d52617cc8da9..2bee75dcfad1c 100644 --- a/polkadot/node/service/src/builder/mod.rs +++ b/polkadot/node/service/src/builder/mod.rs @@ -41,6 +41,10 @@ use polkadot_node_core_chain_selection::{ self as chain_selection_subsystem, Config as ChainSelectionConfig, }; use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig; +use polkadot_node_core_rewards_statistics_collector::{ + Config as RewardsStatisticsCollectorConfig, + RewardsStatisticsCollector +}; use polkadot_node_network_protocol::{ peer_set::{PeerSet, 
PeerSetProtocolNames}, request_response::{IncomingRequest, ReqProtocolNames}, @@ -76,6 +80,8 @@ pub struct NewFullParams { pub node_version: Option, /// Whether the node is attempting to run as a secure validator. pub secure_validator_mode: bool, + /// Whether the node will publish collected approval metrics per validator + pub verbose_approval_metrics: bool, /// An optional path to a directory containing the workers. pub workers_path: Option, /// Optional custom names for the prepare and execute workers. @@ -197,6 +203,7 @@ where telemetry_worker_handle: _, node_version, secure_validator_mode, + verbose_approval_metrics, workers_path, workers_names, overseer_gen, @@ -436,6 +443,10 @@ where }, }; + let rewards_statistics_collector_config = RewardsStatisticsCollectorConfig{ + verbose_approval_metrics, + }; + Some(ExtendedOverseerGenArgs { keystore: keystore_container.local_keystore(), parachains_db, @@ -452,6 +463,7 @@ where fetch_chunks_threshold, invulnerable_ah_collators, collator_protocol_hold_off, + rewards_statistics_collector_config, }) }; diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index f43940c8474f2..d9fcb929acd4a 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -28,6 +28,7 @@ use polkadot_primitives::{ InboundHrmpMessage, Nonce, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + vstaging::ApprovalStatistics, }; use sp_consensus_beefy::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; use sp_consensus_grandpa::AuthorityId as GrandpaId; @@ -203,6 +204,13 @@ sp_api::impl_runtime_apis! 
{ unimplemented!() } + fn submit_approval_statistics( + _: ApprovalStatistics, + _: ValidatorSignature, + ) { + unimplemented!() + } + fn pvfs_require_precheck() -> Vec { unimplemented!() } diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index d6ed752b4c31c..9dbb282e73d75 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -21,6 +21,7 @@ use sp_core::traits::SpawnNamed; use polkadot_availability_distribution::IncomingRequestReceivers; use polkadot_node_core_approval_voting::Config as ApprovalVotingConfig; +use polkadot_node_core_rewards_statistics_collector::Config as RewardsStatisticsCollectorConfig; use polkadot_node_core_av_store::Config as AvailabilityConfig; use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig; use polkadot_node_core_chain_selection::Config as ChainSelectionConfig; @@ -77,6 +78,7 @@ pub use polkadot_node_core_provisioner::ProvisionerSubsystem; pub use polkadot_node_core_pvf_checker::PvfCheckerSubsystem; pub use polkadot_node_core_runtime_api::RuntimeApiSubsystem; pub use polkadot_statement_distribution::StatementDistributionSubsystem; +pub use polkadot_node_core_rewards_statistics_collector::RewardsStatisticsCollector; /// Arguments passed for overseer construction. pub struct OverseerGenArgs<'a, Spawner, RuntimeClient> @@ -133,6 +135,7 @@ pub struct ExtendedOverseerGenArgs { pub candidate_req_v2_receiver: IncomingRequestReceiver, /// Configuration for the approval voting subsystem. pub approval_voting_config: ApprovalVotingConfig, + pub rewards_statistics_collector_config: RewardsStatisticsCollectorConfig, /// Receiver for incoming disputes. pub dispute_req_receiver: IncomingRequestReceiver, /// Configuration for the dispute coordinator subsystem. 
@@ -183,6 +186,7 @@ pub fn validator_overseer_builder( fetch_chunks_threshold, invulnerable_ah_collators, collator_protocol_hold_off, + rewards_statistics_collector_config, }: ExtendedOverseerGenArgs, ) -> Result< InitializedOverseerBuilder< @@ -192,7 +196,9 @@ pub fn validator_overseer_builder( PvfCheckerSubsystem, CandidateBackingSubsystem, StatementDistributionSubsystem, - AvailabilityDistributionSubsystem, + AvailabilityDistributionSubsystem< + AuthorityDiscoveryService + >, AvailabilityRecoverySubsystem, BitfieldSigningSubsystem, BitfieldDistributionSubsystem, @@ -218,6 +224,7 @@ pub fn validator_overseer_builder( DisputeDistributionSubsystem, ChainSelectionSubsystem, ProspectiveParachainsSubsystem, + RewardsStatisticsCollector, >, Error, > @@ -261,6 +268,7 @@ where chunk_req_v2_receiver, }, req_protocol_names.clone(), + authority_discovery_service.clone(), Metrics::register(registry)?, )) .availability_recovery(AvailabilityRecoverySubsystem::for_validator( @@ -350,6 +358,11 @@ where )) .chain_selection(ChainSelectionSubsystem::new(chain_selection_config, parachains_db)) .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) + .rewards_statistics_collector(RewardsStatisticsCollector::new( + keystore.clone(), + Metrics::register(registry)?, + rewards_statistics_collector_config, + )) .activation_external_listeners(Default::default()) .active_leaves(Default::default()) .supports_parachains(runtime_client) @@ -416,6 +429,7 @@ pub fn collator_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, + DummySubsystem, >, Error, > @@ -495,6 +509,7 @@ where .dispute_distribution(DummySubsystem) .chain_selection(DummySubsystem) .prospective_parachains(DummySubsystem) + .rewards_statistics_collector(DummySubsystem) .activation_external_listeners(Default::default()) .active_leaves(Default::default()) .supports_parachains(runtime_client) diff --git a/polkadot/node/subsystem-bench/Cargo.toml 
b/polkadot/node/subsystem-bench/Cargo.toml index 99ece70d02a02..d537916fdada8 100644 --- a/polkadot/node/subsystem-bench/Cargo.toml +++ b/polkadot/node/subsystem-bench/Cargo.toml @@ -36,6 +36,7 @@ polkadot-availability-recovery = { features = ["subsystem-benchmarks"], workspac polkadot-dispute-distribution = { workspace = true, default-features = true } polkadot-node-core-av-store = { workspace = true, default-features = true } polkadot-node-core-dispute-coordinator = { workspace = true, default-features = true } +polkadot-node-core-rewards-statistics-collector = { workspace = true, default-features = true } polkadot-node-network-protocol = { workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs index f4dfa47ff7621..4650eda95abdd 100644 --- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs @@ -54,6 +54,12 @@ use polkadot_node_primitives::approval::time::{ slot_number_to_tick, tick_to_slot_number, Clock, ClockExt, SystemClock, }; +use polkadot_node_core_rewards_statistics_collector::{ + RewardsStatisticsCollector as RewardsStatisticsCollectorSubsystem, + metrics::Metrics as RewardsStatisticsMetrics, + RewardsStatisticsCollector, + Config as RewardsStatisticsConfig +}; use polkadot_node_core_approval_voting::{ ApprovalVotingSubsystem, Config as ApprovalVotingConfig, RealAssignmentCriteria, }; @@ -853,6 +859,16 @@ fn build_overseer( let mock_rx_bridge = MockNetworkBridgeRx::new(network_receiver, None); let overseer_metrics = OverseerMetrics::try_register(&dependencies.registry).unwrap(); let task_handle = spawn_task_handle.clone(); + + let rewards_metrics = RewardsStatisticsMetrics::try_register(&dependencies.registry).unwrap(); + let 
rewards_statistics_collector_subsystem = RewardsStatisticsCollectorSubsystem::new( + keystore.clone(), + rewards_metrics, + RewardsStatisticsConfig{ + verbose_approval_metrics: false, + }, + ); + let dummy = dummy_builder!(task_handle, overseer_metrics) .replace_chain_api(|_| mock_chain_api) .replace_chain_selection(|_| mock_chain_selection) @@ -860,7 +876,8 @@ fn build_overseer( .replace_network_bridge_tx(|_| mock_tx_bridge) .replace_network_bridge_rx(|_| mock_rx_bridge) .replace_availability_recovery(|_| MockAvailabilityRecovery::new()) - .replace_candidate_validation(|_| MockCandidateValidation::new()); + .replace_candidate_validation(|_| MockCandidateValidation::new()) + .replace_rewards_statistics_collector(|_| rewards_statistics_collector_subsystem); let (overseer, raw_handle) = if state.options.approval_voting_parallel_enabled { let approval_voting_parallel = ApprovalVotingParallelSubsystem::with_config_and_clock( @@ -1172,7 +1189,12 @@ pub async fn bench_approvals_run( ); env.collect_resource_usage( - &["approval-distribution", "approval-voting", "approval-voting-parallel"], + &[ + "approval-distribution", + "approval-voting", + "approval-voting-parallel", + "rewards-statistics-collector" + ], true, ) } diff --git a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs index b346f988a3c90..e5865164f9a5d 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/mod.rs @@ -28,6 +28,7 @@ use crate::{ network::new_network, usage::BenchmarkUsage, }; +use async_trait::async_trait; use colored::Colorize; use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt}; @@ -40,6 +41,7 @@ use polkadot_availability_recovery::{AvailabilityRecoverySubsystem, RecoveryStra use polkadot_node_core_av_store::AvailabilityStoreSubsystem; use polkadot_node_metrics::metrics::Metrics; use polkadot_node_network_protocol::{ + 
authority_discovery::AuthorityDiscovery, request_response::{v1, v2, IncomingRequest}, OurView, }; @@ -49,15 +51,17 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::messages::{AvailabilityStoreMessage, NetworkBridgeEvent}; use polkadot_overseer::{metrics::Metrics as OverseerMetrics, Handle as OverseerHandle}; -use polkadot_primitives::{Block, CoreIndex, GroupIndex, Hash}; +use polkadot_primitives::{AuthorityDiscoveryId, Block, CoreIndex, GroupIndex, Hash}; use sc_network::request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig}; use std::{ops::Sub, sync::Arc, time::Instant}; +use std::collections::HashSet; use strum::Display; use sc_service::SpawnTaskHandle; use serde::{Deserialize, Serialize}; +use sc_network_types::multiaddr::Multiaddr; +use sc_network_types::PeerId; pub use test_state::TestState; - mod av_store_helpers; mod test_state; @@ -120,7 +124,7 @@ fn build_overseer_for_availability_write( spawn_task_handle: SpawnTaskHandle, runtime_api: MockRuntimeApi, (network_bridge_tx, network_bridge_rx): (MockNetworkBridgeTx, MockNetworkBridgeRx), - availability_distribution: AvailabilityDistributionSubsystem, + availability_distribution: AvailabilityDistributionSubsystem, chain_api: MockChainApi, availability_store: AvailabilityStoreSubsystem, bitfield_distribution: BitfieldDistribution, @@ -267,6 +271,7 @@ pub fn prepare_test( chunk_req_v2_receiver, }, state.req_protocol_names.clone(), + TestAuthorityDiscovery, Metrics::try_register(&dependencies.registry).unwrap(), ); @@ -506,3 +511,17 @@ pub async fn benchmark_availability_write( false, ) } + +#[derive(Debug, Clone)] +pub struct TestAuthorityDiscovery; + +#[async_trait] +impl AuthorityDiscovery for TestAuthorityDiscovery { + async fn get_addresses_by_authority_id(&mut self, authority: AuthorityDiscoveryId) -> Option> { + None + } + + async fn get_authority_ids_by_peer_id(&mut self, peer_id: PeerId) -> Option> { + None + } +} diff --git 
a/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs b/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs index 092a8fc5f4c12..d4f0fbb36dc27 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/dummy.rs @@ -99,3 +99,4 @@ mock!(ApprovalVoting); mock!(ApprovalVotingParallel); mock!(ApprovalDistribution); mock!(RuntimeApi); +mock!(RewardsStatisticsCollector); diff --git a/polkadot/node/subsystem-bench/src/lib/mock/mod.rs b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs index a3702dfe792f4..181dd11ad8841 100644 --- a/polkadot/node/subsystem-bench/src/lib/mock/mod.rs +++ b/polkadot/node/subsystem-bench/src/lib/mock/mod.rs @@ -71,6 +71,7 @@ macro_rules! dummy_builder { .gossip_support(MockGossipSupport {}) .dispute_distribution(MockDisputeDistribution {}) .prospective_parachains(MockProspectiveParachains {}) + .rewards_statistics_collector(MockRewardsStatisticsCollector {}) .activation_external_listeners(Default::default()) .active_leaves(Default::default()) .metrics($metrics) diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 8805a330a99f6..693ef119da553 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -64,6 +64,7 @@ use std::{ /// Network events as transmitted to other subsystems, wrapped in their message types. pub mod network_bridge_event; pub use network_bridge_event::NetworkBridgeEvent; +use polkadot_primitives::vstaging::ApprovalStatistics; /// A request to the candidate backing subsystem to check whether /// we can second this candidate. @@ -730,6 +731,8 @@ pub enum RuntimeApiRequest { FetchOnChainVotes(RuntimeApiSender>), /// Submits a PVF pre-checking statement into the transaction pool. SubmitPvfCheckStatement(PvfCheckStatement, ValidatorSignature, RuntimeApiSender<()>), + /// Submits the Rewards Approvals Statistics into the transaction pool. 
+ SubmitApprovalStatistics(ApprovalStatistics, ValidatorSignature, RuntimeApiSender<()>), /// Returns code hashes of PVFs that require pre-checking by validators in the active set. PvfsRequirePrecheck(RuntimeApiSender<Vec<ValidationCodeHash>>), /// Get the validation code used by the specified para, taking the given @@ -1471,3 +1474,16 @@ pub enum ProspectiveParachainsMessage { oneshot::Sender>, ), } + +/// Messages sent to the Statistics Collector subsystem. +#[derive(Debug)] +pub enum RewardsStatisticsCollectorMessage { + /// Per-validator count of chunks downloaded for a candidate in the given session. + ChunksDownloaded(SessionIndex, CandidateHash, HashMap<ValidatorIndex, u32>), + /// Validators to which a chunk of the given candidate was uploaded. + ChunkUploaded(CandidateHash, HashSet<ValidatorIndex>), + + /// Candidate received enough approvals and is now approved + CandidateApproved(CandidateHash, Hash, Vec<ValidatorIndex>), + + /// Validators that have not shared their votes in time (no-shows) + NoShows(CandidateHash, Hash, Vec<ValidatorIndex>), +} \ No newline at end of file diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index bb8f359b729ba..46528acb3c9a5 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -36,6 +36,7 @@ use std::{ collections::{BTreeMap, VecDeque}, sync::Arc, }; +use polkadot_primitives::vstaging::ApprovalStatistics; /// Offers header utilities. /// @@ -218,6 +219,14 @@ pub trait RuntimeApiSubsystemClient { signature: ValidatorSignature, ) -> Result<(), ApiError>; + /// Submits the approval statistics collected for the session + async fn submit_approval_statistics( + &self, + at: Hash, + payload: ApprovalStatistics, + signature: ValidatorSignature, + ) -> Result<(), ApiError>; + /// Returns code hashes of PVFs that require pre-checking by validators in the active set. /// /// NOTE: This function is only available since parachain host version 2.
@@ -533,6 +542,20 @@ where runtime_api.submit_pvf_check_statement(at, stmt, signature) } + async fn submit_approval_statistics( + &self, + at: Hash, + payload: ApprovalStatistics, + signature: ValidatorSignature, + ) -> Result<(), ApiError> { + let mut runtime_api = self.client.runtime_api(); + runtime_api.register_extension( + self.offchain_transaction_pool_factory.offchain_transaction_pool(at), + ); + + runtime_api.submit_approval_statistics(at, payload, signature) + } + async fn pvfs_require_precheck(&self, at: Hash) -> Result, ApiError> { self.client.runtime_api().pvfs_require_precheck(at) } diff --git a/polkadot/parachain/test-parachains/adder/collator/src/main.rs b/polkadot/parachain/test-parachains/adder/collator/src/main.rs index f6ed513c76a33..4c9b353faa944 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/main.rs @@ -101,6 +101,7 @@ fn main() -> Result<()> { keep_finalized_for: None, invulnerable_ah_collators: HashSet::new(), collator_protocol_hold_off: None, + verbose_approval_metrics: false, }, ) .map_err(|e| e.to_string())?; diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index 518a828e7e0e8..93b148e29709e 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -128,6 +128,7 @@ use alloc::{ }; use polkadot_core_primitives as pcp; use polkadot_parachain_primitives::primitives as ppp; +use crate::vstaging::ApprovalStatistics; sp_api::decl_runtime_apis! { /// The API for querying the state of parachains on-chain. @@ -214,6 +215,10 @@ sp_api::decl_runtime_apis! { /// NOTE: This function is only available since parachain host version 2. fn submit_pvf_check_statement(stmt: PvfCheckStatement, signature: ValidatorSignature); + + /// Submits the session collected proof statistics into the transaction pool. 
+ fn submit_approval_statistics(payload: ApprovalStatistics, signature: ValidatorSignature); + /// Returns code hashes of PVFs that require pre-checking by validators in the active set. /// /// NOTE: This function is only available since parachain host version 2. diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 72c39023c7e1f..03e979b44c391 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -15,3 +15,57 @@ // along with Polkadot. If not, see . //! Staging Primitives. + +use scale_info::TypeInfo; +use sp_api::__private::{Decode, Encode}; +use sp_application_crypto::RuntimeDebug; +use sp_core::DecodeWithMemTracking; +use sp_staking::SessionIndex; +use crate::ValidatorIndex; +use alloc::vec::Vec; + +/// A reward tally line represent the collected statistics about +/// approvals voting for a given validator, how much successful approvals +/// was collected and how many times the given validator no-showed +#[derive( + RuntimeDebug, + Copy, + Clone, + PartialEq, + Encode, + Decode, + DecodeWithMemTracking, + TypeInfo, +)] +pub struct ApprovalStatisticsTallyLine { + /// represents the validator to which the statistics belongs to + pub validator_index: ValidatorIndex, + + /// how many times the validator had sent useful approval votes + /// that contribute the successful approval of a candidate + pub approvals_usage: u32, + + /// how many times the validator was supposed to send a vote but + /// no showed + pub no_shows: u32, +} + +/// ApprovalRewards is the set of tallies where each tally represents +/// a given validator and its approval voting statistics +#[derive( + RuntimeDebug, + Clone, + PartialEq, + Encode, + Decode, + DecodeWithMemTracking, + TypeInfo, +)] +pub struct ApprovalStatistics(pub SessionIndex, pub ValidatorIndex, pub Vec); + +impl ApprovalStatistics { + pub fn signing_payload(&self) -> Vec { + const MAGIC: [u8; 4] = *b"APST"; // for "approval 
statistics" + (MAGIC, self.0, self.1.clone()).encode() + } +} diff --git a/polkadot/runtime/parachains/src/approvals_rewards/benchmarking.rs b/polkadot/runtime/parachains/src/approvals_rewards/benchmarking.rs new file mode 100644 index 0000000000000..b573b2d8f56ea --- /dev/null +++ b/polkadot/runtime/parachains/src/approvals_rewards/benchmarking.rs @@ -0,0 +1,86 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use alloc::vec; +use frame_benchmarking::v2::*; +use polkadot_primitives::{PvfCheckStatement, ValidatorId, ValidatorIndex, ValidatorSignature}; +use polkadot_primitives::{ + vstaging::ApprovalStatistics, + SessionIndex, +}; +use frame_system::{RawOrigin}; +use sp_application_crypto::RuntimeAppPublic; +use crate::{configuration, shared}; + +// Constants for the benchmarking +const VALIDATOR_NUM: usize = 800; +const SESSION_INDEX: SessionIndex = 1; + +fn initialize() +where + T: Config + shared::Config, +{ + // 0. generate a list of validators + let validators = (0..VALIDATOR_NUM) + .map(|_| ::generate_pair(None)) + .collect::>(); + + // 1. Make sure PVF pre-checking is enabled in the config. + let config = configuration::ActiveConfig::::get(); + configuration::Pallet::::force_set_active_config(config.clone()); + + // 2. 
initialize a new session with deterministic validator set. + crate::shared::pallet::Pallet::::set_active_validators_ascending(validators.clone()); + crate::shared::pallet::Pallet::::set_session_index(SESSION_INDEX); +} + +fn generate_approvals_tallies() -> impl Iterator +where + T: Config + shared::Config +{ + let validators = shared::ActiveValidatorKeys::::get(); + + (0..validators.len()).map(move |validator_index| { + let tally = vec![]; + let payload = ApprovalStatistics(SESSION_INDEX, ValidatorIndex(validator_index as u32), tally); + let signature = validators[validator_index].sign(&payload.signing_payload()).unwrap(); + (payload, signature) + }) +} + +#[benchmarks] +mod benchmarks { + use super::*; + #[benchmark] + fn include_approvals_rewards_statistics() { + initialize::(); + let (payload, signature) = generate_approvals_tallies::().next().unwrap(); + + #[block] + { + let _ = + Pallet::::include_approvals_rewards_statistics(RawOrigin::None.into(), payload, signature); + } + } + + impl_benchmark_test_suite! { + Pallet, + crate::mock::new_test_ext(Default::default()), + crate::mock::Test + } +} \ No newline at end of file diff --git a/polkadot/runtime/parachains/src/approvals_rewards/mod.rs b/polkadot/runtime/parachains/src/approvals_rewards/mod.rs new file mode 100644 index 0000000000000..cb7b1fd4b1ed6 --- /dev/null +++ b/polkadot/runtime/parachains/src/approvals_rewards/mod.rs @@ -0,0 +1,295 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Approvals Rewards pallet. + +use alloc::vec::*; +use crate::{ + configuration, + inclusion::{QueueFootprinter, UmpQueueId}, + initializer::SessionChangeNotification, + session_info, + shared, +}; +use codec::{Decode, Encode}; +use core::{cmp, mem}; +use frame_support::{ + pallet_prelude::*, + traits::{EnsureOriginWithArg, EstimateNextSessionRotation}, + DefaultNoBound, +}; +use scale_info::{ + Type, TypeInfo, + prelude::vec, +}; +use sp_runtime::{ + traits::{AppVerify, One, Saturating}, + DispatchResult, SaturatedConversion, +}; +use frame_system::pallet_prelude::*; +use polkadot_primitives::{ + vstaging::ApprovalStatistics, + slashing::{DisputeProof, DisputesTimeSlot, PendingSlashes}, + CandidateHash, + DisputeOffenceKind, + SessionIndex, ValidatorId, ValidatorIndex, ValidatorSignature, + IndexedVec, byzantine_threshold +}; + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; + +const LOG_TARGET: &str = "runtime::approvals_rewards"; + +pub use pallet::*; +use polkadot_primitives::vstaging::ApprovalStatisticsTallyLine; + +pub trait WeightInfo { + fn include_approvals_rewards_statistics() -> Weight; +} + +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn include_approvals_rewards_statistics() -> Weight { + // This special value is to distinguish from the finalizing variants above in tests. 
+ Weight::MAX - Weight::from_parts(1, 1) + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use polkadot_primitives::vstaging::ApprovalStatisticsTallyLine; + use sp_runtime::transaction_validity::{ + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + ValidTransaction, + }; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: + frame_system::Config + + configuration::Config + + shared::Config + + session_info::Config + + frame_system::offchain::CreateBare> + { + #[allow(deprecated)] + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + // Weight information for extrinsics in this pallet. + // type WeightInfo: WeightInfo; + } + + /// Actual past code hash, indicated by the para id as well as the block number at which it + /// became outdated. + #[pallet::storage] + pub(super) type ApprovalsTallies = + StorageMap<_, Twox64Concat, (SessionIndex, ValidatorIndex), Vec>; + + #[pallet::storage] + pub(super) type AvailableApprovalsMedians = + StorageMap<_, Twox64Concat, SessionIndex, Vec>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + ApprovalTalliesStored((SessionIndex, ValidatorIndex)) + } + + #[pallet::error] + pub enum Error { + /// The approval rewards payload has a future session index. + ApprovalRewardsFutureSession, + + /// The approval rewards payloads has an already pruned session index. 
+ ApprovalRewardsPassedSession, + + /// The session index has no available data and is not the current session index + ApprovalRewardsUnknownSessionIndex, + + /// Validator index is not in the session validators bounds + ApprovalRewardsValidatorIndexOutOfBounds, + + /// Invalid signed payload + ApprovalRewardsInvalidSignature, + + /// The validator already have submitted a tally for that session + ApprovalTalliesAlreadyStored, + } + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight(1)] + //#[pallet::weight(::WeightInfo::include_approvals_rewards_statistics())] + pub fn include_approvals_rewards_statistics( + origin: OriginFor, + payload: ApprovalStatistics, + signature: ValidatorSignature, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + + let current_session = shared::CurrentSessionIndex::::get(); + let payload_session_index = payload.0; + let payload_validator_index = payload.1; + + let config = configuration::ActiveConfig::::get(); + + if payload_session_index > current_session { + return Err(Error::::ApprovalRewardsFutureSession.into()) + } else if payload_session_index < current_session.saturating_sub(config.dispute_period) { + return Err(Error::::ApprovalRewardsPassedSession.into()) + } + + let validator_public = if payload_session_index == current_session { + let validators = shared::ActiveValidatorKeys::::get(); + let validator_index = payload_validator_index.0 as usize; + validators + .get(validator_index) + .ok_or(Error::::ApprovalRewardsValidatorIndexOutOfBounds)? + .clone() + } else { + let session_info = match session_info::Sessions::::get(payload_session_index) { + Some(s) => s, + None => return Err(Error::::ApprovalRewardsUnknownSessionIndex.into()), + }; + + session_info.validators + .get(payload_validator_index) + .ok_or(Error::::ApprovalRewardsValidatorIndexOutOfBounds)? 
+ .clone() + }; + + let signing_payload = payload.signing_payload(); + ensure!( + signature.verify(&signing_payload[..], &validator_public), + Error::::ApprovalRewardsInvalidSignature, + ); + + let approvals_key = (payload_session_index, payload_validator_index); + + // Ensure that it is a fresh session tally. + if ApprovalsTallies::::contains_key(&approvals_key) { + return Err(Error::::ApprovalTalliesAlreadyStored.into()) + } + + ApprovalsTallies::::insert(approvals_key, payload.2); + Self::deposit_event(Event::ApprovalTalliesStored(approvals_key)); + //Ok(Some(::WeightInfo::include_approvals_rewards_statistics()).into()) + Ok(Pays::No.into()) + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + match call { + Call::include_approvals_rewards_statistics { payload, signature } => { + ValidTransaction::with_tag_prefix("ApprovalRewardsStatistics") + .priority(TransactionPriority::max_value()) + .longevity(64_u64) + .and_provides((payload.0, payload.1, payload.2.clone())) + .propagate(true) + .build() + } + _ => InvalidTransaction::Call.into(), + } + } + + fn pre_dispatch(_call: &Self::Call) -> Result<(), TransactionValidityError> { + Ok(()) + } + } +} + +impl Pallet { + /// Handle an incoming session change. 
+ pub(crate) fn initializer_on_new_session( + notification: &SessionChangeNotification>, + ) { + let previous_session = notification.session_index.saturating_sub(1); + let session_info = match session_info::Sessions::::get(previous_session) { + Some(s) => s, + None => return, + }; + + let validators_len = session_info.validators.len(); + + let mut rewards_matrix: Vec> = vec![]; + for idx in 0..validators_len { + let v_idx = ValidatorIndex(idx as u32); + if let Some(tally) = ApprovalsTallies::::get((previous_session, v_idx)) { + rewards_matrix.push(tally); + } + } + + if rewards_matrix.len() >= byzantine_threshold(validators_len) { + let mut approval_usages_medians = Vec::new(); + for (v_idx, _) in session_info.validators.into_iter().enumerate() { + let mut v: Vec = rewards_matrix.iter().map(|at| at[v_idx].approvals_usage).collect(); + v.sort(); + approval_usages_medians.push(v[v.len() / 2]); + } + + AvailableApprovalsMedians::::insert(previous_session, approval_usages_medians); + } + + let mut drop_keys = vec![]; + let config = configuration::ActiveConfig::::get(); + ApprovalsTallies::::iter_keys().for_each(|(session_idx, validator_idx)| { + let min_session_to_keep = notification.session_index.saturating_sub(config.dispute_period); + if session_idx < min_session_to_keep { + drop_keys.push((session_idx, validator_idx)); + } + }); + + for key in drop_keys { + ApprovalsTallies::::remove(key); + } + } +} + +impl Pallet +where + T: Config + frame_system::offchain::CreateBare> +{ + /// Submits the given approval statistics with corresponding signature as an unsigned transaction + /// into the memory pool. Ultimately, that disseminates the transaction across the network. + /// + /// This function expects an offchain context and cannot be callable from the on-chain logic. + /// + /// The signature assumed to pertain to `payload`.
+ /// + pub(crate) fn submit_approval_statistics( + payload: ApprovalStatistics, + signature: ValidatorSignature, + ) { + use frame_system::offchain::{CreateBare, SubmitTransaction}; + let call = Call::include_approvals_rewards_statistics { payload, signature }; + + let xt = >>::create_bare(call.into()); + + if let Err(e) = SubmitTransaction::>::submit_transaction(xt) { + log::error!(target: LOG_TARGET, "Error submitting approval statistics: {:?}", e,); + } + } +} \ No newline at end of file diff --git a/polkadot/runtime/parachains/src/initializer.rs b/polkadot/runtime/parachains/src/initializer.rs index 6ee245fb5230c..00a1329839b0c 100644 --- a/polkadot/runtime/parachains/src/initializer.rs +++ b/polkadot/runtime/parachains/src/initializer.rs @@ -24,6 +24,7 @@ use crate::{ configuration::{self, HostConfiguration}, disputes::{self, DisputesHandler as _, SlashingHandler as _}, dmp, hrmp, inclusion, paras, scheduler, session_info, shared, + approvals_rewards, }; use alloc::vec::Vec; use codec::{Decode, Encode}; @@ -125,6 +126,7 @@ pub mod pallet { + disputes::Config + dmp::Config + hrmp::Config + + approvals_rewards::Config { /// A randomness beacon. type Randomness: Randomness>; @@ -283,6 +285,7 @@ impl Pallet { dmp::Pallet::::initializer_on_new_session(&notification, &outgoing_paras); hrmp::Pallet::::initializer_on_new_session(&notification, &outgoing_paras); T::CoretimeOnNewSession::on_new_session(&notification); + approvals_rewards::Pallet::::initializer_on_new_session(&notification); } /// Should be called when a new session occurs.
Buffers the session notification to be applied diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index 1cd534257d7f9..55b6e2880f2b4 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -40,6 +40,7 @@ pub mod reward_points; pub mod scheduler; pub mod session_info; pub mod shared; +pub mod approvals_rewards; pub mod runtime_api_impl; diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v13.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v13.rs index dc204c67e3462..0e182151b6b5f 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/v13.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/v13.rs @@ -40,6 +40,7 @@ use polkadot_primitives::{ PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; +use polkadot_primitives::vstaging::ApprovalStatistics; use sp_runtime::traits::One; /// Implementation for the `validators` function of the runtime API. diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 0f6725e2785a1..545a414df4159 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,10 +16,11 @@ //! Put implementations of functions from staging APIs here. 
-use crate::{disputes, initializer, paras}; +use crate::{disputes, initializer, paras, approvals_rewards}; use alloc::vec::Vec; -use polkadot_primitives::{slashing, CandidateHash, Id as ParaId, SessionIndex}; +use polkadot_primitives::{slashing, CandidateHash, Id as ParaId, SessionIndex, ValidatorSignature}; +use polkadot_primitives::vstaging::ApprovalStatistics; /// Implementation of `para_ids` runtime API pub fn para_ids() -> Vec { @@ -31,3 +32,11 @@ pub fn unapplied_slashes_v2( ) -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)> { disputes::slashing::Pallet::::unapplied_slashes() } + +/// Submits the collected approval statistics for a given session. +pub fn submit_approval_statistics( + payload: ApprovalStatistics, + signature: ValidatorSignature, +) { + approvals_rewards::Pallet::::submit_approval_statistics(payload, signature) +} \ No newline at end of file diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 80426212d4c29..c0452f2caace4 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -80,6 +80,7 @@ use polkadot_runtime_parachains::{ }, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, + approvals_rewards as parachains_approvals_rewards, }; use rococo_runtime_constants::system_parachain::{coretime::TIMESLICE_PERIOD, BROKER_ID}; use scale_info::TypeInfo; @@ -1224,6 +1225,10 @@ impl parachains_slashing::Config for Runtime { type BenchmarkingConfig = parachains_slashing::BenchConfig<200>; } +impl parachains_approvals_rewards::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + parameter_types! { pub const ParaDeposit: Balance = 40 * UNITS; } @@ -1591,6 +1596,7 @@ construct_runtime! 
{ MessageQueue: pallet_message_queue = 64, OnDemandAssignmentProvider: parachains_on_demand = 66, CoretimeAssignmentProvider: parachains_assigner_coretime = 68, + ApprovalsRewards: parachains_approvals_rewards = 69, // Parachain Onboarding Pallets. Start indices at 70 to leave room. Registrar: paras_registrar = 70, @@ -1861,6 +1867,7 @@ mod benches { [polkadot_runtime_parachains::paras_inherent, ParaInherent] [polkadot_runtime_parachains::paras, Paras] [polkadot_runtime_parachains::on_demand, OnDemandAssignmentProvider] + [polkadot_runtime_parachains::approvals_rewards, ApprovalsRewards] // Substrate [pallet_balances, Balances] [pallet_balances, NisCounterpartBalances] @@ -2104,6 +2111,13 @@ sp_api::impl_runtime_apis! { parachains_runtime_api_impl::submit_pvf_check_statement::(stmt, signature) } + fn submit_approval_statistics( + payload: polkadot_primitives::vstaging::ApprovalStatistics, + signature: polkadot_primitives::ValidatorSignature + ) { + parachains_staging_runtime_api_impl::submit_approval_statistics::(payload, signature) + } + fn pvfs_require_precheck() -> Vec { parachains_runtime_api_impl::pvfs_require_precheck::() } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index b39e53ad75e91..a12634891f631 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -62,6 +62,7 @@ use polkadot_primitives::{ PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, PARACHAIN_KEY_TYPE_ID, + vstaging::ApprovalStatistics, }; use polkadot_runtime_common::{ assigned_slots, auctions, crowdloan, @@ -90,6 +91,7 @@ use polkadot_runtime_parachains::{ }, scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, + approvals_rewards as parachains_approvals_rewards, }; use scale_info::TypeInfo; use sp_authority_discovery::AuthorityId as 
AuthorityDiscoveryId; @@ -1444,6 +1446,11 @@ impl parachains_paras::Config for Runtime { >; } +impl parachains_approvals_rewards::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // type WeightInfo = weights::polkadot_runtime_parachains_inclusion::WeightInfo; +} + parameter_types! { /// Amount of weight that can be spent per block to service messages. /// @@ -1978,6 +1985,8 @@ mod runtime { pub type OnDemandAssignmentProvider = parachains_on_demand; #[runtime::pallet_index(57)] pub type CoretimeAssignmentProvider = parachains_assigner_coretime; + #[runtime::pallet_index(58)] + pub type ApprovalsRewards = parachains_approvals_rewards; // Parachain Onboarding Pallets. Start indices at 60 to leave room. #[runtime::pallet_index(60)] @@ -2142,6 +2151,7 @@ mod benches { [polkadot_runtime_parachains::paras_inherent, ParaInherent] [polkadot_runtime_parachains::on_demand, OnDemandAssignmentProvider] [polkadot_runtime_parachains::coretime, Coretime] + [polkadot_runtime_parachains::approvals_rewards, ApprovalsRewards] // Substrate [pallet_bags_list, VoterList] [pallet_balances, Balances] @@ -2352,6 +2362,13 @@ sp_api::impl_runtime_apis! 
{ parachains_runtime_api_impl::submit_pvf_check_statement::(stmt, signature) } + fn submit_approval_statistics( + payload: ApprovalStatistics, + signature: ValidatorSignature, + ) { + parachains_staging_runtime_api_impl::submit_approval_statistics::(payload, signature) + } + fn pvfs_require_precheck() -> Vec { parachains_runtime_api_impl::pvfs_require_precheck::() } diff --git a/polkadot/zombienet-sdk-tests/tests/functional/mod.rs b/polkadot/zombienet-sdk-tests/tests/functional/mod.rs index e28cfb4039303..df43e1a3551de 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/mod.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/mod.rs @@ -10,3 +10,5 @@ mod shared_core_idle_parachain; mod spam_statement_distribution_requests; mod sync_backing; mod validator_disabling; +mod rewards_statistics_collector; +mod rewards_statistics_mixed_validators; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/rewards_statistics_collector.rs b/polkadot/zombienet-sdk-tests/tests/functional/rewards_statistics_collector.rs new file mode 100644 index 0000000000000..34cedc008ef8a --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/functional/rewards_statistics_collector.rs @@ -0,0 +1,154 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Test that nodes fetch availability chunks early for scheduled cores and normally for occupied +// core. 
+ +use std::ops::Range; +use anyhow::anyhow; +use cumulus_zombienet_sdk_helpers::{assert_para_throughput, wait_for_nth_session_change, report_label_with_attributes, assert_finality_lag}; +use polkadot_primitives::{Id as ParaId, SessionIndex}; +use serde_json::json; +use subxt::{OnlineClient, PolkadotConfig}; +use subxt::blocks::Block; +use zombienet_orchestrator::network::Network; +use zombienet_orchestrator::network::node::NetworkNode; +use zombienet_sdk::{LocalFileSystem, NetworkConfigBuilder}; + +#[tokio::test(flavor = "multi_thread")] +async fn rewards_statistics_collector_test() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let images = zombienet_sdk::environment::get_images_from_env(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![ + ("-lparachain=debug").into(), + ("--verbose-approval-metrics=true").into(), + ]) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "group_rotation_frequency": 4 + } + } + } + })) + .with_node(|node| node.with_name("validator-0")); + + (1..12) + .fold(r, |acc, i| + acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + p.with_id(2000) + .with_default_command("adder-collator") + .with_default_image( + std::env::var("COL_IMAGE") + .unwrap_or("docker.io/paritypr/colander:latest".to_string()) + .as_str(), + ) + .cumulus_based(false) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_collator(|n| n.with_name("collator-adder-2000")) + }) + .with_parachain(|p| { + p.with_id(2001) + .with_default_command("polkadot-parachain") + .with_default_image(images.cumulus.as_str()) + .with_default_args(vec![("-lparachain=debug,aura=debug").into()]) + 
.with_collator(|n| n.with_name("collator-2001")) + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + })?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let relay_client: OnlineClient = relay_node.wait_client().await?; + + let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?; + + assert_para_throughput( + &relay_client, + 15, + [(ParaId::from(2000), 11..16), (ParaId::from(2001), 11..16)] + .into_iter() + .collect(), + ) + .await?; + + // Assert the parachain finalized block height is also on par with the number of backed + // candidates. We can only do this for the collator based on cumulus. + assert_finality_lag(&relay_client, 6).await?; + + assert_approval_usages_medians( + 1, + 0..12, + &network, + ).await?; + + Ok(()) +} + +async fn assert_approval_usages_medians( + session: SessionIndex, + validators_range: Range, + network: &Network, +) -> Result<(), anyhow::Error> { + let mut medians = vec![]; + + for idx in validators_range.clone() { + let validator_identifier = format!("validator-{}", idx); + let relay_node = network.get_node(validator_identifier.clone())?; + + let approvals_per_session = + report_label_with_attributes( + "polkadot_parachain_rewards_statistics_collector_approvals_per_session", + vec![ + ("session", session.to_string().as_str()), + ("chain", "rococo_local_testnet"), + ], + ); + + let total_approvals = relay_node.reports(approvals_per_session.clone()).await?; + + let mut metrics = vec![]; + for validator_idx in validators_range.clone() { + let approvals_per_session_per_validator = + report_label_with_attributes( + "polkadot_parachain_rewards_statistics_collector_approvals_per_session_per_validator", + vec![ + ("session", session.to_string().as_str()), + ("validator_idx", validator_idx.to_string().as_str()), + ("chain", 
"rococo_local_testnet"), + ], + ); + metrics.push(approvals_per_session_per_validator); + } + + let mut total_sum = 0; + for metric_per_validator in metrics { + let validator_approvals_usage = relay_node.reports(metric_per_validator.clone()).await?; + total_sum += validator_approvals_usage as u32; + } + + assert_eq!(total_sum, total_approvals as u32); + medians.push(total_sum / validators_range.len() as u32); + } + + log::info!("Collected medians for session {session} {:?}", medians); + Ok(()) +} \ No newline at end of file diff --git a/polkadot/zombienet-sdk-tests/tests/functional/rewards_statistics_mixed_validators.rs b/polkadot/zombienet-sdk-tests/tests/functional/rewards_statistics_mixed_validators.rs new file mode 100644 index 0000000000000..cf918cff6e03a --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/functional/rewards_statistics_mixed_validators.rs @@ -0,0 +1,252 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Test that nodes fetch availability chunks early for scheduled cores and normally for occupied +// core. 
+ +use std::collections::HashMap; +use std::ops::Range; +use anyhow::anyhow; +use cumulus_zombienet_sdk_helpers::{assert_para_throughput, wait_for_nth_session_change, report_label_with_attributes, assert_finality_lag, wait_for_first_session_change, find_event_and_decode_fields}; +use polkadot_primitives::{CandidateReceiptV2, Id as ParaId, SessionIndex}; +use serde_json::json; +use subxt::{OnlineClient, PolkadotConfig}; +use subxt::blocks::Block; +use zombienet_orchestrator::network::Network; +use zombienet_orchestrator::network::node::NetworkNode; +use zombienet_sdk::{LocalFileSystem, NetworkConfigBuilder}; +use pallet_revive::H256; + +#[tokio::test(flavor = "multi_thread")] +async fn rewards_statistics_mixed_validators_test() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let images = zombienet_sdk::environment::get_images_from_env(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 1, + "group_rotation_frequency": 4 + }, + } + } + })) + .with_node(|node| node.with_name("validator-0")); + + let r = (1..9) + .fold(r, |acc, i| + acc.with_node(|node| node.with_name(&format!("validator-{i}")))); + + (9..12).fold(r, |acc, i| { + acc.with_node(|node| { + node.with_name(&format!("malus-{i}")) + .with_args(vec![ + "-lparachain=debug,MALUS=trace".into(), + "--no-hardware-benchmarks".into(), + "--insecure-validator-i-know-what-i-do".into(), + ]) + .with_command("malus") + .with_subcommand("dispute-ancestor") + .invulnerable(false) + }) + }) + }) + .with_parachain(|p| { + p.with_id(1000) + .with_default_command("adder-collator") + .with_default_image( + 
std::env::var("COL_IMAGE") + .unwrap_or("docker.io/paritypr/colander:latest".to_string()) + .as_str(), + ) + .cumulus_based(false) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_collator(|n| n.with_name("adder-collator-1000")) + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + })?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let relay_client: OnlineClient = relay_node.wait_client().await?; + + assert_para_throughput_for_included_parablocks( + &relay_client, + 20, + [(polkadot_primitives::Id::from(1000), (10..30, 8..14))].into_iter().collect(), + ).await?; + + let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?; + + //wait_for_nth_session_change(&mut blocks_sub, 1).await?; + + // Assert the parachain finalized block height is also on par with the number of backed + // candidates. We can only do this for the collator based on cumulus. + assert_finality_lag(&relay_client, 6).await?; + + assert_approval_usages_medians( + 1, + 12, + [("validator", 0..9), ("malus", 9..12)].into_iter().collect(), + &network, + ).await?; + + Ok(()) +} + +pub async fn assert_para_throughput_for_included_parablocks( + relay_client: &OnlineClient, + stop_after: u32, + expected_candidate_ranges: HashMap, Range)>, +) -> Result<(), anyhow::Error> { + let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?; + let mut candidate_backed_count: HashMap = HashMap::new(); + let mut candidate_included_count: HashMap = HashMap::new(); + let mut current_block_count = 0; + + let valid_para_ids: Vec = expected_candidate_ranges.keys().cloned().collect(); + + // Wait for the first session, block production on the parachain will start after that. 
+ wait_for_first_session_change(&mut blocks_sub).await?; + + while let Some(block) = blocks_sub.next().await { + let block = block?; + log::debug!("Finalized relay chain block {}", block.number()); + let events = block.events().await?; + let is_session_change = events.iter().any(|event| { + event.as_ref().is_ok_and(|event| { + event.pallet_name() == "Session" && event.variant_name() == "NewSession" + }) + }); + + // Do not count blocks with session changes, no backed blocks there. + if is_session_change { + continue; + } + + current_block_count += 1; + + let receipts_for_backed = find_event_and_decode_fields::>( + &events, + "ParaInclusion", + "CandidateBacked", + )?; + + for receipt in receipts_for_backed { + let para_id = receipt.descriptor.para_id(); + log::debug!("Block backed for para_id {para_id}"); + if !valid_para_ids.contains(¶_id) { + return Err(anyhow!("Invalid ParaId detected: {}", para_id)); + }; + *(candidate_backed_count.entry(para_id).or_default()) += 1; + } + + let receipts_for_included = find_event_and_decode_fields::>( + &events, + "ParaInclusion", + "CandidateIncluded", + )?; + + for receipt in receipts_for_included { + let para_id = receipt.descriptor.para_id(); + log::debug!("Block included for para_id {para_id}"); + if !valid_para_ids.contains(¶_id) { + return Err(anyhow!("Invalid ParaId detected: {}", para_id)); + }; + *(candidate_included_count.entry(para_id).or_default()) += 1; + } + + if current_block_count == stop_after { + break; + } + } + + log::info!( + "Reached {stop_after} finalized relay chain blocks that contain backed/included candidates. 
The per-parachain distribution is: {:#?} {:#?}", + candidate_backed_count.iter().map(|(para_id, count)| format!("{para_id} has {count} backed candidates"),).collect::>(), + candidate_included_count.iter().map(|(para_id, count)| format!("{para_id} has {count} included candidates"),).collect::>() + ); + + for (para_id, expected_candidate_range) in expected_candidate_ranges { + let actual_backed = candidate_backed_count + .get(¶_id) + .ok_or_else(|| anyhow!("ParaId did not have any backed candidates"))?; + + let actual_included = candidate_included_count + .get(¶_id) + .ok_or_else(|| anyhow!("ParaId did not have any included candidates"))?; + + if !expected_candidate_range.0.contains(actual_backed) { + let range = expected_candidate_range.0; + return Err(anyhow!( + "Candidate Backed count {actual_backed} not within range {range:?}" + )) + } + + if !expected_candidate_range.1.contains(actual_included) { + let range = expected_candidate_range.1; + return Err(anyhow!( + "Candidate Included count {actual_included} not within range {range:?}" + )) + } + } + + Ok(()) +} + +async fn assert_approval_usages_medians( + session: SessionIndex, + num_validators: usize, + validators_kind_and_range: Vec<(&str, Range)>, + network: &Network, +) -> Result<(), anyhow::Error> { + for (kind, validators_range) in validators_kind_and_range { + for idx in validators_range { + let validator_identifier = format!("{}-{}", kind, idx); + let relay_node = network.get_node(validator_identifier)?; + + let approvals_per_session = + report_label_with_attributes( + "polkadot_parachain_rewards_statistics_collector_approvals_per_session", + vec![ + ("session", session.to_string().as_str()), + ("chain", "rococo_local_testnet"), + ], + ); + + let noshows_per_session = report_label_with_attributes( + "polkadot_parachain_rewards_statistics_collector_no_shows_per_session", + vec![ + ("session", session.to_string().as_str()), + ("chain", "rococo_local_testnet"), + ], + ); + + let total_approvals = 
relay_node.reports(approvals_per_session).await?; + let total_noshows = relay_node.reports(noshows_per_session).await?; + + log::info!("Session {session}: {kind} #{idx} (Approvals: {total_approvals}, Noshows: {total_noshows}) "); + + assert!(total_approvals >= 9.0); + assert!(total_noshows >= 3.0); + } + } + + Ok(()) +} \ No newline at end of file diff --git a/substrate/frame/staking-async/runtimes/rc/src/lib.rs b/substrate/frame/staking-async/runtimes/rc/src/lib.rs index 53edb28ceccd9..a35cb4a5ed342 100644 --- a/substrate/frame/staking-async/runtimes/rc/src/lib.rs +++ b/substrate/frame/staking-async/runtimes/rc/src/lib.rs @@ -2287,6 +2287,13 @@ sp_api::impl_runtime_apis! { parachains_runtime_api_impl::submit_pvf_check_statement::(stmt, signature) } + fn submit_approval_statistic( + payload: ApprovalStatistic, + signature: ValidatorSignature, + ) { + parachains_staging_runtime_api_impl::submit_approval_statistics::(payload, signature) + } + fn pvfs_require_precheck() -> Vec { parachains_runtime_api_impl::pvfs_require_precheck::() }