diff --git a/pallets/subtensor/src/migrations/migrate_fix_epoch_input.rs b/pallets/subtensor/src/migrations/migrate_fix_epoch_input.rs
new file mode 100644
index 000000000..107e6e4ff
--- /dev/null
+++ b/pallets/subtensor/src/migrations/migrate_fix_epoch_input.rs
@@ -0,0 +1,71 @@
+use super::*;
+use frame_support::{
+    pallet_prelude::{Identity, OptionQuery},
+    storage_alias,
+    traits::Get,
+    weights::Weight,
+};
+use log::info;
+use sp_std::vec::Vec;
+
+const LOG_TARGET_1: &str = "migrate_fix_epoch_input";
+
+/// Remove duplicate entries when one hotkey appears more than once in the Keys map.
+pub fn deduplicate_hotkeys<T: Config>() -> Weight {
+    let mut weight = T::DbWeight::get().reads(0);
+    // Iterate over the Keys map, detect duplicate hotkeys,
+    // and build the list of uids that need to be removed.
+
+    // For each uid to be removed, swap all neuron maps with the highest uid and
+    // deregister the neuron (to prevent gaps in uid numbers in the subnet).
+
+    weight
+}
+
+/// Fix any known inconsistencies in epoch input data.
+/// This migration should execute regularly.
+pub fn migrate_fix_epoch_input<T: Config>() -> Weight {
+    // Setup migration weight
+    let mut weight = T::DbWeight::get().reads(1);
+    let migration_name = "Fix epoch input data";
+
+    info!(target: LOG_TARGET_1, ">>> Starting Migration: {migration_name}");
+
+    // // Iterate through all Owner entries
+    // Owner::<T>::iter().for_each(|(hotkey, coldkey)| {
+    //     storage_reads = storage_reads.saturating_add(1); // Read from Owner storage
+    //     let mut hotkeys = OwnedHotkeys::<T>::get(&coldkey);
+    //     storage_reads = storage_reads.saturating_add(1); // Read from OwnedHotkeys storage
+
+    //     // Add the hotkey if it's not already in the vector
+    //     if !hotkeys.contains(&hotkey) {
+    //         hotkeys.push(hotkey);
+    //         keys_touched = keys_touched.saturating_add(1);
+
+    //         // Update longest hotkey vector info
+    //         if longest_hotkey_vector < hotkeys.len() {
+    //             longest_hotkey_vector = hotkeys.len();
+    //             longest_coldkey = Some(coldkey.clone());
+    //         }
+
+    //         // Update the OwnedHotkeys storage
+    //         OwnedHotkeys::<T>::insert(&coldkey, hotkeys);
+    //         storage_writes = storage_writes.saturating_add(1); // Write to OwnedHotkeys storage
+    //     }
+
+    //     // Accrue weight for reads and writes
+    //     weight = weight.saturating_add(T::DbWeight::get().reads_writes(2, 1));
+    // });
+
+    // Log migration results
+    // info!(
+    //     target: LOG_TARGET_1,
+    //     "Migration {migration_name} finished. Keys touched: {keys_touched}, Longest hotkey vector: {longest_hotkey_vector}, Storage reads: {storage_reads}, Storage writes: {storage_writes}"
+    // );
+    // if let Some(c) = longest_coldkey {
+    //     info!(target: LOG_TARGET_1, "Longest hotkey vector is controlled by: {c:?}");
+    // }
+
+    weight
+}
diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs
index e7c50c008..4546b8e8b 100644
--- a/pallets/subtensor/src/migrations/mod.rs
+++ b/pallets/subtensor/src/migrations/mod.rs
@@ -14,6 +14,7 @@ pub mod migrate_crv3_v2_to_timelocked;
 pub mod migrate_delete_subnet_21;
 pub mod migrate_delete_subnet_3;
 pub mod migrate_disable_commit_reveal;
+pub mod migrate_fix_epoch_input;
 pub mod migrate_fix_is_network_member;
 pub mod migrate_fix_root_subnet_tao;
 pub mod migrate_fix_root_tao_and_alpha_in;
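The body of `deduplicate_hotkeys` above is still a stub whose comments describe the intended algorithm. A minimal sketch of the detection step, assuming `Keys` is the `(netuid, uid) -> hotkey` double map that this diff reads via `Keys::<T>::try_get` further down; the helper name `find_duplicate_uids` and its `netuid` parameter are illustrative, not part of the change:

```rust
// Illustrative only: collect the uids whose hotkey already appears under a lower uid.
fn find_duplicate_uids<T: Config>(netuid: NetUid) -> Vec<u16> {
    use sp_std::collections::btree_set::BTreeSet;

    // Walk this subnet's uids in ascending order so the first occurrence
    // of each hotkey is kept and later occurrences are flagged.
    let mut entries: Vec<(u16, T::AccountId)> = Keys::<T>::iter_prefix(netuid).collect();
    entries.sort_by_key(|(uid, _)| *uid);

    let mut seen: BTreeSet<T::AccountId> = BTreeSet::new();
    let mut duplicate_uids: Vec<u16> = Vec::new();
    for (uid, hotkey) in entries {
        if !seen.insert(hotkey) {
            // Hotkey already seen under a lower uid: this uid is a duplicate.
            duplicate_uids.push(uid);
        }
    }
    duplicate_uids
}
```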
diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs
index b68fabfbd..7ebf9c3bb 100644
--- a/pallets/subtensor/src/subnets/uids.rs
+++ b/pallets/subtensor/src/subnets/uids.rs
@@ -131,6 +131,168 @@ impl<T: Config> Pallet<T> {
         IsNetworkMember::<T>::insert(new_hotkey.clone(), netuid, true); // Fill network is member.
     }
 
+    pub fn trim_clear_neuron_state(
+        netuid: NetUid,
+        neuron_uid: u16,
+        mechanisms_count: u8,
+    ) -> Weight {
+        let mut weight = T::DbWeight::get().reads(1);
+
+        // Remove hotkey related storage items if hotkey exists
+        if let Ok(hotkey) = Keys::<T>::try_get(netuid, neuron_uid) {
+            Uids::<T>::remove(netuid, &hotkey);
+            IsNetworkMember::<T>::remove(&hotkey, netuid);
+            LastHotkeyEmissionOnNetuid::<T>::remove(&hotkey, netuid);
+            AlphaDividendsPerSubnet::<T>::remove(netuid, &hotkey);
+            TaoDividendsPerSubnet::<T>::remove(netuid, &hotkey);
+            Axons::<T>::remove(netuid, &hotkey);
+            NeuronCertificates::<T>::remove(netuid, &hotkey);
+            Prometheus::<T>::remove(netuid, &hotkey);
+
+            weight = weight.saturating_add(T::DbWeight::get().reads_writes(0, 8));
+        }
+
+        // Remove all storage items associated with this uid
+        #[allow(unknown_lints)]
+        Keys::<T>::remove(netuid, neuron_uid);
+        BlockAtRegistration::<T>::remove(netuid, neuron_uid);
+        for mecid in 0..mechanisms_count {
+            let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into());
+            Weights::<T>::remove(netuid_index, neuron_uid);
+            Bonds::<T>::remove(netuid_index, neuron_uid);
+        }
+        weight = weight.saturating_add(T::DbWeight::get().reads_writes(
+            0,
+            (mechanisms_count as u64).saturating_mul(2).saturating_add(2),
+        ));
+
+        weight
+    }
+
+    pub fn make_uids_consecutive(
+        netuid: NetUid,
+        mechanisms_count: u8,
+        trimmed_uids: Vec<usize>,
+    ) -> Weight {
+        let mut weight = T::DbWeight::get().reads(0);
+
+        // Get all current arrays from storage
+        let emissions = Emission::<T>::get(netuid);
+        let ranks = Rank::<T>::get(netuid);
+        let trust = Trust::<T>::get(netuid);
+        let active = Active::<T>::get(netuid);
+        let consensus = Consensus::<T>::get(netuid);
+        let dividends = Dividends::<T>::get(netuid);
+        let pruning_scores = PruningScores::<T>::get(netuid);
+        let vtrust = ValidatorTrust::<T>::get(netuid);
+        let vpermit = ValidatorPermit::<T>::get(netuid);
+        let stake_weight = StakeWeight::<T>::get(netuid);
+
+        // Create trimmed arrays by extracting values for kept uids only
+        // Pre-allocate vectors with exact capacity for efficiency
+        let len = trimmed_uids.len();
+        let mut trimmed_emissions = Vec::with_capacity(len);
+        let mut trimmed_ranks = Vec::with_capacity(len);
+        let mut trimmed_trust = Vec::with_capacity(len);
+        let mut trimmed_active = Vec::with_capacity(len);
+        let mut trimmed_consensus = Vec::with_capacity(len);
+        let mut trimmed_dividends = Vec::with_capacity(len);
+        let mut trimmed_pruning_scores = Vec::with_capacity(len);
+        let mut trimmed_vtrust = Vec::with_capacity(len);
+        let mut trimmed_vpermit = Vec::with_capacity(len);
+        let mut trimmed_stake_weight = Vec::with_capacity(len);
+
+        // Single iteration to extract values for all kept uids
+        for &uid in &trimmed_uids {
+            trimmed_emissions.push(emissions.get(uid).cloned().unwrap_or_default());
+            trimmed_ranks.push(ranks.get(uid).cloned().unwrap_or_default());
+            trimmed_trust.push(trust.get(uid).cloned().unwrap_or_default());
+            trimmed_active.push(active.get(uid).cloned().unwrap_or_default());
+            trimmed_consensus.push(consensus.get(uid).cloned().unwrap_or_default());
+            trimmed_dividends.push(dividends.get(uid).cloned().unwrap_or_default());
+            trimmed_pruning_scores.push(pruning_scores.get(uid).cloned().unwrap_or_default());
+            trimmed_vtrust.push(vtrust.get(uid).cloned().unwrap_or_default());
+            trimmed_vpermit.push(vpermit.get(uid).cloned().unwrap_or_default());
+            trimmed_stake_weight.push(stake_weight.get(uid).cloned().unwrap_or_default());
+        }
+
+        // Update storage with trimmed arrays
+        Emission::<T>::insert(netuid, trimmed_emissions);
+        Rank::<T>::insert(netuid, trimmed_ranks);
+        Trust::<T>::insert(netuid, trimmed_trust);
+        Active::<T>::insert(netuid, trimmed_active);
+        Consensus::<T>::insert(netuid, trimmed_consensus);
+        Dividends::<T>::insert(netuid, trimmed_dividends);
+        PruningScores::<T>::insert(netuid, trimmed_pruning_scores);
+        ValidatorTrust::<T>::insert(netuid, trimmed_vtrust);
+        ValidatorPermit::<T>::insert(netuid, trimmed_vpermit);
+        StakeWeight::<T>::insert(netuid, trimmed_stake_weight);
+
+        // Update incentives/lastupdates for mechanisms
+        for mecid in 0..mechanisms_count {
+            let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into());
+            let incentive = Incentive::<T>::get(netuid_index);
+            let lastupdate = LastUpdate::<T>::get(netuid_index);
+            let mut trimmed_incentive = Vec::with_capacity(trimmed_uids.len());
+            let mut trimmed_lastupdate = Vec::with_capacity(trimmed_uids.len());
+
+            for uid in &trimmed_uids {
+                trimmed_incentive.push(incentive.get(*uid).cloned().unwrap_or_default());
+                trimmed_lastupdate.push(lastupdate.get(*uid).cloned().unwrap_or_default());
+            }
+
+            Incentive::<T>::insert(netuid_index, trimmed_incentive);
+            LastUpdate::<T>::insert(netuid_index, trimmed_lastupdate);
+        }
+
+        // Create mapping from old uid to new compressed uid
+        // This is needed to update connections (weights and bonds) with correct uid references
+        let old_to_new_uid: BTreeMap<usize, usize> = trimmed_uids
+            .iter()
+            .enumerate()
+            .map(|(new_uid, &old_uid)| (old_uid, new_uid))
+            .collect();
+
+        // Update connections (weights and bonds) for each kept uid
+        // This involves three operations per uid:
+        // 1. Swap the uid storage to the new compressed position
+        // 2. Update all connections to reference the new compressed uids
+        // 3. Clear the connections to the trimmed uids
+        for (old_uid, new_uid) in &old_to_new_uid {
+            let old_neuron_uid = *old_uid as u16;
+            let new_neuron_uid = *new_uid as u16;
+
+            // Swap uid specific storage items to new compressed positions
+            Keys::<T>::swap(netuid, old_neuron_uid, netuid, new_neuron_uid);
+            BlockAtRegistration::<T>::swap(netuid, old_neuron_uid, netuid, new_neuron_uid);
+
+            for mecid in 0..mechanisms_count {
+                let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into());
+
+                // Swap to new position and remap all target uids
+                Weights::<T>::swap(netuid_index, old_neuron_uid, netuid_index, new_neuron_uid);
+                Weights::<T>::mutate(netuid_index, new_neuron_uid, |weights| {
+                    weights.retain_mut(|(target_uid, _weight)| {
+                        if let Some(new_target_uid) =
+                            old_to_new_uid.get(&(*target_uid as usize))
+                        {
+                            *target_uid = *new_target_uid as u16;
+                            true
+                        } else {
+                            false
+                        }
+                    })
+                });
+
+                // Swap to new position and remap all target uids
+                Bonds::<T>::swap(netuid_index, old_neuron_uid, netuid_index, new_neuron_uid);
+                Bonds::<T>::mutate(netuid_index, new_neuron_uid, |bonds| {
+                    bonds.retain_mut(|(target_uid, _bond)| {
+                        if let Some(new_target_uid) =
+                            old_to_new_uid.get(&(*target_uid as usize))
+                        {
+                            *target_uid = *new_target_uid as u16;
+                            true
+                        } else {
+                            false
+                        }
+                    })
+                });
+            }
+        }
+
+        weight
+    }
+
     pub fn trim_to_max_allowed_uids(netuid: NetUid, max_n: u16) -> DispatchResult {
         // Reasonable limits
         ensure!(Self::if_subnet_exist(netuid), Error::<T>::SubnetNotExists);
@@ -193,27 +355,7 @@ impl<T: Config> Pallet<T> {
                 continue;
             }
 
-            // Remove hotkey related storage items if hotkey exists
-            if let Ok(hotkey) = Keys::<T>::try_get(netuid, neuron_uid) {
-                Uids::<T>::remove(netuid, &hotkey);
-                IsNetworkMember::<T>::remove(&hotkey, netuid);
-                LastHotkeyEmissionOnNetuid::<T>::remove(&hotkey, netuid);
-                AlphaDividendsPerSubnet::<T>::remove(netuid, &hotkey);
-                TaoDividendsPerSubnet::<T>::remove(netuid, &hotkey);
-                Axons::<T>::remove(netuid, &hotkey);
-                NeuronCertificates::<T>::remove(netuid, &hotkey);
-                Prometheus::<T>::remove(netuid, &hotkey);
-            }
-
-            // Remove all storage items associated with this uid
-            #[allow(unknown_lints)]
-            Keys::<T>::remove(netuid, neuron_uid);
-            BlockAtRegistration::<T>::remove(netuid, neuron_uid);
-            for mecid in 0..mechanisms_count {
-                let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into());
-                Weights::<T>::remove(netuid_index, neuron_uid);
-                Bonds::<T>::remove(netuid_index, neuron_uid);
-            }
+            Self::trim_clear_neuron_state(netuid, neuron_uid, mechanisms_count);
 
             // Remove from emissions array and track as removed
             emissions.remove(i.into());
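The old-to-new uid remapping that `make_uids_consecutive` applies to `Weights` and `Bonds` (first hunk above) is easiest to see with concrete numbers. A standalone sketch with made-up uids and weight values, mirroring the `retain_mut` logic from the diff:

```rust
// Standalone illustration (not part of the diff) of the uid compaction.
use std::collections::BTreeMap;

fn main() {
    // Suppose uids 1 and 3 were trimmed, so uids 0, 2 and 4 are kept.
    let trimmed_uids: Vec<usize> = vec![0, 2, 4];

    // Old uid -> new compressed uid: 0 -> 0, 2 -> 1, 4 -> 2.
    let old_to_new_uid: BTreeMap<usize, usize> = trimmed_uids
        .iter()
        .enumerate()
        .map(|(new_uid, &old_uid)| (old_uid, new_uid))
        .collect();

    // A weight row of (target_uid, weight) pairs for one kept neuron.
    let mut weights: Vec<(u16, u16)> = vec![(0, 10), (1, 20), (2, 30), (4, 40)];

    // Same remapping as in the diff: retarget kept uids, drop trimmed ones.
    weights.retain_mut(|(target_uid, _weight)| {
        if let Some(new_target_uid) = old_to_new_uid.get(&(*target_uid as usize)) {
            *target_uid = *new_target_uid as u16;
            true
        } else {
            false
        }
    });

    // The connection to trimmed uid 1 is dropped; uids 2 and 4 now point at 1 and 2.
    assert_eq!(weights, vec![(0, 10), (1, 30), (2, 40)]);
    println!("{weights:?}");
}
```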
@@ -227,130 +369,10 @@ impl<T: Config> Pallet<T> {
         emissions.sort_by_key(|(uid, _)| *uid);
 
         // Extract the final uids and emissions after trimming and sorting
-        let (trimmed_uids, trimmed_emissions): (Vec<usize>, Vec<_>) =
+        let (trimmed_uids, _trimmed_emissions): (Vec<usize>, Vec<_>) =
             emissions.into_iter().unzip();
 
-        // Get all current arrays from storage
-        let ranks = Rank::<T>::get(netuid);
-        let trust = Trust::<T>::get(netuid);
-        let active = Active::<T>::get(netuid);
-        let consensus = Consensus::<T>::get(netuid);
-        let dividends = Dividends::<T>::get(netuid);
-        let pruning_scores = PruningScores::<T>::get(netuid);
-        let vtrust = ValidatorTrust::<T>::get(netuid);
-        let vpermit = ValidatorPermit::<T>::get(netuid);
-        let stake_weight = StakeWeight::<T>::get(netuid);
-
-        // Create trimmed arrays by extracting values for kept uids only
-        // Pre-allocate vectors with exact capacity for efficiency
-        let len = trimmed_uids.len();
-        let mut trimmed_ranks = Vec::with_capacity(len);
-        let mut trimmed_trust = Vec::with_capacity(len);
-        let mut trimmed_active = Vec::with_capacity(len);
-        let mut trimmed_consensus = Vec::with_capacity(len);
-        let mut trimmed_dividends = Vec::with_capacity(len);
-        let mut trimmed_pruning_scores = Vec::with_capacity(len);
-        let mut trimmed_vtrust = Vec::with_capacity(len);
-        let mut trimmed_vpermit = Vec::with_capacity(len);
-        let mut trimmed_stake_weight = Vec::with_capacity(len);
-
-        // Single iteration to extract values for all kept uids
-        for &uid in &trimmed_uids {
-            trimmed_ranks.push(ranks.get(uid).cloned().unwrap_or_default());
-            trimmed_trust.push(trust.get(uid).cloned().unwrap_or_default());
-            trimmed_active.push(active.get(uid).cloned().unwrap_or_default());
-            trimmed_consensus.push(consensus.get(uid).cloned().unwrap_or_default());
-            trimmed_dividends.push(dividends.get(uid).cloned().unwrap_or_default());
-            trimmed_pruning_scores.push(pruning_scores.get(uid).cloned().unwrap_or_default());
-            trimmed_vtrust.push(vtrust.get(uid).cloned().unwrap_or_default());
-            trimmed_vpermit.push(vpermit.get(uid).cloned().unwrap_or_default());
-            trimmed_stake_weight.push(stake_weight.get(uid).cloned().unwrap_or_default());
-        }
-
-        // Update storage with trimmed arrays
-        Emission::<T>::insert(netuid, trimmed_emissions);
-        Rank::<T>::insert(netuid, trimmed_ranks);
-        Trust::<T>::insert(netuid, trimmed_trust);
-        Active::<T>::insert(netuid, trimmed_active);
-        Consensus::<T>::insert(netuid, trimmed_consensus);
-        Dividends::<T>::insert(netuid, trimmed_dividends);
-        PruningScores::<T>::insert(netuid, trimmed_pruning_scores);
-        ValidatorTrust::<T>::insert(netuid, trimmed_vtrust);
-        ValidatorPermit::<T>::insert(netuid, trimmed_vpermit);
-        StakeWeight::<T>::insert(netuid, trimmed_stake_weight);
-
-        // Update incentives/lastupdates for mechanisms
-        for mecid in 0..mechanisms_count {
-            let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into());
-            let incentive = Incentive::<T>::get(netuid_index);
-            let lastupdate = LastUpdate::<T>::get(netuid_index);
-            let mut trimmed_incentive = Vec::with_capacity(trimmed_uids.len());
-            let mut trimmed_lastupdate = Vec::with_capacity(trimmed_uids.len());
-
-            for uid in &trimmed_uids {
-                trimmed_incentive.push(incentive.get(*uid).cloned().unwrap_or_default());
-                trimmed_lastupdate.push(lastupdate.get(*uid).cloned().unwrap_or_default());
-            }
-
-            Incentive::<T>::insert(netuid_index, trimmed_incentive);
-            LastUpdate::<T>::insert(netuid_index, trimmed_lastupdate);
-        }
-
-        // Create mapping from old uid to new compressed uid
-        // This is needed to update connections (weights and bonds) with correct uid references
-        let old_to_new_uid: BTreeMap<usize, usize> = trimmed_uids
-            .iter()
-            .enumerate()
-            .map(|(new_uid, &old_uid)| (old_uid, new_uid))
-            .collect();
-
-        // Update connections (weights and bonds) for each kept uid
-        // This involves three operations per uid:
-        // 1. Swap the uid storage to the new compressed position
-        // 2. Update all connections to reference the new compressed uids
-        // 3. Clear the connections to the trimmed uids
-        for (old_uid, new_uid) in &old_to_new_uid {
-            let old_neuron_uid = *old_uid as u16;
-            let new_neuron_uid = *new_uid as u16;
-
-            // Swap uid specific storage items to new compressed positions
-            Keys::<T>::swap(netuid, old_neuron_uid, netuid, new_neuron_uid);
-            BlockAtRegistration::<T>::swap(netuid, old_neuron_uid, netuid, new_neuron_uid);
-
-            for mecid in 0..mechanisms_count {
-                let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into());
-
-                // Swap to new position and remap all target uids
-                Weights::<T>::swap(netuid_index, old_neuron_uid, netuid_index, new_neuron_uid);
-                Weights::<T>::mutate(netuid_index, new_neuron_uid, |weights| {
-                    weights.retain_mut(|(target_uid, _weight)| {
-                        if let Some(new_target_uid) =
-                            old_to_new_uid.get(&(*target_uid as usize))
-                        {
-                            *target_uid = *new_target_uid as u16;
-                            true
-                        } else {
-                            false
-                        }
-                    })
-                });
-
-                // Swap to new position and remap all target uids
-                Bonds::<T>::swap(netuid_index, old_neuron_uid, netuid_index, new_neuron_uid);
-                Bonds::<T>::mutate(netuid_index, new_neuron_uid, |bonds| {
-                    bonds.retain_mut(|(target_uid, _bond)| {
-                        if let Some(new_target_uid) =
-                            old_to_new_uid.get(&(*target_uid as usize))
-                        {
-                            *target_uid = *new_target_uid as u16;
-                            true
-                        } else {
-                            false
-                        }
-                    })
-                });
-            }
-        }
+        Self::make_uids_consecutive(netuid, mechanisms_count, trimmed_uids);
 
         // Update the subnet's uid count to reflect the new maximum
         SubnetworkN::<T>::insert(netuid, max_n);
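Taken together, a hypothetical per-subnet body for `deduplicate_hotkeys` could combine the detection sketch above with the two helpers this diff adds to uids.rs. The function name, the `mechanisms_count` parameter, the weight bookkeeping, and the final `SubnetworkN` update are assumptions for illustration, not part of the diff:

```rust
// Illustrative only: remove duplicate-hotkey uids for one subnet, then compact uids.
pub fn deduplicate_hotkeys_for_subnet<T: Config>(netuid: NetUid, mechanisms_count: u8) -> Weight {
    let mut weight = T::DbWeight::get().reads(1);

    // Detection step from the earlier sketch (hypothetical helper).
    let duplicate_uids = find_duplicate_uids::<T>(netuid);
    let n: u16 = SubnetworkN::<T>::get(netuid);

    // Clear per-uid and per-hotkey state for every duplicate uid.
    for &uid in &duplicate_uids {
        weight = weight
            .saturating_add(Pallet::<T>::trim_clear_neuron_state(netuid, uid, mechanisms_count));
    }

    // Keep every uid that was not removed, in ascending order, then compact
    // the survivors so uid numbering stays consecutive.
    let kept_uids: Vec<usize> = (0..n)
        .filter(|uid| !duplicate_uids.contains(uid))
        .map(|uid| uid as usize)
        .collect();
    let new_n = kept_uids.len() as u16;
    weight = weight
        .saturating_add(Pallet::<T>::make_uids_consecutive(netuid, mechanisms_count, kept_uids));

    // Shrink the subnet's neuron count to the number of surviving uids.
    SubnetworkN::<T>::insert(netuid, new_n);
    weight.saturating_add(T::DbWeight::get().writes(1))
}
```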