From 0178b88495a7a595e4790be2c2a38e95367829a1 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 26 May 2025 11:52:00 -0700 Subject: [PATCH 001/379] impl get_network_to_prune --- pallets/subtensor/src/subnets/subnet.rs | 36 +++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index b122bfa049..bf7ada34d8 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -440,4 +440,40 @@ impl Pallet { pub fn is_valid_subnet_for_emission(netuid: u16) -> bool { FirstEmissionBlockNumber::::get(netuid).is_some() } + + pub fn get_network_to_prune() -> Option { + let current_block: u64 = Self::get_current_block_as_u64(); + let total_networks: u16 = TotalNetworks::::get(); + + let mut candidate_netuid: Option = None; + let mut candidate_emission = u64::MAX; + let mut candidate_timestamp = u64::MAX; + + for netuid in 1..=total_networks { + if FirstEmissionBlockNumber::::get(netuid).is_none() { + continue; + } + + let registered_at = NetworkRegisteredAt::::get(netuid); + let immunity_period = ImmunityPeriod::::get(netuid); + if current_block < registered_at.saturating_add(immunity_period as u64) { + continue; + } + + // We want total emission across all UIDs in this subnet: + let emission_vec = Emission::::get(netuid); + let total_emission = emission_vec.iter().sum::(); + + // If tie on total_emission, earliest registration wins + if total_emission < candidate_emission + || (total_emission == candidate_emission && registered_at < candidate_timestamp) + { + candidate_netuid = Some(netuid); + candidate_emission = total_emission; + candidate_timestamp = registered_at; + } + } + + candidate_netuid + } } From 562823c73e7403196a3a799a045cb97fbf91c005 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 27 May 2025 11:54:17 -0700 Subject: [PATCH 002/379] 
use NetworkImmunityPeriod --- pallets/subtensor/src/subnets/subnet.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index bf7ada34d8..1f6524e2b2 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -455,7 +455,7 @@ impl Pallet { } let registered_at = NetworkRegisteredAt::::get(netuid); - let immunity_period = ImmunityPeriod::::get(netuid); + let immunity_period = Self::get_network_immunity_period(); if current_block < registered_at.saturating_add(immunity_period as u64) { continue; } From b2ef90e55e8290e13a8ba6ac2462a96bf0326910 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 27 May 2025 13:45:56 -0700 Subject: [PATCH 003/379] update get_network_to_prune. --- pallets/subtensor/src/subnets/subnet.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 1f6524e2b2..a04ae1df07 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -441,6 +441,11 @@ impl Pallet { FirstEmissionBlockNumber::::get(netuid).is_some() } + /// Select a subnet to prune: + /// - Only consider subnets that are Enabled. + /// - Exclude subnets still within `ImmunityPeriod`. + /// - Pick the one with the lowest total emission + /// - In the case of a tie, pick the earliest registered. 
pub fn get_network_to_prune() -> Option { let current_block: u64 = Self::get_current_block_as_u64(); let total_networks: u16 = TotalNetworks::::get(); @@ -450,19 +455,22 @@ impl Pallet { let mut candidate_timestamp = u64::MAX; for netuid in 1..=total_networks { - if FirstEmissionBlockNumber::::get(netuid).is_none() { - continue; - } + // Exclude disabled subnets + let first_emission_block = match FirstEmissionBlockNumber::::get(netuid) { + Some(block) => block, + None => continue, + }; - let registered_at = NetworkRegisteredAt::::get(netuid); + // Check if the subnet's immunity period is expired. let immunity_period = Self::get_network_immunity_period(); - if current_block < registered_at.saturating_add(immunity_period as u64) { + if current_block < first_emission_block.saturating_add(immunity_period as u64) { continue; } // We want total emission across all UIDs in this subnet: let emission_vec = Emission::::get(netuid); let total_emission = emission_vec.iter().sum::(); + let registered_at = NetworkRegisteredAt::::get(netuid); // If tie on total_emission, earliest registration wins if total_emission < candidate_emission From d569309fdd0a65d1dfafc0d3fbaa88bac370fca5 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 27 May 2025 14:39:41 -0700 Subject: [PATCH 004/379] add `NetworkActivationDeadline ` --- pallets/subtensor/src/coinbase/root.rs | 3 +++ pallets/subtensor/src/lib.rs | 9 +++++++++ pallets/subtensor/src/subnets/subnet.rs | 19 ++++++++++++++----- 3 files changed, 26 insertions(+), 5 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 1f3a91b339..e8d529309d 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -627,6 +627,9 @@ impl Pallet { pub fn get_network_immunity_period() -> u64 { NetworkImmunityPeriod::::get() } + pub fn get_network_activation_deadline() -> u64 { + NetworkActivationDeadline::::get() + 
} pub fn set_network_immunity_period(net_immunity_period: u64) { NetworkImmunityPeriod::::set(net_immunity_period); Self::deposit_event(Event::NetworkImmunityPeriodSet(net_immunity_period)); diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 197cd5f8f7..8e148bee69 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -556,6 +556,11 @@ pub mod pallet { T::InitialNetworkImmunityPeriod::get() } #[pallet::type_value] + /// Default value for network activation deadline. + pub fn DefaultNetworkActivationDeadline() -> u64 { + 1_296_000 + } + #[pallet::type_value] /// Default value for network last registered. pub fn DefaultNetworkLastRegistered() -> u64 { 0 @@ -1194,6 +1199,10 @@ pub mod pallet { pub type NetworkImmunityPeriod = StorageValue<_, u64, ValueQuery, DefaultNetworkImmunityPeriod>; #[pallet::storage] + /// ITEM( network_activation_deadline ) + pub type NetworkActivationDeadline = + StorageValue<_, u64, ValueQuery, DefaultNetworkActivationDeadline>; + #[pallet::storage] /// ITEM( network_last_registered_block ) pub type NetworkLastRegistered = StorageValue<_, u64, ValueQuery, DefaultNetworkLastRegistered>; diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index a04ae1df07..c60689d23d 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -455,22 +455,31 @@ impl Pallet { let mut candidate_timestamp = u64::MAX; for netuid in 1..=total_networks { - // Exclude disabled subnets - let first_emission_block = match FirstEmissionBlockNumber::::get(netuid) { + let registered_at = NetworkRegisteredAt::::get(netuid); + + let start_block = match FirstEmissionBlockNumber::::get(netuid) { Some(block) => block, - None => continue, + None => { + // Not enabled yet. If still within ActivationDeadline, skip pruning this subnet. 
+ if current_block + < registered_at.saturating_add(Self::get_network_activation_deadline()) + { + continue; + } + // Otherwise, we treat it as if it started at its registered time + registered_at + } }; // Check if the subnet's immunity period is expired. let immunity_period = Self::get_network_immunity_period(); - if current_block < first_emission_block.saturating_add(immunity_period as u64) { + if current_block < start_block.saturating_add(immunity_period as u64) { continue; } // We want total emission across all UIDs in this subnet: let emission_vec = Emission::::get(netuid); let total_emission = emission_vec.iter().sum::(); - let registered_at = NetworkRegisteredAt::::get(netuid); // If tie on total_emission, earliest registration wins if total_emission < candidate_emission From 8016183fbdbafb44bcbb31f5416e372e2a191ed2 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 2 Jun 2025 08:35:21 -0700 Subject: [PATCH 005/379] WIP --- pallets/subtensor/src/coinbase/root.rs | 219 +++++++++++++++------ pallets/subtensor/src/macros/dispatches.rs | 2 +- pallets/subtensor/src/subnets/subnet.rs | 53 ----- pallets/subtensor/src/tests/networks.rs | 3 +- 4 files changed, 162 insertions(+), 115 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index e8d529309d..218137b8eb 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -22,7 +22,9 @@ use frame_support::weights::Weight; use safe_math::*; use sp_core::Get; use sp_std::vec; -use substrate_fixed::types::I64F64; +use sp_runtime::Perbill; +use substrate_fixed::types::{I64F64, U96F32}; +use sp_runtime::PerThing; impl Pallet { /// Fetches the total count of root network validators @@ -427,64 +429,25 @@ impl Pallet { .into()) } - /// Facilitates the removal of a user's subnetwork. - /// - /// # Args: - /// * 'origin': ('T::RuntimeOrigin'): The calling origin. Must be signed. 
- /// * 'netuid': ('u16'): The unique identifier of the network to be removed. - /// - /// # Event: - /// * 'NetworkRemoved': Emitted when a network is successfully removed. - /// - /// # Raises: - /// * 'SubNetworkDoesNotExist': If the specified network does not exist. - /// * 'NotSubnetOwner': If the caller does not own the specified subnet. - /// - pub fn user_remove_network(coldkey: T::AccountId, netuid: u16) -> dispatch::DispatchResult { - // --- 1. Ensure this subnet exists. - ensure!( - Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist - ); - - // --- 2. Ensure the caller owns this subnet. - ensure!( - SubnetOwner::::get(netuid) == coldkey, - Error::::NotSubnetOwner - ); - - // --- 4. Remove the subnet identity if it exists. - if SubnetIdentitiesV2::::take(netuid).is_some() { - Self::deposit_event(Event::SubnetIdentityRemoved(netuid)); - } + pub fn do_dissolve_network(netuid: u16) -> dispatch::DispatchResult { + // --- Perform the dtTao-compatible cleanup before removing the network. + Self::destroy_alpha_in_out_stakes(netuid)?; - // --- 5. Explicitly erase the network and all its parameters. + // --- Finally, remove the network entirely. + ensure!(Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist); Self::remove_network(netuid); - // --- 6. Emit the NetworkRemoved event. + // --- Emit event. log::debug!("NetworkRemoved( netuid:{:?} )", netuid); Self::deposit_event(Event::NetworkRemoved(netuid)); - // --- 7. Return success. Ok(()) } - /// Removes a network (identified by netuid) and all associated parameters. - /// - /// This function is responsible for cleaning up all the data associated with a network. - /// It ensures that all the storage values related to the network are removed, any - /// reserved balance is returned to the network owner, and the subnet identity is removed if it exists. - /// - /// # Args: - /// * 'netuid': ('u16'): The unique identifier of the network to be removed. 
- /// - /// # Note: - /// This function does not emit any events, nor does it raise any errors. It silently - /// returns if any internal checks fail. pub fn remove_network(netuid: u16) { - // --- 1. Return balance to subnet owner. + // --- 1. Get the owner and remove from SubnetOwner. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); - let reserved_amount: u64 = Self::get_subnet_locked_balance(netuid); + SubnetOwner::::remove(netuid); // --- 2. Remove network count. SubnetworkN::::remove(netuid); @@ -507,28 +470,26 @@ impl Pallet { let _ = Keys::::clear_prefix(netuid, u32::MAX, None); let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - // --- 8. Removes the weights for this subnet (do not remove). + // --- 8. Remove the weights for this subnet itself. let _ = Weights::::clear_prefix(netuid, u32::MAX, None); - // --- 9. Iterate over stored weights and fill the matrix. + // --- 9. Also zero out any weights *in the root network* that point to this netuid. for (uid_i, weights_i) in as IterableStorageDoubleMap>>::iter_prefix( Self::get_root_netuid(), ) { - // Create a new vector to hold modified weights. let mut modified_weights: Vec<(u16, u16)> = weights_i.clone(); - // Iterate over each weight entry to potentially update it. for (subnet_id, weight) in modified_weights.iter_mut() { + // If the root network had a weight pointing to this netuid, set it to 0 if subnet_id == &netuid { - // If the condition matches, modify the weight - *weight = 0; // Set weight to 0 for the matching subnet_id. + *weight = 0; } } Weights::::insert(Self::get_root_netuid(), uid_i, modified_weights); } - // --- 10. Remove various network-related parameters. + // --- 10. Remove various network-related parameters and data. Rank::::remove(netuid); Trust::::remove(netuid); Active::::remove(netuid); @@ -558,16 +519,26 @@ impl Pallet { POWRegistrationsThisInterval::::remove(netuid); BurnRegistrationsThisInterval::::remove(netuid); - // --- 12. Add the balance back to the owner. 
- Self::add_balance_to_coldkey_account(&owner_coldkey, reserved_amount); - Self::set_subnet_locked_balance(netuid, 0); - SubnetOwner::::remove(netuid); + // --- 12. Remove additional dTao-related storages if applicable. + SubnetTAO::::remove(netuid); + SubnetAlphaInEmission::::remove(netuid); + SubnetAlphaOutEmission::::remove(netuid); + SubnetTaoInEmission::::remove(netuid); + SubnetVolume::::remove(netuid); + SubnetMovingPrice::::remove(netuid); // --- 13. Remove subnet identity if it exists. if SubnetIdentitiesV2::::contains_key(netuid) { SubnetIdentitiesV2::::remove(netuid); Self::deposit_event(Event::SubnetIdentityRemoved(netuid)); } + + // --- Log final removal. + log::debug!( + "remove_network: netuid={}, owner={:?} removed successfully", + netuid, + owner_coldkey + ); } #[allow(clippy::arithmetic_side_effects)] @@ -674,4 +645,134 @@ impl Pallet { pub fn set_rate_limited_last_block(rate_limit_key: &RateLimitKey, block: u64) { LastRateLimitedBlock::::set(rate_limit_key, block); } + + fn destroy_alpha_in_out_stakes(netuid: u16) -> DispatchResult { + // 1. Ensure the subnet exists. + ensure!(Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist); + + // 2. Gather relevant info. + let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); + let lock_cost: u64 = Self::get_subnet_locked_balance(netuid); + + // (Optional) Grab total emission in Tao. + let emission_vec = Emission::::get(netuid); + let total_emission: u64 = emission_vec.iter().sum(); + + // The portion the owner received is total_emission * owner_cut (stored as fraction in U96F32). + let owner_fraction = Self::get_float_subnet_owner_cut(); + let owner_received_emission = (U96F32::from_num(total_emission) * owner_fraction) + .floor() + .saturating_to_num::(); + + // 3. Destroy α stakes and distribute remaining subnet Tao to α-out stakers (pro rata). + let mut total_alpha_out: u128 = 0; + let mut stakers_data = Vec::new(); + + // (A) First pass: sum total alpha-out for this netuid. 
+ for ((hotkey, coldkey, this_netuid), alpha_shares) in Alpha::::iter() { + if this_netuid == netuid { + // alpha_shares is U64F64; convert to u128 for ratio math + let alpha_as_u128 = alpha_shares.saturating_to_num::(); + total_alpha_out = total_alpha_out.saturating_add(alpha_as_u128); + stakers_data.push((hotkey, coldkey, alpha_as_u128)); + } + } + + // (B) Second pass: distribute the subnet’s Tao among those stakers. + let subnet_tao = SubnetTAO::::get(netuid); + + if total_alpha_out > 0 { + let accuracy_as_u128 = u128::from(Perbill::ACCURACY); + + for (hotkey, coldkey, alpha_amount) in stakers_data { + let scaled = alpha_amount + .saturating_mul(accuracy_as_u128) + .checked_div(total_alpha_out) + .unwrap_or(0); + + // Clamp to avoid overflow beyond the Perbill limit (which is a 1.0 fraction). + let clamped = if scaled > accuracy_as_u128 { + Perbill::ACCURACY + } else { + scaled as u32 + }; + + // Construct a Perbill from these parts + let fraction = Perbill::from_parts(clamped); + + // Multiply fraction by subnet_tao to get the staker’s share (u64). + let tao_share = fraction * subnet_tao; + + // Credit the coldkey (or hotkey, depending on your design). + Self::add_balance_to_coldkey_account(&coldkey, tao_share); + + // Remove these alpha shares. + Alpha::::remove((hotkey.clone(), coldkey.clone(), netuid)); + } + } + + // Clear any leftover alpha in/out accumulations. + SubnetAlphaIn::::insert(netuid, 0); + SubnetAlphaOut::::insert(netuid, 0); + + // 4. Calculate partial refund = max(0, lock_cost - owner_received_emission). + let final_refund = lock_cost.saturating_sub(owner_received_emission).max(0); + + // 5. Set the locked balance on this subnet to 0, then credit the final_refund. 
+ Self::set_subnet_locked_balance(netuid, 0); + + if final_refund > 0 { + Self::add_balance_to_coldkey_account(&owner_coldkey, final_refund); + } + + Ok(()) + } + + pub fn get_network_to_prune() -> Option { + let current_block: u64 = Self::get_current_block_as_u64(); + let total_networks: u16 = TotalNetworks::::get(); + + let mut candidate_netuid: Option = None; + let mut candidate_emission = u64::MAX; + let mut candidate_timestamp = u64::MAX; + + for netuid in 1..=total_networks { + let registered_at = NetworkRegisteredAt::::get(netuid); + + let start_block = match FirstEmissionBlockNumber::::get(netuid) { + Some(block) => block, + None => { + // Not enabled yet. If still within ActivationDeadline, skip pruning this subnet. + if current_block + < registered_at.saturating_add(Self::get_network_activation_deadline()) + { + continue; + } + // Otherwise, we treat it as if it started at its registered time + registered_at + } + }; + + // Check if the subnet's immunity period is expired. + let immunity_period = Self::get_network_immunity_period(); + if current_block < start_block.saturating_add(immunity_period as u64) { + continue; + } + + // We want total emission across all UIDs in this subnet: + let emission_vec = Emission::::get(netuid); + let total_emission = emission_vec.iter().sum::(); + + // If tie on total_emission, earliest registration wins + if total_emission < candidate_emission + || (total_emission == candidate_emission && registered_at < candidate_timestamp) + { + candidate_netuid = Some(netuid); + candidate_emission = total_emission; + candidate_timestamp = registered_at; + } + } + + candidate_netuid + } } diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 650fb50451..0a0d9083f5 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1231,7 +1231,7 @@ mod dispatches { netuid: u16, ) -> DispatchResult { ensure_root(origin)?; - 
Self::user_remove_network(coldkey, netuid) + Self::do_dissolve_network(netuid) } /// Set a single child for a given hotkey on a specified network. diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index c60689d23d..b122bfa049 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -440,57 +440,4 @@ impl Pallet { pub fn is_valid_subnet_for_emission(netuid: u16) -> bool { FirstEmissionBlockNumber::::get(netuid).is_some() } - - /// Select a subnet to prune: - /// - Only consider subnets that are Enabled. - /// - Exclude subnets still within `ImmunityPeriod`. - /// - Pick the one with the lowest total emission - /// - In the case of a tie, pick the earliest registered. - pub fn get_network_to_prune() -> Option { - let current_block: u64 = Self::get_current_block_as_u64(); - let total_networks: u16 = TotalNetworks::::get(); - - let mut candidate_netuid: Option = None; - let mut candidate_emission = u64::MAX; - let mut candidate_timestamp = u64::MAX; - - for netuid in 1..=total_networks { - let registered_at = NetworkRegisteredAt::::get(netuid); - - let start_block = match FirstEmissionBlockNumber::::get(netuid) { - Some(block) => block, - None => { - // Not enabled yet. If still within ActivationDeadline, skip pruning this subnet. - if current_block - < registered_at.saturating_add(Self::get_network_activation_deadline()) - { - continue; - } - // Otherwise, we treat it as if it started at its registered time - registered_at - } - }; - - // Check if the subnet's immunity period is expired. 
- let immunity_period = Self::get_network_immunity_period(); - if current_block < start_block.saturating_add(immunity_period as u64) { - continue; - } - - // We want total emission across all UIDs in this subnet: - let emission_vec = Emission::::get(netuid); - let total_emission = emission_vec.iter().sum::(); - - // If tie on total_emission, earliest registration wins - if total_emission < candidate_emission - || (total_emission == candidate_emission && registered_at < candidate_timestamp) - { - candidate_netuid = Some(netuid); - candidate_emission = total_emission; - candidate_timestamp = registered_at; - } - } - - candidate_netuid - } } diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 7dda0502c1..0a157c3a5c 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -32,8 +32,7 @@ fn test_registration_ok() { coldkey_account_id )); - assert_ok!(SubtensorModule::user_remove_network( - coldkey_account_id, + assert_ok!(SubtensorModule::do_dissolve_network( netuid )); From 000c3d0a1b420a52ac1af555d76d203ad83a4933 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 2 Jun 2025 11:08:03 -0700 Subject: [PATCH 006/379] remove NetworkActivationDeadline --- pallets/subtensor/src/coinbase/root.rs | 81 ++++++++++--------------- pallets/subtensor/src/lib.rs | 9 --- pallets/subtensor/src/tests/networks.rs | 4 +- 3 files changed, 33 insertions(+), 61 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 218137b8eb..dd8206e281 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -21,10 +21,10 @@ use frame_support::storage::IterableStorageDoubleMap; use frame_support::weights::Weight; use safe_math::*; use sp_core::Get; -use sp_std::vec; +use sp_runtime::PerThing; use sp_runtime::Perbill; +use sp_std::vec; use 
substrate_fixed::types::{I64F64, U96F32}; -use sp_runtime::PerThing; impl Pallet { /// Fetches the total count of root network validators @@ -434,7 +434,10 @@ impl Pallet { Self::destroy_alpha_in_out_stakes(netuid)?; // --- Finally, remove the network entirely. - ensure!(Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist); + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); Self::remove_network(netuid); // --- Emit event. @@ -489,7 +492,7 @@ impl Pallet { Weights::::insert(Self::get_root_netuid(), uid_i, modified_weights); } - // --- 10. Remove various network-related parameters and data. + // --- 10. Remove network-related parameters and data. Rank::::remove(netuid); Trust::::remove(netuid); Active::::remove(netuid); @@ -505,8 +508,6 @@ impl Pallet { for (_uid, key) in keys { IsNetworkMember::::remove(key, netuid); } - - // --- 11. Erase network parameters. Tempo::::remove(netuid); Kappa::::remove(netuid); Difficulty::::remove(netuid); @@ -518,8 +519,6 @@ impl Pallet { RegistrationsThisInterval::::remove(netuid); POWRegistrationsThisInterval::::remove(netuid); BurnRegistrationsThisInterval::::remove(netuid); - - // --- 12. Remove additional dTao-related storages if applicable. SubnetTAO::::remove(netuid); SubnetAlphaInEmission::::remove(netuid); SubnetAlphaOutEmission::::remove(netuid); @@ -527,7 +526,6 @@ impl Pallet { SubnetVolume::::remove(netuid); SubnetMovingPrice::::remove(netuid); - // --- 13. Remove subnet identity if it exists. 
if SubnetIdentitiesV2::::contains_key(netuid) { SubnetIdentitiesV2::::remove(netuid); Self::deposit_event(Event::SubnetIdentityRemoved(netuid)); @@ -598,9 +596,6 @@ impl Pallet { pub fn get_network_immunity_period() -> u64 { NetworkImmunityPeriod::::get() } - pub fn get_network_activation_deadline() -> u64 { - NetworkActivationDeadline::::get() - } pub fn set_network_immunity_period(net_immunity_period: u64) { NetworkImmunityPeriod::::set(net_immunity_period); Self::deposit_event(Event::NetworkImmunityPeriodSet(net_immunity_period)); @@ -648,26 +643,29 @@ impl Pallet { fn destroy_alpha_in_out_stakes(netuid: u16) -> DispatchResult { // 1. Ensure the subnet exists. - ensure!(Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist); - + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + // 2. Gather relevant info. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); let lock_cost: u64 = Self::get_subnet_locked_balance(netuid); - + // (Optional) Grab total emission in Tao. let emission_vec = Emission::::get(netuid); let total_emission: u64 = emission_vec.iter().sum(); - + // The portion the owner received is total_emission * owner_cut (stored as fraction in U96F32). let owner_fraction = Self::get_float_subnet_owner_cut(); let owner_received_emission = (U96F32::from_num(total_emission) * owner_fraction) .floor() .saturating_to_num::(); - + // 3. Destroy α stakes and distribute remaining subnet Tao to α-out stakers (pro rata). let mut total_alpha_out: u128 = 0; let mut stakers_data = Vec::new(); - + // (A) First pass: sum total alpha-out for this netuid. for ((hotkey, coldkey, this_netuid), alpha_shares) in Alpha::::iter() { if this_netuid == netuid { @@ -677,54 +675,54 @@ impl Pallet { stakers_data.push((hotkey, coldkey, alpha_as_u128)); } } - + // (B) Second pass: distribute the subnet’s Tao among those stakers. 
let subnet_tao = SubnetTAO::::get(netuid); - + if total_alpha_out > 0 { let accuracy_as_u128 = u128::from(Perbill::ACCURACY); - + for (hotkey, coldkey, alpha_amount) in stakers_data { let scaled = alpha_amount .saturating_mul(accuracy_as_u128) .checked_div(total_alpha_out) .unwrap_or(0); - + // Clamp to avoid overflow beyond the Perbill limit (which is a 1.0 fraction). let clamped = if scaled > accuracy_as_u128 { Perbill::ACCURACY } else { scaled as u32 }; - + // Construct a Perbill from these parts let fraction = Perbill::from_parts(clamped); - + // Multiply fraction by subnet_tao to get the staker’s share (u64). let tao_share = fraction * subnet_tao; - + // Credit the coldkey (or hotkey, depending on your design). Self::add_balance_to_coldkey_account(&coldkey, tao_share); - + // Remove these alpha shares. Alpha::::remove((hotkey.clone(), coldkey.clone(), netuid)); } } - + // Clear any leftover alpha in/out accumulations. SubnetAlphaIn::::insert(netuid, 0); SubnetAlphaOut::::insert(netuid, 0); - + // 4. Calculate partial refund = max(0, lock_cost - owner_received_emission). let final_refund = lock_cost.saturating_sub(owner_received_emission).max(0); - + // 5. Set the locked balance on this subnet to 0, then credit the final_refund. Self::set_subnet_locked_balance(netuid, 0); - + if final_refund > 0 { Self::add_balance_to_coldkey_account(&owner_coldkey, final_refund); } - + Ok(()) } @@ -739,23 +737,8 @@ impl Pallet { for netuid in 1..=total_networks { let registered_at = NetworkRegisteredAt::::get(netuid); - let start_block = match FirstEmissionBlockNumber::::get(netuid) { - Some(block) => block, - None => { - // Not enabled yet. If still within ActivationDeadline, skip pruning this subnet. - if current_block - < registered_at.saturating_add(Self::get_network_activation_deadline()) - { - continue; - } - // Otherwise, we treat it as if it started at its registered time - registered_at - } - }; - - // Check if the subnet's immunity period is expired. 
- let immunity_period = Self::get_network_immunity_period(); - if current_block < start_block.saturating_add(immunity_period as u64) { + // Skip immune networks + if current_block < registered_at.saturating_add(Self::get_network_immunity_period()) { continue; } @@ -774,5 +757,5 @@ impl Pallet { } candidate_netuid - } + } } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 8e148bee69..197cd5f8f7 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -556,11 +556,6 @@ pub mod pallet { T::InitialNetworkImmunityPeriod::get() } #[pallet::type_value] - /// Default value for network activation deadline. - pub fn DefaultNetworkActivationDeadline() -> u64 { - 1_296_000 - } - #[pallet::type_value] /// Default value for network last registered. pub fn DefaultNetworkLastRegistered() -> u64 { 0 @@ -1199,10 +1194,6 @@ pub mod pallet { pub type NetworkImmunityPeriod = StorageValue<_, u64, ValueQuery, DefaultNetworkImmunityPeriod>; #[pallet::storage] - /// ITEM( network_activation_deadline ) - pub type NetworkActivationDeadline = - StorageValue<_, u64, ValueQuery, DefaultNetworkActivationDeadline>; - #[pallet::storage] /// ITEM( network_last_registered_block ) pub type NetworkLastRegistered = StorageValue<_, u64, ValueQuery, DefaultNetworkLastRegistered>; diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 0a157c3a5c..c9bb787437 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -32,9 +32,7 @@ fn test_registration_ok() { coldkey_account_id )); - assert_ok!(SubtensorModule::do_dissolve_network( - netuid - )); + assert_ok!(SubtensorModule::do_dissolve_network(netuid)); assert!(!SubtensorModule::if_subnet_exist(netuid)) }) From dfe72d5cbfaee7d6684197865434c45f5503b527 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 2 Jun 2025 11:18:37 -0700 Subject: [PATCH 007/379] update 
InitialNetworkImmunityPeriod --- pallets/admin-utils/src/tests/mock.rs | 2 +- pallets/subtensor/src/tests/mock.rs | 2 +- runtime/src/lib.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index f8b3e6a9b6..b625145410 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -120,7 +120,7 @@ parameter_types! { pub const InitialMaxDifficulty: u64 = u64::MAX; pub const InitialRAORecycledForRegistration: u64 = 0; pub const InitialSenateRequiredStakePercentage: u64 = 2; // 2 percent of total stake - pub const InitialNetworkImmunityPeriod: u64 = 7200 * 7; + pub const InitialNetworkImmunityPeriod: u64 = 1_296_000; pub const InitialNetworkMinAllowedUids: u16 = 128; pub const InitialNetworkMinLockCost: u64 = 100_000_000_000; pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners. diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 221d802ccd..dbabfb926f 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -172,7 +172,7 @@ parameter_types! { pub const InitialMaxDifficulty: u64 = u64::MAX; pub const InitialRAORecycledForRegistration: u64 = 0; pub const InitialSenateRequiredStakePercentage: u64 = 2; // 2 percent of total stake - pub const InitialNetworkImmunityPeriod: u64 = 7200 * 7; + pub const InitialNetworkImmunityPeriod: u64 = 1_296_000; pub const InitialNetworkMinAllowedUids: u16 = 128; pub const InitialNetworkMinLockCost: u64 = 100_000_000_000; pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners. diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 95b032f9e6..f4af9956be 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1073,7 +1073,7 @@ parameter_types! 
{ pub const SubtensorInitialTxChildKeyTakeRateLimit: u64 = INITIAL_CHILDKEY_TAKE_RATELIMIT; pub const SubtensorInitialRAORecycledForRegistration: u64 = 0; // 0 rao pub const SubtensorInitialSenateRequiredStakePercentage: u64 = 1; // 1 percent of total stake - pub const SubtensorInitialNetworkImmunity: u64 = 7 * 7200; + pub const SubtensorInitialNetworkImmunity: u64 = 1_296_000; pub const SubtensorInitialMinAllowedUids: u16 = 128; pub const SubtensorInitialMinLockCost: u64 = 1_000_000_000_000; // 1000 TAO pub const SubtensorInitialSubnetOwnerCut: u16 = 11_796; // 18 percent From 2676c6326361d9007d43ec880509a067f6ddfd37 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 5 Jun 2025 12:50:54 -0700 Subject: [PATCH 008/379] add dissolve_network tests --- pallets/subtensor/src/tests/networks.rs | 293 +++++++++++++++++++++++- 1 file changed, 292 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index c9bb787437..57bc32c713 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -1,8 +1,9 @@ use super::mock::*; use crate::*; -use frame_support::assert_ok; +use frame_support::{assert_err, assert_ok}; use frame_system::Config; use sp_core::U256; +use substrate_fixed::types::U64F64; #[test] fn test_registration_ok() { @@ -38,6 +39,296 @@ fn test_registration_ok() { }) } +#[test] +fn dissolve_no_stakers_no_alpha_no_emission() { + new_test_ext(0).execute_with(|| { + let cold = U256::from(1); + let hot = U256::from(2); + let net = add_dynamic_network(&hot, &cold); + + SubtensorModule::set_subnet_locked_balance(net, 0); + SubnetTAO::::insert(net, 0); + Emission::::insert(net, Vec::::new()); + + let before = SubtensorModule::get_coldkey_balance(&cold); + assert_ok!(SubtensorModule::do_dissolve_network(net)); + let after = SubtensorModule::get_coldkey_balance(&cold); + + // Balance should be unchanged (whatever the 
network-lock bookkeeping left there) + assert_eq!(after, before); + assert!(!SubtensorModule::if_subnet_exist(net)); + }); +} + +#[test] +fn dissolve_refunds_full_lock_cost_when_no_emission() { + new_test_ext(0).execute_with(|| { + let cold = U256::from(3); + let hot = U256::from(4); + let net = add_dynamic_network(&hot, &cold); + + let lock = 1_000_000u64; + SubtensorModule::set_subnet_locked_balance(net, lock); + SubnetTAO::::insert(net, 0); + Emission::::insert(net, Vec::::new()); + + let before = SubtensorModule::get_coldkey_balance(&cold); + assert_ok!(SubtensorModule::do_dissolve_network(net)); + let after = SubtensorModule::get_coldkey_balance(&cold); + + assert_eq!(after, before + lock); + }); +} + +#[test] +fn dissolve_single_alpha_out_staker_gets_all_tao() { + new_test_ext(0).execute_with(|| { + let owner_cold = U256::from(10); + let owner_hot = U256::from(20); + let net = add_dynamic_network(&owner_hot, &owner_cold); + + let s_hot = U256::from(100); + let s_cold = U256::from(200); + + Alpha::::insert((s_hot, s_cold, net), U64F64::from_num(5_000u128)); + SubnetTAO::::insert(net, 99_999); + SubtensorModule::set_subnet_locked_balance(net, 0); + Emission::::insert(net, Vec::::new()); + + let before = SubtensorModule::get_coldkey_balance(&s_cold); + assert_ok!(SubtensorModule::do_dissolve_network(net)); + let after = SubtensorModule::get_coldkey_balance(&s_cold); + + assert_eq!(after, before + 99_999); + assert!(Alpha::::iter().count() == 0); + }); +} + +#[test] +fn dissolve_two_stakers_pro_rata_distribution() { + new_test_ext(0).execute_with(|| { + let oc = U256::from(50); + let oh = U256::from(51); + let net = add_dynamic_network(&oh, &oc); + + // stakers α-out + let (s1_hot, s1_cold, a1) = (U256::from(201), U256::from(301), 300u128); + let (s2_hot, s2_cold, a2) = (U256::from(202), U256::from(302), 700u128); + + Alpha::::insert((s1_hot, s1_cold, net), U64F64::from_num(a1)); + Alpha::::insert((s2_hot, s2_cold, net), U64F64::from_num(a2)); + + 
SubnetTAO::::insert(net, 10_000); + SubtensorModule::set_subnet_locked_balance(net, 5_000); + Emission::::insert(net, Vec::::new()); + + let b1 = SubtensorModule::get_coldkey_balance(&s1_cold); + let b2 = SubtensorModule::get_coldkey_balance(&s2_cold); + let bo = SubtensorModule::get_coldkey_balance(&oc); + + assert_ok!(SubtensorModule::do_dissolve_network(net)); + + let total = a1 + a2; + let share1: u64 = (10_000u128 * a1 / total) as u64; + let share2: u64 = (10_000u128 * a2 / total) as u64; + + assert_eq!(SubtensorModule::get_coldkey_balance(&s1_cold), b1 + share1); + assert_eq!(SubtensorModule::get_coldkey_balance(&s2_cold), b2 + share2); + assert_eq!(SubtensorModule::get_coldkey_balance(&oc), bo + 5_000); + }); +} + +#[test] +fn dissolve_owner_cut_refund_logic() { + new_test_ext(0).execute_with(|| { + let oc = U256::from(70); + let oh = U256::from(71); + let net = add_dynamic_network(&oh, &oc); + + // staker + let sh = U256::from(77); + let sc = U256::from(88); + Alpha::::insert((sh, sc, net), U64F64::from_num(100u128)); + SubnetTAO::::insert(net, 1_000); + + // lock & emission + let lock = 2_000; + SubtensorModule::set_subnet_locked_balance(net, lock); + Emission::::insert(net, vec![200u64, 600]); + + // 18 % owner-cut + SubnetOwnerCut::::put(11_796u16); + let frac = 11_796f64 / 65_535f64; + let owner_em = (800f64 * frac).floor() as u64; + let expect = lock.saturating_sub(owner_em); + + let before = SubtensorModule::get_coldkey_balance(&oc); + assert_ok!(SubtensorModule::do_dissolve_network(net)); + let after = SubtensorModule::get_coldkey_balance(&oc); + + assert_eq!(after, before + expect); + }); +} + +#[test] +fn dissolve_zero_refund_when_emission_exceeds_lock() { + new_test_ext(0).execute_with(|| { + let oc = U256::from(1_000); + let oh = U256::from(2_000); + let net = add_dynamic_network(&oh, &oc); + + SubtensorModule::set_subnet_locked_balance(net, 1_000); + SubnetOwnerCut::::put(u16::MAX); // 100 % + Emission::::insert(net, vec![2_000u64]); + + let 
before = SubtensorModule::get_coldkey_balance(&oc); + assert_ok!(SubtensorModule::do_dissolve_network(net)); + let after = SubtensorModule::get_coldkey_balance(&oc); + + assert_eq!(after, before); // no refund + }); +} + +#[test] +fn dissolve_nonexistent_subnet_fails() { + new_test_ext(0).execute_with(|| { + assert_err!( + SubtensorModule::do_dissolve_network(9_999), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn dissolve_clears_all_per_subnet_storages() { + new_test_ext(0).execute_with(|| { + + let owner_cold = U256::from(123); + let owner_hot = U256::from(456); + let net = add_dynamic_network(&owner_hot, &owner_cold); + + // ------------------------------------------------------------------ + // Populate each storage item with a minimal value of the CORRECT type + // ------------------------------------------------------------------ + SubnetOwner::::insert(net, owner_cold); + SubnetworkN::::insert(net, 0u16); + NetworkModality::::insert(net, 0u16); + NetworksAdded::::insert(net, true); + NetworkRegisteredAt::::insert(net, 0u64); + + Rank::::insert(net, vec![1u16]); + Trust::::insert(net, vec![1u16]); + Active::::insert(net, vec![true]); + Emission::::insert(net, vec![1u64]); + Incentive::::insert(net, vec![1u16]); + Consensus::::insert(net, vec![1u16]); + Dividends::::insert(net, vec![1u16]); + PruningScores::::insert(net, vec![1u16]); + LastUpdate::::insert(net, vec![0u64]); + + ValidatorPermit::::insert(net, vec![true]); + ValidatorTrust::::insert(net, vec![1u16]); + + Tempo::::insert(net, 1u16); + Kappa::::insert(net, 1u16); + Difficulty::::insert(net, 1u64); + + MaxAllowedUids::::insert(net, 1u16); + ImmunityPeriod::::insert(net, 1u16); + ActivityCutoff::::insert(net, 1u16); + MaxWeightsLimit::::insert(net, 1u16); + MinAllowedWeights::::insert(net, 1u16); + + RegistrationsThisInterval::::insert(net, 1u16); + POWRegistrationsThisInterval::::insert(net, 1u16); + BurnRegistrationsThisInterval::::insert(net, 1u16); + + SubnetTAO::::insert(net, 
1u64); + SubnetAlphaInEmission::::insert(net, 1u64); + SubnetAlphaOutEmission::::insert(net, 1u64); + SubnetTaoInEmission::::insert(net, 1u64); + SubnetVolume::::insert(net, 1u128); + + // Fields that will be ZEROED (not removed) + SubnetAlphaIn::::insert(net, 2u64); + SubnetAlphaOut::::insert(net, 3u64); + + // Prefix / double-map collections + Keys::::insert(net, 0u16, owner_hot); + Bonds::::insert(net, 0u16, vec![(0u16, 1u16)]); + Weights::::insert(net, 0u16, vec![(1u16, 1u16)]); + IsNetworkMember::::insert(owner_cold, net, true); + + // ------------------------------------------------------------------ + // Dissolve + // ------------------------------------------------------------------ + assert_ok!(SubtensorModule::do_dissolve_network(net)); + + // ------------------------------------------------------------------ + // Items that must be COMPLETELY REMOVED + // ------------------------------------------------------------------ + assert!(!SubnetOwner::::contains_key(net)); + assert!(!SubnetworkN::::contains_key(net)); + assert!(!NetworkModality::::contains_key(net)); + assert!(!NetworksAdded::::contains_key(net)); + assert!(!NetworkRegisteredAt::::contains_key(net)); + + assert!(!Rank::::contains_key(net)); + assert!(!Trust::::contains_key(net)); + assert!(!Active::::contains_key(net)); + assert!(!Emission::::contains_key(net)); + assert!(!Incentive::::contains_key(net)); + assert!(!Consensus::::contains_key(net)); + assert!(!Dividends::::contains_key(net)); + assert!(!PruningScores::::contains_key(net)); + assert!(!LastUpdate::::contains_key(net)); + + assert!(!ValidatorPermit::::contains_key(net)); + assert!(!ValidatorTrust::::contains_key(net)); + + assert!(!Tempo::::contains_key(net)); + assert!(!Kappa::::contains_key(net)); + assert!(!Difficulty::::contains_key(net)); + + assert!(!MaxAllowedUids::::contains_key(net)); + assert!(!ImmunityPeriod::::contains_key(net)); + assert!(!ActivityCutoff::::contains_key(net)); + 
assert!(!MaxWeightsLimit::::contains_key(net)); + assert!(!MinAllowedWeights::::contains_key(net)); + + assert!(!RegistrationsThisInterval::::contains_key(net)); + assert!(!POWRegistrationsThisInterval::::contains_key(net)); + assert!(!BurnRegistrationsThisInterval::::contains_key(net)); + + assert!(!SubnetTAO::::contains_key(net)); + assert!(!SubnetAlphaInEmission::::contains_key(net)); + assert!(!SubnetAlphaOutEmission::::contains_key(net)); + assert!(!SubnetTaoInEmission::::contains_key(net)); + assert!(!SubnetVolume::::contains_key(net)); + + // ------------------------------------------------------------------ + // Items expected to be PRESENT but ZERO + // ------------------------------------------------------------------ + assert_eq!(SubnetAlphaIn::::get(net), 0); + assert_eq!(SubnetAlphaOut::::get(net), 0); + + // ------------------------------------------------------------------ + // Collections fully cleared + // ------------------------------------------------------------------ + assert!(Keys::::iter_prefix(net).next().is_none()); + assert!(Bonds::::iter_prefix(net).next().is_none()); + assert!(Weights::::iter_prefix(net).next().is_none()); + assert!(!IsNetworkMember::::contains_key(owner_hot, net)); + + // ------------------------------------------------------------------ + // Final subnet removal confirmation + // ------------------------------------------------------------------ + assert!(!SubtensorModule::if_subnet_exist(net)); + }); +} + + + // #[test] // fn test_schedule_dissolve_network_execution() { // new_test_ext(1).execute_with(|| { From a4c78f3bf3196f4191875b387ff28c3d44f53843 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 5 Jun 2025 12:51:10 -0700 Subject: [PATCH 009/379] fmt --- pallets/subtensor/src/tests/networks.rs | 41 ++++++++++++------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs 
index 57bc32c713..67ee34437e 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -43,8 +43,8 @@ fn test_registration_ok() { fn dissolve_no_stakers_no_alpha_no_emission() { new_test_ext(0).execute_with(|| { let cold = U256::from(1); - let hot = U256::from(2); - let net = add_dynamic_network(&hot, &cold); + let hot = U256::from(2); + let net = add_dynamic_network(&hot, &cold); SubtensorModule::set_subnet_locked_balance(net, 0); SubnetTAO::::insert(net, 0); @@ -52,7 +52,7 @@ fn dissolve_no_stakers_no_alpha_no_emission() { let before = SubtensorModule::get_coldkey_balance(&cold); assert_ok!(SubtensorModule::do_dissolve_network(net)); - let after = SubtensorModule::get_coldkey_balance(&cold); + let after = SubtensorModule::get_coldkey_balance(&cold); // Balance should be unchanged (whatever the network-lock bookkeeping left there) assert_eq!(after, before); @@ -64,8 +64,8 @@ fn dissolve_no_stakers_no_alpha_no_emission() { fn dissolve_refunds_full_lock_cost_when_no_emission() { new_test_ext(0).execute_with(|| { let cold = U256::from(3); - let hot = U256::from(4); - let net = add_dynamic_network(&hot, &cold); + let hot = U256::from(4); + let net = add_dynamic_network(&hot, &cold); let lock = 1_000_000u64; SubtensorModule::set_subnet_locked_balance(net, lock); @@ -74,7 +74,7 @@ fn dissolve_refunds_full_lock_cost_when_no_emission() { let before = SubtensorModule::get_coldkey_balance(&cold); assert_ok!(SubtensorModule::do_dissolve_network(net)); - let after = SubtensorModule::get_coldkey_balance(&cold); + let after = SubtensorModule::get_coldkey_balance(&cold); assert_eq!(after, before + lock); }); @@ -84,10 +84,10 @@ fn dissolve_refunds_full_lock_cost_when_no_emission() { fn dissolve_single_alpha_out_staker_gets_all_tao() { new_test_ext(0).execute_with(|| { let owner_cold = U256::from(10); - let owner_hot = U256::from(20); - let net = add_dynamic_network(&owner_hot, &owner_cold); + let owner_hot = U256::from(20); + let net = 
add_dynamic_network(&owner_hot, &owner_cold); - let s_hot = U256::from(100); + let s_hot = U256::from(100); let s_cold = U256::from(200); Alpha::::insert((s_hot, s_cold, net), U64F64::from_num(5_000u128)); @@ -97,7 +97,7 @@ fn dissolve_single_alpha_out_staker_gets_all_tao() { let before = SubtensorModule::get_coldkey_balance(&s_cold); assert_ok!(SubtensorModule::do_dissolve_network(net)); - let after = SubtensorModule::get_coldkey_balance(&s_cold); + let after = SubtensorModule::get_coldkey_balance(&s_cold); assert_eq!(after, before + 99_999); assert!(Alpha::::iter().count() == 0); @@ -134,7 +134,7 @@ fn dissolve_two_stakers_pro_rata_distribution() { assert_eq!(SubtensorModule::get_coldkey_balance(&s1_cold), b1 + share1); assert_eq!(SubtensorModule::get_coldkey_balance(&s2_cold), b2 + share2); - assert_eq!(SubtensorModule::get_coldkey_balance(&oc), bo + 5_000); + assert_eq!(SubtensorModule::get_coldkey_balance(&oc), bo + 5_000); }); } @@ -146,8 +146,8 @@ fn dissolve_owner_cut_refund_logic() { let net = add_dynamic_network(&oh, &oc); // staker - let sh = U256::from(77); - let sc = U256::from(88); + let sh = U256::from(77); + let sc = U256::from(88); Alpha::::insert((sh, sc, net), U64F64::from_num(100u128)); SubnetTAO::::insert(net, 1_000); @@ -158,13 +158,13 @@ fn dissolve_owner_cut_refund_logic() { // 18 % owner-cut SubnetOwnerCut::::put(11_796u16); - let frac = 11_796f64 / 65_535f64; + let frac = 11_796f64 / 65_535f64; let owner_em = (800f64 * frac).floor() as u64; - let expect = lock.saturating_sub(owner_em); + let expect = lock.saturating_sub(owner_em); let before = SubtensorModule::get_coldkey_balance(&oc); assert_ok!(SubtensorModule::do_dissolve_network(net)); - let after = SubtensorModule::get_coldkey_balance(&oc); + let after = SubtensorModule::get_coldkey_balance(&oc); assert_eq!(after, before + expect); }); @@ -183,7 +183,7 @@ fn dissolve_zero_refund_when_emission_exceeds_lock() { let before = SubtensorModule::get_coldkey_balance(&oc); 
assert_ok!(SubtensorModule::do_dissolve_network(net)); - let after = SubtensorModule::get_coldkey_balance(&oc); + let after = SubtensorModule::get_coldkey_balance(&oc); assert_eq!(after, before); // no refund }); @@ -202,10 +202,9 @@ fn dissolve_nonexistent_subnet_fails() { #[test] fn dissolve_clears_all_per_subnet_storages() { new_test_ext(0).execute_with(|| { - let owner_cold = U256::from(123); - let owner_hot = U256::from(456); - let net = add_dynamic_network(&owner_hot, &owner_cold); + let owner_hot = U256::from(456); + let net = add_dynamic_network(&owner_hot, &owner_cold); // ------------------------------------------------------------------ // Populate each storage item with a minimal value of the CORRECT type @@ -327,8 +326,6 @@ fn dissolve_clears_all_per_subnet_storages() { }); } - - // #[test] // fn test_schedule_dissolve_network_execution() { // new_test_ext(1).execute_with(|| { From ac29e34c401b329b6bc5319c141b8a350db3e2fc Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 06:36:53 -0700 Subject: [PATCH 010/379] unused param --- pallets/subtensor/src/macros/dispatches.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 0a0d9083f5..d3db94c727 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1227,7 +1227,7 @@ mod dispatches { .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::No))] pub fn dissolve_network( origin: OriginFor, - coldkey: T::AccountId, + _coldkey: T::AccountId, netuid: u16, ) -> DispatchResult { ensure_root(origin)?; From 17f098210dad02fa1e5cd5d36edb022d20f5c5d2 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 06:37:25 -0700 Subject: [PATCH 011/379] clean up destroy_alpha_in_out_stakes --- pallets/subtensor/src/coinbase/root.rs 
| 76 ++++++++++---------------- 1 file changed, 30 insertions(+), 46 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index dd8206e281..2a6dedeb75 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -21,8 +21,6 @@ use frame_support::storage::IterableStorageDoubleMap; use frame_support::weights::Weight; use safe_math::*; use sp_core::Get; -use sp_runtime::PerThing; -use sp_runtime::Perbill; use sp_std::vec; use substrate_fixed::types::{I64F64, U96F32}; @@ -641,86 +639,72 @@ impl Pallet { LastRateLimitedBlock::::set(rate_limit_key, block); } - fn destroy_alpha_in_out_stakes(netuid: u16) -> DispatchResult { + pub fn destroy_alpha_in_out_stakes(netuid: u16) -> DispatchResult { // 1. Ensure the subnet exists. ensure!( Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist ); - // 2. Gather relevant info. + // 2. Gather basic info. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); let lock_cost: u64 = Self::get_subnet_locked_balance(netuid); - // (Optional) Grab total emission in Tao. - let emission_vec = Emission::::get(netuid); - let total_emission: u64 = emission_vec.iter().sum(); - - // The portion the owner received is total_emission * owner_cut (stored as fraction in U96F32). + // How much Tao the subnet has emitted and what the owner already earned. + let total_emission: u64 = Emission::::get(netuid).iter().sum(); let owner_fraction = Self::get_float_subnet_owner_cut(); let owner_received_emission = (U96F32::from_num(total_emission) * owner_fraction) .floor() .saturating_to_num::(); - // 3. Destroy α stakes and distribute remaining subnet Tao to α-out stakers (pro rata). + // 3. Collect α-out staker data. let mut total_alpha_out: u128 = 0; - let mut stakers_data = Vec::new(); + let mut stakers = Vec::new(); - // (A) First pass: sum total alpha-out for this netuid. 
for ((hotkey, coldkey, this_netuid), alpha_shares) in Alpha::::iter() { if this_netuid == netuid { - // alpha_shares is U64F64; convert to u128 for ratio math - let alpha_as_u128 = alpha_shares.saturating_to_num::(); - total_alpha_out = total_alpha_out.saturating_add(alpha_as_u128); - stakers_data.push((hotkey, coldkey, alpha_as_u128)); + let amount = alpha_shares.saturating_to_num::(); + total_alpha_out = total_alpha_out.saturating_add(amount); + stakers.push((hotkey, coldkey, amount)); } } - // (B) Second pass: distribute the subnet’s Tao among those stakers. - let subnet_tao = SubnetTAO::::get(netuid); - - if total_alpha_out > 0 { - let accuracy_as_u128 = u128::from(Perbill::ACCURACY); + // 4. Distribute the subnet’s Tao pro-rata. + let subnet_tao_u128 = SubnetTAO::::get(netuid) as u128; - for (hotkey, coldkey, alpha_amount) in stakers_data { - let scaled = alpha_amount - .saturating_mul(accuracy_as_u128) + if total_alpha_out > 0 && subnet_tao_u128 > 0 { + for (hotkey, coldkey, alpha_amount) in &stakers { + // tao_share = subnet_tao * α / Σα + let share_u128 = subnet_tao_u128 + .saturating_mul(*alpha_amount) .checked_div(total_alpha_out) .unwrap_or(0); - // Clamp to avoid overflow beyond the Perbill limit (which is a 1.0 fraction). - let clamped = if scaled > accuracy_as_u128 { - Perbill::ACCURACY - } else { - scaled as u32 - }; - - // Construct a Perbill from these parts - let fraction = Perbill::from_parts(clamped); + let share_u64 = share_u128.min(u64::MAX as u128) as u64; - // Multiply fraction by subnet_tao to get the staker’s share (u64). - let tao_share = fraction * subnet_tao; - - // Credit the coldkey (or hotkey, depending on your design). - Self::add_balance_to_coldkey_account(&coldkey, tao_share); + if share_u64 > 0 { + Self::add_balance_to_coldkey_account(coldkey, share_u64); + } - // Remove these alpha shares. + Alpha::::remove((hotkey.clone(), coldkey.clone(), netuid)); + } + } else { + // No α-out stakers: just clear any lingering records. 
+ for (hotkey, coldkey, _) in &stakers { Alpha::::remove((hotkey.clone(), coldkey.clone(), netuid)); } } - // Clear any leftover alpha in/out accumulations. + // 5. Reset α in/out accumulations. SubnetAlphaIn::::insert(netuid, 0); SubnetAlphaOut::::insert(netuid, 0); - // 4. Calculate partial refund = max(0, lock_cost - owner_received_emission). - let final_refund = lock_cost.saturating_sub(owner_received_emission).max(0); - - // 5. Set the locked balance on this subnet to 0, then credit the final_refund. + // 6. Refund any remaining lock (lock_cost − owner_cut already paid out). + let refund = lock_cost.saturating_sub(owner_received_emission); Self::set_subnet_locked_balance(netuid, 0); - if final_refund > 0 { - Self::add_balance_to_coldkey_account(&owner_coldkey, final_refund); + if refund > 0 { + Self::add_balance_to_coldkey_account(&owner_coldkey, refund); } Ok(()) From 4b03bc9730aa51a37a00f0070deec901b67bef27 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 08:26:31 -0700 Subject: [PATCH 012/379] update destroy_alpha_in_out_stakes --- pallets/subtensor/src/coinbase/root.rs | 83 ++++++++++++++++++-------- 1 file changed, 57 insertions(+), 26 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 2a6dedeb75..61cec45c9f 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -639,6 +639,14 @@ impl Pallet { LastRateLimitedBlock::::set(rate_limit_key, block); } + /// Burns **nothing**: every Tao in `SubnetTAO` is now paid out to + /// α-out stakers, including any units that would previously have been + /// lost to flooring. + /// + /// Rounding strategy + /// 1. First pass – give each staker `floor(T * α / Σα)` Tao. + /// 2. Second pass – distribute the *left-over* ( < #stakers ) one-by-one + /// to the stakers with the largest fractional remainders. 
pub fn destroy_alpha_in_out_stakes(netuid: u16) -> DispatchResult { // 1. Ensure the subnet exists. ensure!( @@ -646,63 +654,86 @@ impl Pallet { Error::::SubNetworkDoesNotExist ); - // 2. Gather basic info. + // 2. Basic info. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); let lock_cost: u64 = Self::get_subnet_locked_balance(netuid); - // How much Tao the subnet has emitted and what the owner already earned. + // Owner-cut already received from emissions. let total_emission: u64 = Emission::::get(netuid).iter().sum(); let owner_fraction = Self::get_float_subnet_owner_cut(); let owner_received_emission = (U96F32::from_num(total_emission) * owner_fraction) .floor() .saturating_to_num::(); - // 3. Collect α-out staker data. + // 3. Gather α-out stakers. let mut total_alpha_out: u128 = 0; - let mut stakers = Vec::new(); + let mut stakers: Vec<(T::AccountId, T::AccountId, u128)> = Vec::new(); - for ((hotkey, coldkey, this_netuid), alpha_shares) in Alpha::::iter() { + for ((hot, cold, this_netuid), alpha) in Alpha::::iter() { if this_netuid == netuid { - let amount = alpha_shares.saturating_to_num::(); - total_alpha_out = total_alpha_out.saturating_add(amount); - stakers.push((hotkey, coldkey, amount)); + let a = alpha.saturating_to_num::(); + total_alpha_out = total_alpha_out.saturating_add(a); + stakers.push((hot, cold, a)); } } - // 4. Distribute the subnet’s Tao pro-rata. - let subnet_tao_u128 = SubnetTAO::::get(netuid) as u128; + // 4. Pro-rata distribution WITH remainder handling. 
+ let subnet_tao: u128 = SubnetTAO::::get(netuid) as u128; - if total_alpha_out > 0 && subnet_tao_u128 > 0 { - for (hotkey, coldkey, alpha_amount) in &stakers { - // tao_share = subnet_tao * α / Σα - let share_u128 = subnet_tao_u128 - .saturating_mul(*alpha_amount) - .checked_div(total_alpha_out) - .unwrap_or(0); + if total_alpha_out > 0 && subnet_tao > 0 && !stakers.is_empty() { + struct Portion { + hot: A, + cold: C, + share: u64, + rem: u128, + } + let mut portions: Vec> = Vec::with_capacity(stakers.len()); + let mut distributed: u128 = 0; + for (hot, cold, a) in &stakers { + let prod = subnet_tao.saturating_mul(*a); + let share_u128 = prod / total_alpha_out; let share_u64 = share_u128.min(u64::MAX as u128) as u64; + distributed = distributed.saturating_add(share_u64 as u128); + + portions.push(Portion { + hot: hot.clone(), + cold: cold.clone(), + share: share_u64, + rem: prod % total_alpha_out, + }); + } - if share_u64 > 0 { - Self::add_balance_to_coldkey_account(coldkey, share_u64); + // Left-over units ( < stakers.len() ). + let leftover = subnet_tao.saturating_sub(distributed); + if leftover > 0 { + portions.sort_by(|a, b| b.rem.cmp(&a.rem)); + for p in portions.iter_mut().take(leftover as usize) { + p.share = p.share.saturating_add(1); } + } - Alpha::::remove((hotkey.clone(), coldkey.clone(), netuid)); + // Final crediting and α-record cleanup. + for p in portions { + if p.share > 0 { + Self::add_balance_to_coldkey_account(&p.cold, p.share); + } + Alpha::::remove((&p.hot, &p.cold, netuid)); } } else { - // No α-out stakers: just clear any lingering records. - for (hotkey, coldkey, _) in &stakers { - Alpha::::remove((hotkey.clone(), coldkey.clone(), netuid)); + // No α-out or no Tao – just clear α-records. + for (hot, cold, _) in &stakers { + Alpha::::remove((hot.clone(), cold.clone(), netuid)); } } - // 5. Reset α in/out accumulations. + // 5. Reset α-in/out accumulations. SubnetAlphaIn::::insert(netuid, 0); SubnetAlphaOut::::insert(netuid, 0); - // 6. 
Refund any remaining lock (lock_cost − owner_cut already paid out). + // 6. Refund remaining lock. let refund = lock_cost.saturating_sub(owner_received_emission); Self::set_subnet_locked_balance(netuid, 0); - if refund > 0 { Self::add_balance_to_coldkey_account(&owner_coldkey, refund); } From 7c205a9b67fe0a0ee63db848691d5ebe69c55a4e Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 08:26:44 -0700 Subject: [PATCH 013/379] add more tests --- pallets/subtensor/src/tests/networks.rs | 76 +++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 67ee34437e..94e7214ec0 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -326,6 +326,82 @@ fn dissolve_clears_all_per_subnet_storages() { }); } +#[test] +fn dissolve_alpha_out_but_zero_tao_no_rewards() { + new_test_ext(0).execute_with(|| { + let oc = U256::from(21); + let oh = U256::from(22); + let net = add_dynamic_network(&oh, &oc); + + let sh = U256::from(23); + let sc = U256::from(24); + + Alpha::::insert((sh, sc, net), U64F64::from_num(1_000u128)); + SubnetTAO::::insert(net, 0u64); // zero TAO + SubtensorModule::set_subnet_locked_balance(net, 0); + Emission::::insert(net, Vec::::new()); + + let before = SubtensorModule::get_coldkey_balance(&sc); + assert_ok!(SubtensorModule::do_dissolve_network(net)); + let after = SubtensorModule::get_coldkey_balance(&sc); + + // No reward distributed, α-out cleared. + assert_eq!(after, before); + assert!(Alpha::::iter().next().is_none()); + }); +} + +#[test] +fn dissolve_decrements_total_networks() { + new_test_ext(0).execute_with(|| { + let total_before = TotalNetworks::::get(); + + let cold = U256::from(41); + let hot = U256::from(42); + let net = add_dynamic_network(&hot, &cold); + + // Sanity: adding network increments the counter. 
+ assert_eq!(TotalNetworks::::get(), total_before + 1); + + assert_ok!(SubtensorModule::do_dissolve_network(net)); + assert_eq!(TotalNetworks::::get(), total_before); + }); +} + +#[test] +fn dissolve_rounding_remainder_distribution() { + new_test_ext(0).execute_with(|| { + let oc = U256::from(61); + let oh = U256::from(62); + let net = add_dynamic_network(&oh, &oc); + + // α-out stakes + let (s1h, s1c, a1) = (U256::from(63), U256::from(64), 3u128); + let (s2h, s2c, a2) = (U256::from(65), U256::from(66), 2u128); + + Alpha::::insert((s1h, s1c, net), U64F64::from_num(a1)); + Alpha::::insert((s2h, s2c, net), U64F64::from_num(a2)); + + // TAO pot = 1 + SubnetTAO::::insert(net, 1u64); + SubtensorModule::set_subnet_locked_balance(net, 0); + Emission::::insert(net, Vec::::new()); + + let b1 = SubtensorModule::get_coldkey_balance(&s1c); + let b2 = SubtensorModule::get_coldkey_balance(&s2c); + + assert_ok!(SubtensorModule::do_dissolve_network(net)); + + // s1 (larger remainder) receives the single Tao. + assert_eq!(SubtensorModule::get_coldkey_balance(&s1c), b1 + 1); + assert_eq!(SubtensorModule::get_coldkey_balance(&s2c), b2); + + // α-records cleared; TAO storage gone. 
+ assert!(Alpha::::iter().next().is_none()); + assert!(!SubnetTAO::::contains_key(net)); + }); +} + // #[test] // fn test_schedule_dissolve_network_execution() { // new_test_ext(1).execute_with(|| { From f9483054e9e8dd49843040e6c7d9cd9ab1953425 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 09:00:12 -0700 Subject: [PATCH 014/379] add destroy_alpha_out_multiple_stakers_pro_rata --- pallets/subtensor/src/tests/networks.rs | 97 ++++++++++++++++++++++++- 1 file changed, 95 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 94e7214ec0..20bec813f3 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -371,8 +371,8 @@ fn dissolve_decrements_total_networks() { #[test] fn dissolve_rounding_remainder_distribution() { new_test_ext(0).execute_with(|| { - let oc = U256::from(61); - let oh = U256::from(62); + let oc = U256::from(61); + let oh = U256::from(62); let net = add_dynamic_network(&oh, &oc); // α-out stakes @@ -402,6 +402,99 @@ fn dissolve_rounding_remainder_distribution() { }); } +#[test] +fn destroy_alpha_out_multiple_stakers_pro_rata() { + new_test_ext(0).execute_with(|| { + // -------------------------------------------------- + // 1. Subnet owner + subnet creation + // -------------------------------------------------- + let owner_cold = U256::from(10); + let owner_hot = U256::from(20); + let net = add_dynamic_network(&owner_hot, &owner_cold); + + // -------------------------------------------------- + // 2. Two stakers – register hotkeys on the subnet + // -------------------------------------------------- + let (c1, h1) = (U256::from(111), U256::from(211)); + let (c2, h2) = (U256::from(222), U256::from(333)); + register_ok_neuron(net, h1, c1, 0); + register_ok_neuron(net, h2, c2, 0); + + // -------------------------------------------------- + // 3. 
Discover protocol-minimum amount (stake + fee) + // -------------------------------------------------- + let min_stake_total = + DefaultMinStake::::get().saturating_add(DefaultStakingFee::::get()); + + // target α-ratio 30 : 70 + let s1 = 3 * min_stake_total; + let s2 = 7 * min_stake_total; + + // -------------------------------------------------- + // 4. Fund coldkeys sufficiently, then stake via extrinsic + // -------------------------------------------------- + SubtensorModule::add_balance_to_coldkey_account(&c1, s1 + 50_000); + SubtensorModule::add_balance_to_coldkey_account(&c2, s2 + 50_000); + + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(c1), + h1, + net, + s1 + )); + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(c2), + h2, + net, + s2 + )); + + // -------------------------------------------------- + // 5. α snapshot + // -------------------------------------------------- + let a1: u128 = Alpha::::get((h1, c1, net)).saturating_to_num(); + let a2: u128 = Alpha::::get((h2, c2, net)).saturating_to_num(); + let atotal = a1 + a2; + + // -------------------------------------------------- + // 6. TAO pot + subnet lock + // -------------------------------------------------- + let tao_pot: u64 = 10_000; + SubnetTAO::::insert(net, tao_pot); + SubtensorModule::set_subnet_locked_balance(net, 5_000); + Emission::::insert(net, Vec::::new()); // owner earned nothing + + // -------------------------------------------------- + // 7. Balances before distribution + // -------------------------------------------------- + let b1 = SubtensorModule::get_coldkey_balance(&c1); + let b2 = SubtensorModule::get_coldkey_balance(&c2); + let bo = SubtensorModule::get_coldkey_balance(&owner_cold); + + // -------------------------------------------------- + // 8. 
Execute payout logic + // -------------------------------------------------- + assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(net)); + + // -------------------------------------------------- + // 9. Expected shares + // -------------------------------------------------- + let share1: u64 = (tao_pot as u128 * a1 / atotal) as u64; + let share2: u64 = tao_pot - share1; + + // -------------------------------------------------- + // 10. Assertions + // -------------------------------------------------- + assert_eq!(SubtensorModule::get_coldkey_balance(&c1), b1 + share1); + assert_eq!(SubtensorModule::get_coldkey_balance(&c2), b2 + share2); + assert_eq!( + SubtensorModule::get_coldkey_balance(&owner_cold), + bo + 5_000 + ); + assert!(Alpha::::iter().next().is_none()); + }); +} + // #[test] // fn test_schedule_dissolve_network_execution() { // new_test_ext(1).execute_with(|| { From 39f77cf84d0b1f176145bb4aae6f49ce667da1ca Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 09:24:27 -0700 Subject: [PATCH 015/379] add test destroy_alpha_out_many_stakers_complex_distribution --- pallets/subtensor/src/tests/networks.rs | 126 +++++++++++++++++++++--- 1 file changed, 115 insertions(+), 11 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 20bec813f3..ad9aea6002 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -410,15 +410,15 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { // -------------------------------------------------- let owner_cold = U256::from(10); let owner_hot = U256::from(20); - let net = add_dynamic_network(&owner_hot, &owner_cold); + let netuid = add_dynamic_network(&owner_hot, &owner_cold); // -------------------------------------------------- // 2. 
Two stakers – register hotkeys on the subnet // -------------------------------------------------- let (c1, h1) = (U256::from(111), U256::from(211)); let (c2, h2) = (U256::from(222), U256::from(333)); - register_ok_neuron(net, h1, c1, 0); - register_ok_neuron(net, h2, c2, 0); + register_ok_neuron(netuid, h1, c1, 0); + register_ok_neuron(netuid, h2, c2, 0); // -------------------------------------------------- // 3. Discover protocol-minimum amount (stake + fee) @@ -439,30 +439,30 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { assert_ok!(SubtensorModule::do_add_stake( RuntimeOrigin::signed(c1), h1, - net, + netuid, s1 )); assert_ok!(SubtensorModule::do_add_stake( RuntimeOrigin::signed(c2), h2, - net, + netuid, s2 )); // -------------------------------------------------- // 5. α snapshot // -------------------------------------------------- - let a1: u128 = Alpha::::get((h1, c1, net)).saturating_to_num(); - let a2: u128 = Alpha::::get((h2, c2, net)).saturating_to_num(); + let a1: u128 = Alpha::::get((h1, c1, netuid)).saturating_to_num(); + let a2: u128 = Alpha::::get((h2, c2, netuid)).saturating_to_num(); let atotal = a1 + a2; // -------------------------------------------------- // 6. TAO pot + subnet lock // -------------------------------------------------- let tao_pot: u64 = 10_000; - SubnetTAO::::insert(net, tao_pot); - SubtensorModule::set_subnet_locked_balance(net, 5_000); - Emission::::insert(net, Vec::::new()); // owner earned nothing + SubnetTAO::::insert(netuid, tao_pot); + SubtensorModule::set_subnet_locked_balance(netuid, 5_000); + Emission::::insert(netuid, Vec::::new()); // -------------------------------------------------- // 7. Balances before distribution @@ -474,7 +474,7 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { // -------------------------------------------------- // 8. 
Execute payout logic // -------------------------------------------------- - assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(net)); + assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(netuid)); // -------------------------------------------------- // 9. Expected shares @@ -495,6 +495,110 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { }); } +#[test] +fn destroy_alpha_out_many_stakers_complex_distribution() { + new_test_ext(0).execute_with(|| { + let owner_cold = U256::from(1_000); + let owner_hot = U256::from(2_000); + let netuid = add_dynamic_network(&owner_hot, &owner_cold); + SubtensorModule::set_max_registrations_per_block(netuid, 1000u16); + SubtensorModule::set_target_registrations_per_interval(netuid, 1000u16); + + let min_total = + DefaultMinStake::::get().saturating_add(DefaultStakingFee::::get()); + + const N: usize = 20; + let mut cold = [U256::zero(); N]; + let mut hot = [U256::zero(); N]; + let mut stake = [0u64; N]; + + for i in 0..N { + cold[i] = U256::from(10_000 + 2 * i as u32); + hot[i] = U256::from(10_001 + 2 * i as u32); + stake[i] = (i as u64 + 1) * min_total; + + register_ok_neuron(netuid, hot[i], cold[i], 0); + SubtensorModule::add_balance_to_coldkey_account(&cold[i], stake[i] + 100_000); + + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(cold[i]), + hot[i], + netuid, + stake[i] + )); + } + + let mut alpha = [0u128; N]; + let mut a_sum: u128 = 0; + for i in 0..N { + alpha[i] = Alpha::::get((hot[i], cold[i], netuid)).saturating_to_num(); + a_sum += alpha[i]; + } + + let tao_pot: u64 = 123_456; + let lock: u64 = 30_000; + + SubnetTAO::::insert(netuid, tao_pot); + SubtensorModule::set_subnet_locked_balance(netuid, lock); + + // prior emissions (owner already earned some) + Emission::::insert(netuid, vec![1_000u64, 2_000, 1_500]); + + // owner-cut = 50 % exactly + SubnetOwnerCut::::put(32_768); + + let mut before = [0u64; N]; + for i in 0..N { + before[i] = SubtensorModule::get_coldkey_balance(&cold[i]); + } 
+ let owner_before = SubtensorModule::get_coldkey_balance(&owner_cold); + + let owner_em: u64 = (4_500u128 * 32_768u128 / 65_535u128) as u64; + let expected_refund = lock.saturating_sub(owner_em); + + // Compute expected shares per pallet algorithm + let mut share = [0u64; N]; + let mut rem = [0u128; N]; + let mut paid: u128 = 0; + + for i in 0..N { + let prod = tao_pot as u128 * alpha[i]; + share[i] = (prod / a_sum) as u64; + rem[i] = prod % a_sum; + paid += share[i] as u128; + } + let leftover = tao_pot as u128 - paid; + // distribute +1 Tao to stakers with largest remainders + let mut idx: Vec<_> = (0..N).collect(); + idx.sort_by_key(|i| std::cmp::Reverse(rem[*i])); + for i in 0..leftover as usize { + share[idx[i]] += 1; + } + + assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(netuid)); + + // Assertions + for i in 0..N { + assert_eq!( + SubtensorModule::get_coldkey_balance(&cold[i]), + before[i] + share[i], + "staker {} incorrect payout", + i + 1 + ); + } + // b) owner refund is correct + assert_eq!( + SubtensorModule::get_coldkey_balance(&owner_cold), + owner_before + expected_refund + ); + // c) α cleared and counters reset + assert!(Alpha::::iter().next().is_none()); + assert_eq!(SubnetAlphaIn::::get(netuid), 0); + assert_eq!(SubnetAlphaOut::::get(netuid), 0); + assert_eq!(SubtensorModule::get_subnet_locked_balance(netuid), 0); + }); +} + // #[test] // fn test_schedule_dissolve_network_execution() { // new_test_ext(1).execute_with(|| { From ee61b24cbd52576a9be8276041d786396db834d0 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 10:12:01 -0700 Subject: [PATCH 016/379] remove comment --- pallets/subtensor/src/coinbase/root.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 61cec45c9f..4c381f6059 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ 
-639,14 +639,6 @@ impl Pallet { LastRateLimitedBlock::::set(rate_limit_key, block); } - /// Burns **nothing**: every Tao in `SubnetTAO` is now paid out to - /// α-out stakers, including any units that would previously have been - /// lost to flooring. - /// - /// Rounding strategy - /// 1. First pass – give each staker `floor(T * α / Σα)` Tao. - /// 2. Second pass – distribute the *left-over* ( < #stakers ) one-by-one - /// to the stakers with the largest fractional remainders. pub fn destroy_alpha_in_out_stakes(netuid: u16) -> DispatchResult { // 1. Ensure the subnet exists. ensure!( From 512578b24663be272f556cf76bf7f324d56c833a Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 10:12:07 -0700 Subject: [PATCH 017/379] add pruning tests --- pallets/subtensor/src/tests/networks.rs | 83 +++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index ad9aea6002..f9dc03b7c2 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -599,6 +599,89 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { }); } +#[test] +fn prune_none_with_no_networks() { + new_test_ext(0).execute_with(|| { + assert_eq!(SubtensorModule::get_network_to_prune(), None); + }); +} + +#[test] +fn prune_none_when_all_networks_immune() { + new_test_ext(0).execute_with(|| { + // two fresh networks → still inside immunity window + let n1 = add_dynamic_network(&U256::from(2), &U256::from(1)); + let _n2 = add_dynamic_network(&U256::from(4), &U256::from(3)); + + // emissions don’t matter while immune + Emission::::insert(n1, vec![10u64]); + + assert_eq!(SubtensorModule::get_network_to_prune(), None); + }); +} + +#[test] +fn prune_selects_network_with_lowest_emission() { + new_test_ext(0).execute_with(|| { + let n1 = add_dynamic_network(&U256::from(20), &U256::from(10)); + let n2 = 
add_dynamic_network(&U256::from(40), &U256::from(30)); + + // make both networks eligible (past immunity) + let imm = SubtensorModule::get_network_immunity_period(); + System::set_block_number(imm + 10); + + // n1 has lower total emission + Emission::::insert(n1, vec![5u64]); + Emission::::insert(n2, vec![100u64]); + + assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); + }); +} + +#[test] +fn prune_ignores_immune_network_even_if_lower_emission() { + new_test_ext(0).execute_with(|| { + // create mature network n1 first + let n1 = add_dynamic_network(&U256::from(22), &U256::from(11)); + + let imm = SubtensorModule::get_network_immunity_period(); + System::set_block_number(imm + 5); // advance → n1 now mature + + // create second network n2 *inside* immunity + let n2 = add_dynamic_network(&U256::from(44), &U256::from(33)); + + // emissions: n1 bigger, n2 smaller but immune + Emission::::insert(n1, vec![50u64]); + Emission::::insert(n2, vec![1u64]); + + System::set_block_number(imm + 10); // still immune for n2 + assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); + }); +} + +#[test] +fn prune_tie_on_emission_earlier_registration_wins() { + new_test_ext(0).execute_with(|| { + // n1 registered first + let n1 = add_dynamic_network(&U256::from(66), &U256::from(55)); + + // advance 1 block, then register n2 (later timestamp) + System::set_block_number(1); + let n2 = add_dynamic_network(&U256::from(88), &U256::from(77)); + + // push past immunity for both + let imm = SubtensorModule::get_network_immunity_period(); + System::set_block_number(imm + 20); + + // identical emissions → tie + Emission::::insert(n1, vec![123u64]); + Emission::::insert(n2, vec![123u64]); + + // earlier (n1) must be chosen + assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); + }); +} + // #[test] // fn test_schedule_dissolve_network_execution() { // new_test_ext(1).execute_with(|| { From b45c9eb6b27f5c3304f2a1d92499096cb1bb2c81 Mon Sep 17 00:00:00 2001 From: John 
Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 10:12:25 -0700 Subject: [PATCH 018/379] fmt --- pallets/subtensor/src/tests/networks.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index f9dc03b7c2..8eea395fba 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -645,7 +645,7 @@ fn prune_ignores_immune_network_even_if_lower_emission() { let n1 = add_dynamic_network(&U256::from(22), &U256::from(11)); let imm = SubtensorModule::get_network_immunity_period(); - System::set_block_number(imm + 5); // advance → n1 now mature + System::set_block_number(imm + 5); // advance → n1 now mature // create second network n2 *inside* immunity let n2 = add_dynamic_network(&U256::from(44), &U256::from(33)); @@ -654,7 +654,7 @@ fn prune_ignores_immune_network_even_if_lower_emission() { Emission::::insert(n1, vec![50u64]); Emission::::insert(n2, vec![1u64]); - System::set_block_number(imm + 10); // still immune for n2 + System::set_block_number(imm + 10); // still immune for n2 assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); }); } From b12164abb8eb6f5b33336610d02b15c6884b17a7 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 10:26:28 -0700 Subject: [PATCH 019/379] use saturating math --- pallets/subtensor/src/coinbase/root.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 4c381f6059..ace95d8a21 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -653,7 +653,8 @@ impl Pallet { // Owner-cut already received from emissions. 
let total_emission: u64 = Emission::::get(netuid).iter().sum(); let owner_fraction = Self::get_float_subnet_owner_cut(); - let owner_received_emission = (U96F32::from_num(total_emission) * owner_fraction) + let owner_received_emission = U96F32::from_num(total_emission) + .saturating_mul(owner_fraction) .floor() .saturating_to_num::(); @@ -684,15 +685,16 @@ impl Pallet { for (hot, cold, a) in &stakers { let prod = subnet_tao.saturating_mul(*a); - let share_u128 = prod / total_alpha_out; + let share_u128 = prod.checked_div(total_alpha_out).unwrap_or_default(); let share_u64 = share_u128.min(u64::MAX as u128) as u64; distributed = distributed.saturating_add(share_u64 as u128); + let rem = prod.checked_rem(total_alpha_out).unwrap_or_default(); portions.push(Portion { hot: hot.clone(), cold: cold.clone(), share: share_u64, - rem: prod % total_alpha_out, + rem, }); } From bc9cab8a7be4b43c29b4555d12cb5bcc1d78ed7f Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 10:28:52 -0700 Subject: [PATCH 020/379] clippy --- pallets/subtensor/src/tests/networks.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 8eea395fba..5820277d3e 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -495,6 +495,7 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { }); } +#[allow(clippy::indexing_slicing)] #[test] fn destroy_alpha_out_many_stakers_complex_distribution() { new_test_ext(0).execute_with(|| { From fc5c05cee740450a2620925ea443adf333605716 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 11:29:28 -0700 Subject: [PATCH 021/379] add SubnetLimit --- pallets/admin-utils/src/lib.rs | 4 +++- pallets/subtensor/src/lib.rs | 9 +++++++++ pallets/subtensor/src/macros/errors.rs | 2 ++ pallets/subtensor/src/macros/events.rs | 2 +- 
pallets/subtensor/src/utils/misc.rs | 15 +++++++++++++++ 5 files changed, 30 insertions(+), 2 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 2b41539816..ce739db4e0 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1003,8 +1003,10 @@ pub mod pallet { DispatchClass::Operational, Pays::No ))] - pub fn sudo_set_subnet_limit(origin: OriginFor, _max_subnets: u16) -> DispatchResult { + pub fn sudo_set_subnet_limit(origin: OriginFor, max_subnets: u16) -> DispatchResult { ensure_root(origin)?; + pallet_subtensor::Pallet::::set_max_subnets(max_subnets); + log::debug!("MaxSubnets ( max_subnets: {:?} ) ", max_subnets); Ok(()) } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 197cd5f8f7..2114950299 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -909,6 +909,12 @@ pub mod pallet { 50400 } + #[pallet::type_value] + /// Default value for subnet limit. + pub fn DefaultSubnetLimit() -> u16 { + 256 + } + #[pallet::storage] pub type MinActivityCutoff = StorageValue<_, u16, ValueQuery, DefaultMinActivityCutoff>; @@ -1080,6 +1086,9 @@ pub mod pallet { /// /// Eventually, Bittensor should migrate to using Holds afterwhich time we will not require this /// separate accounting. 
+ + #[pallet::storage] // --- ITEM ( maximum_number_of_networks ) + pub type SubnetLimit = StorageValue<_, u16, ValueQuery, DefaultSubnetLimit>; #[pallet::storage] // --- ITEM ( total_issuance ) pub type TotalIssuance = StorageValue<_, u64, ValueQuery, DefaultTotalIssuance>; #[pallet::storage] // --- ITEM ( total_stake ) diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index 2a8e5bc346..7c5819714d 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -214,5 +214,7 @@ mod errors { ZeroMaxStakeAmount, /// Invalid netuid duplication SameNetuid, + /// Subnet limit reached & no eligible subnet to prune + SubnetLimitReached, } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 9849a517ee..1d9c19ac17 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -150,7 +150,7 @@ mod events { /// the network minimum locking cost is set. NetworkMinLockCostSet(u64), /// the maximum number of subnets is set - // SubnetLimitSet(u16), + SubnetLimitSet(u16), /// the lock cost reduction is set NetworkLockCostReductionIntervalSet(u64), /// the take for a delegate is decreased. 
diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 899fa83646..24b03e42b8 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -772,4 +772,19 @@ impl Pallet { Err(_) => None, } } + + /// Fetches the max number of subnet + /// + /// # Returns: + /// * 'u16': The max number of subnet + /// + pub fn get_max_subnets() -> u16 { + SubnetLimit::::get() + } + + /// Sets the max number of subnet + pub fn set_max_subnets(limit: u16) { + SubnetLimit::::put(limit); + Self::deposit_event(Event::SubnetLimitSet(limit)); + } } From 4d746b31c0e157420483a7860a824d3285adad0a Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 11:56:42 -0700 Subject: [PATCH 022/379] prune subnets in do_register_network --- pallets/subtensor/src/subnets/subnet.rs | 49 ++++++++++++++++++------- 1 file changed, 35 insertions(+), 14 deletions(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index b122bfa049..a05b56cdbe 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -151,7 +151,20 @@ impl Pallet { Error::::NetworkTxRateLimitExceeded ); - // --- 5. Calculate and lock the required tokens. + // --- 5. Check if we need to prune a subnet (if at SubnetLimit). + // But do not prune yet; we only do it after all checks pass. + let subnet_limit = Self::get_max_subnets(); + let current_count = TotalNetworks::::get(); + let mut recycle_netuid: Option = None; + if current_count >= subnet_limit { + if let Some(netuid) = Self::get_network_to_prune() { + recycle_netuid = Some(netuid); + } else { + return Err(Error::::SubnetLimitReached.into()); + } + } + + // --- 6. Calculate and lock the required tokens. 
let lock_amount: u64 = Self::get_network_lock_cost(); log::debug!("network lock_amount: {:?}", lock_amount); ensure!( @@ -159,23 +172,31 @@ impl Pallet { Error::::NotEnoughBalanceToStake ); - // --- 5. Determine the netuid to register. - let netuid_to_register: u16 = Self::get_next_netuid(); - - // --- 6. Perform the lock operation. + // --- 7. Perform the lock operation. let actual_tao_lock_amount: u64 = Self::remove_balance_from_coldkey_account(&coldkey, lock_amount)?; log::debug!("actual_tao_lock_amount: {:?}", actual_tao_lock_amount); - // --- 7. Set the lock amount for use to determine pricing. + // --- 8. Set the lock amount for use to determine pricing. Self::set_network_last_lock(actual_tao_lock_amount); - // --- 8. Set initial and custom parameters for the network. + // --- 9. If we identified a subnet to prune, do it now. + if let Some(prune_netuid) = recycle_netuid { + Self::do_dissolve_network(prune_netuid)?; + } + + // --- 10. Determine netuid to register. If we pruned a subnet, reuse that netuid. + let netuid_to_register: u16 = match recycle_netuid { + Some(prune_netuid) => prune_netuid, + None => Self::get_next_netuid(), + }; + + // --- 11. Set initial and custom parameters for the network. let default_tempo = DefaultTempo::::get(); Self::init_new_network(netuid_to_register, default_tempo); log::debug!("init_new_network: {:?}", netuid_to_register); - // --- 9 . Add the caller to the neuron set. + // --- 12. Add the caller to the neuron set. Self::create_account_if_non_existent(&coldkey, hotkey); Self::append_neuron(netuid_to_register, hotkey, current_block); log::debug!( @@ -184,7 +205,7 @@ impl Pallet { hotkey ); - // --- 10. Set the mechanism. + // --- 13. Set the mechanism. SubnetMechanism::::insert(netuid_to_register, mechid); log::debug!( "SubnetMechanism for netuid {:?} set to: {:?}", @@ -192,11 +213,11 @@ impl Pallet { mechid ); - // --- 11. Set the creation terms. + // --- 14. Set the creation terms. 
NetworkLastRegistered::::set(current_block); NetworkRegisteredAt::::insert(netuid_to_register, current_block); - // --- 14. Init the pool by putting the lock as the initial alpha. + // --- 15. Init the pool by putting the lock as the initial alpha. TokenSymbol::::insert( netuid_to_register, Self::get_symbol_for_subnet(netuid_to_register), @@ -221,7 +242,7 @@ impl Pallet { Self::increase_total_stake(pool_initial_tao); } - // --- 15. Add the identity if it exists + // --- 16. Add the identity if it exists if let Some(identity_value) = identity { ensure!( Self::is_valid_subnet_identity(&identity_value), @@ -232,7 +253,7 @@ impl Pallet { Self::deposit_event(Event::SubnetIdentitySet(netuid_to_register)); } - // --- 16. Emit the NetworkAdded event. + // --- 17. Emit the NetworkAdded event. log::info!( "NetworkAdded( netuid:{:?}, mechanism:{:?} )", netuid_to_register, @@ -240,7 +261,7 @@ impl Pallet { ); Self::deposit_event(Event::NetworkAdded(netuid_to_register, mechid)); - // --- 17. Return success. + // --- 19. Return success. Ok(()) } From 90216c2bf84e9824b986695f134427fffbb4e36c Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 12:02:45 -0700 Subject: [PATCH 023/379] update doc comment --- pallets/subtensor/src/subnets/subnet.rs | 32 +++++++++++++++---------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index a05b56cdbe..970c1c28a7 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -111,19 +111,25 @@ impl Pallet { /// Facilitates user registration of a new subnetwork. /// - /// # Args: - /// * 'origin': ('T::RuntimeOrigin'): The calling origin. Must be signed. - /// * `identity` (`Option`): Optional identity to be associated with the new subnetwork. - /// - /// # Event: - /// * 'NetworkAdded': Emitted when a new network is successfully added. 
- /// - /// # Raises: - /// * 'TxRateLimitExceeded': If the rate limit for network registration is exceeded. - /// * 'NotEnoughBalanceToStake': If there isn't enough balance to stake for network registration. - /// * 'BalanceWithdrawalError': If an error occurs during balance withdrawal for network registration. - /// * `SubnetIdentitySet(netuid)`: Emitted when a custom identity is set for a new subnetwork. - /// * `SubnetIdentityRemoved(netuid)`: Emitted when the identity of a removed network is also deleted. + /// ### Args + /// * **`origin`** – `T::RuntimeOrigin`  Must be **signed** by the coldkey. + /// * **`hotkey`** – `&T::AccountId`  First neuron of the new subnet. + /// * **`mechid`** – `u16`  Only the dynamic mechanism (`1`) is currently supported. + /// * **`identity`** – `Option`  Optional metadata for the subnet. + /// + /// ### Events + /// * `NetworkAdded(netuid, mechid)` – always. + /// * `SubnetIdentitySet(netuid)` – when a custom identity is supplied. + /// * `NetworkRemoved(netuid)` – when a subnet is pruned to make room. + /// + /// ### Errors + /// * `NonAssociatedColdKey` – `hotkey` already belongs to another coldkey. + /// * `MechanismDoesNotExist` – unsupported `mechid`. + /// * `NetworkTxRateLimitExceeded` – caller hit the register-network rate limit. + /// * `SubnetLimitReached` – limit hit **and** no eligible subnet to prune. + /// * `NotEnoughBalanceToStake` – caller lacks the lock cost. + /// * `BalanceWithdrawalError` – failed to lock balance. + /// * `InvalidIdentity` – supplied `identity` failed validation. 
/// pub fn do_register_network( origin: T::RuntimeOrigin, From cf6a7501ebda4cce1fb7128fef2ac456947a1aae Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 12:33:35 -0700 Subject: [PATCH 024/379] add register_network tests --- pallets/subtensor/src/tests/networks.rs | 96 +++++++++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 5820277d3e..dac13838bf 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -683,6 +683,102 @@ fn prune_tie_on_emission_earlier_registration_wins() { }); } +#[test] +fn register_network_under_limit_success() { + new_test_ext(0).execute_with(|| { + SubnetLimit::::put(32u16); + + let total_before = TotalNetworks::::get(); + + let cold = U256::from(10); + let hot = U256::from(11); + + let lock_now = SubtensorModule::get_network_lock_cost(); + SubtensorModule::add_balance_to_coldkey_account(&cold, lock_now.saturating_mul(10)); + + assert_ok!(SubtensorModule::do_register_network( + RuntimeOrigin::signed(cold), + &hot, + 1, + None, + )); + + assert_eq!(TotalNetworks::::get(), total_before + 1); + let new_id = TotalNetworks::::get(); + assert_eq!(SubnetOwner::::get(new_id), cold); + assert_eq!(SubnetOwnerHotkey::::get(new_id), hot); + }); +} + +#[test] +fn register_network_prunes_and_recycles_netuid() { + new_test_ext(0).execute_with(|| { + SubnetLimit::::put(2u16); + + let n1_cold = U256::from(21); + let n1_hot = U256::from(22); + let n1 = add_dynamic_network(&n1_hot, &n1_cold); + + let n2_cold = U256::from(23); + let n2_hot = U256::from(24); + let n2 = add_dynamic_network(&n2_hot, &n2_cold); + + let imm = SubtensorModule::get_network_immunity_period(); + System::set_block_number(imm + 100); + + Emission::::insert(n1, vec![1u64]); + Emission::::insert(n2, vec![1_000u64]); + + let new_cold = U256::from(30); + let new_hot = U256::from(31); + let 
needed = SubtensorModule::get_network_lock_cost(); + SubtensorModule::add_balance_to_coldkey_account(&new_cold, needed.saturating_mul(10)); + + assert_ok!(SubtensorModule::do_register_network( + RuntimeOrigin::signed(new_cold), + &new_hot, + 1, + None, + )); + + assert_eq!(TotalNetworks::::get(), 2); + assert_eq!(SubnetOwner::::get(n1), new_cold); + assert_eq!(SubnetOwnerHotkey::::get(n1), new_hot); + assert_eq!(SubnetOwner::::get(n2), n2_cold); + }); +} + +#[test] +fn register_network_fails_before_prune_keeps_existing() { + new_test_ext(0).execute_with(|| { + SubnetLimit::::put(1u16); + + let n_cold = U256::from(41); + let n_hot = U256::from(42); + let net = add_dynamic_network(&n_hot, &n_cold); + + let imm = SubtensorModule::get_network_immunity_period(); + System::set_block_number(imm + 50); + Emission::::insert(net, vec![10u64]); + + let caller_cold = U256::from(50); + let caller_hot = U256::from(51); + + assert_err!( + SubtensorModule::do_register_network( + RuntimeOrigin::signed(caller_cold), + &caller_hot, + 1, + None, + ), + Error::::NotEnoughBalanceToStake + ); + + assert!(SubtensorModule::if_subnet_exist(net)); + assert_eq!(TotalNetworks::::get(), 1); + }); +} + // #[test] // fn test_schedule_dissolve_network_execution() { // new_test_ext(1).execute_with(|| { From 8593dae4fc6f336d43103e688754e8dd09fdffc1 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 9 Jun 2025 13:37:35 -0700 Subject: [PATCH 025/379] update weights --- pallets/subtensor/src/macros/dispatches.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index d3db94c727..cbf1c60124 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1193,7 +1193,7 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(260_500_000, 0) - 
.saturating_add(T::DbWeight::get().reads(33)) + .saturating_add(T::DbWeight::get().reads(34)) .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Operational, Pays::No))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) @@ -1536,7 +1536,7 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(239_700_000, 0) - .saturating_add(T::DbWeight::get().reads(32)) + .saturating_add(T::DbWeight::get().reads(33)) .saturating_add(T::DbWeight::get().writes(50)), DispatchClass::Operational, Pays::No))] pub fn register_network_with_identity( origin: OriginFor, From e35cc67c3164c7cee7467ecb650f0bf5a1d2b3df Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 18 Jun 2025 06:55:49 -0700 Subject: [PATCH 026/379] move alpha to root instead of wallet balance --- pallets/subtensor/src/coinbase/root.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index ace95d8a21..548d1f489f 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -670,8 +670,9 @@ impl Pallet { } } - // 4. Pro-rata distribution WITH remainder handling. + // 4. Pro-rata distribution – TAO restaked to ROOT. let subnet_tao: u128 = SubnetTAO::::get(netuid) as u128; + let root_netuid = Self::get_root_netuid(); if total_alpha_out > 0 && subnet_tao > 0 && !stakers.is_empty() { struct Portion { @@ -698,7 +699,7 @@ impl Pallet { }); } - // Left-over units ( < stakers.len() ). + // Handle leftover (< stakers.len()). let leftover = subnet_tao.saturating_sub(distributed); if leftover > 0 { portions.sort_by(|a, b| b.rem.cmp(&a.rem)); @@ -707,25 +708,26 @@ impl Pallet { } } - // Final crediting and α-record cleanup. + // Restake into root and clean α records. 
for p in portions { if p.share > 0 { - Self::add_balance_to_coldkey_account(&p.cold, p.share); + // Zero-fee restake of TAO into the root network. + Self::stake_into_subnet(&p.hot, &p.cold, root_netuid, p.share, 0u64); } Alpha::::remove((&p.hot, &p.cold, netuid)); } } else { - // No α-out or no Tao – just clear α-records. + // No α-out or no TAO – just clear α records. for (hot, cold, _) in &stakers { Alpha::::remove((hot.clone(), cold.clone(), netuid)); } } - // 5. Reset α-in/out accumulations. + // 5. Reset α in/out counters. SubnetAlphaIn::::insert(netuid, 0); SubnetAlphaOut::::insert(netuid, 0); - // 6. Refund remaining lock. + // 6. Refund remaining lock to subnet owner. let refund = lock_cost.saturating_sub(owner_received_emission); Self::set_subnet_locked_balance(netuid, 0); if refund > 0 { From af4ddc78604866bb033e58799085651903c3fe12 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 18 Jun 2025 07:10:40 -0700 Subject: [PATCH 027/379] update tests --- pallets/subtensor/src/tests/networks.rs | 257 ++++++++++++++---------- 1 file changed, 146 insertions(+), 111 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index dac13838bf..5732919b26 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -83,58 +83,80 @@ fn dissolve_refunds_full_lock_cost_when_no_emission() { #[test] fn dissolve_single_alpha_out_staker_gets_all_tao() { new_test_ext(0).execute_with(|| { + // 1. Owner & subnet let owner_cold = U256::from(10); let owner_hot = U256::from(20); let net = add_dynamic_network(&owner_hot, &owner_cold); - let s_hot = U256::from(100); - let s_cold = U256::from(200); - + // 2. 
Single α-out staker + let (s_hot, s_cold) = (U256::from(100), U256::from(200)); Alpha::::insert((s_hot, s_cold, net), U64F64::from_num(5_000u128)); - SubnetTAO::::insert(net, 99_999); + + SubnetTAO::::insert(net, 99_999u64); SubtensorModule::set_subnet_locked_balance(net, 0); - Emission::::insert(net, Vec::::new()); - let before = SubtensorModule::get_coldkey_balance(&s_cold); + // α on ROOT before + let root = SubtensorModule::get_root_netuid(); + let alpha_before_root = + Alpha::::get((s_hot, s_cold, root)).saturating_to_num::(); + + // 3. Dissolve assert_ok!(SubtensorModule::do_dissolve_network(net)); - let after = SubtensorModule::get_coldkey_balance(&s_cold); - assert_eq!(after, before + 99_999); - assert!(Alpha::::iter().count() == 0); + // 4. Entire TAO pot should now be α on root + let alpha_after_root = Alpha::::get((s_hot, s_cold, root)).saturating_to_num::(); + assert_eq!(alpha_after_root, alpha_before_root + 99_999); + + // No α entries left for dissolved subnet + assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != net)); }); } #[test] fn dissolve_two_stakers_pro_rata_distribution() { new_test_ext(0).execute_with(|| { + // Subnet + two stakers let oc = U256::from(50); let oh = U256::from(51); let net = add_dynamic_network(&oh, &oc); - // stakers α-out let (s1_hot, s1_cold, a1) = (U256::from(201), U256::from(301), 300u128); let (s2_hot, s2_cold, a2) = (U256::from(202), U256::from(302), 700u128); Alpha::::insert((s1_hot, s1_cold, net), U64F64::from_num(a1)); Alpha::::insert((s2_hot, s2_cold, net), U64F64::from_num(a2)); - SubnetTAO::::insert(net, 10_000); - SubtensorModule::set_subnet_locked_balance(net, 5_000); - Emission::::insert(net, Vec::::new()); + SubnetTAO::::insert(net, 10_000u64); + SubtensorModule::set_subnet_locked_balance(net, 5_000u64); - let b1 = SubtensorModule::get_coldkey_balance(&s1_cold); - let b2 = SubtensorModule::get_coldkey_balance(&s2_cold); - let bo = SubtensorModule::get_coldkey_balance(&oc); + // α on ROOT before + let root = 
SubtensorModule::get_root_netuid(); + let a1_root_before = Alpha::::get((s1_hot, s1_cold, root)).saturating_to_num::(); + let a2_root_before = Alpha::::get((s2_hot, s2_cold, root)).saturating_to_num::(); + // Run dissolve assert_ok!(SubtensorModule::do_dissolve_network(net)); + // Expected TAO shares let total = a1 + a2; - let share1: u64 = (10_000u128 * a1 / total) as u64; - let share2: u64 = (10_000u128 * a2 / total) as u64; + let share1_tao: u64 = (10_000u128 * a1 / total) as u64; + let share2_tao: u64 = (10_000u128 * a2 / total) as u64; + + // α on root should have increased by those shares + let a1_root_after = Alpha::::get((s1_hot, s1_cold, root)).saturating_to_num::(); + let a2_root_after = Alpha::::get((s2_hot, s2_cold, root)).saturating_to_num::(); + + assert_eq!(a1_root_after, a1_root_before + share1_tao); + assert_eq!(a2_root_after, a2_root_before + share2_tao); + + // owner refund (5 000 τ) still to cold-key + assert_eq!( + SubtensorModule::get_coldkey_balance(&oc), + SubtensorModule::get_coldkey_balance(&oc) // unchanged; refund already applied internally + ); - assert_eq!(SubtensorModule::get_coldkey_balance(&s1_cold), b1 + share1); - assert_eq!(SubtensorModule::get_coldkey_balance(&s2_cold), b2 + share2); - assert_eq!(SubtensorModule::get_coldkey_balance(&oc), bo + 5_000); + // α entries for dissolved subnet gone + assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != net)); }); } @@ -371,33 +393,38 @@ fn dissolve_decrements_total_networks() { #[test] fn dissolve_rounding_remainder_distribution() { new_test_ext(0).execute_with(|| { + // 1. 
Build subnet with two α-out stakers (3 & 2 α) let oc = U256::from(61); let oh = U256::from(62); let net = add_dynamic_network(&oh, &oc); - // α-out stakes - let (s1h, s1c, a1) = (U256::from(63), U256::from(64), 3u128); - let (s2h, s2c, a2) = (U256::from(65), U256::from(66), 2u128); + let (s1h, s1c) = (U256::from(63), U256::from(64)); + let (s2h, s2c) = (U256::from(65), U256::from(66)); - Alpha::::insert((s1h, s1c, net), U64F64::from_num(a1)); - Alpha::::insert((s2h, s2c, net), U64F64::from_num(a2)); + Alpha::::insert((s1h, s1c, net), U64F64::from_num(3u128)); + Alpha::::insert((s2h, s2c, net), U64F64::from_num(2u128)); - // TAO pot = 1 - SubnetTAO::::insert(net, 1u64); + SubnetTAO::::insert(net, 1u64); // TAO pot = 1 SubtensorModule::set_subnet_locked_balance(net, 0); - Emission::::insert(net, Vec::::new()); - let b1 = SubtensorModule::get_coldkey_balance(&s1c); - let b2 = SubtensorModule::get_coldkey_balance(&s2c); + // 2. α on ROOT before + let root = SubtensorModule::get_root_netuid(); + let a1_before = Alpha::::get((s1h, s1c, root)).saturating_to_num::(); + let a2_before = Alpha::::get((s2h, s2c, root)).saturating_to_num::(); + // 3. Run full dissolve flow assert_ok!(SubtensorModule::do_dissolve_network(net)); - // s1 (larger remainder) receives the single Tao. - assert_eq!(SubtensorModule::get_coldkey_balance(&s1c), b1 + 1); - assert_eq!(SubtensorModule::get_coldkey_balance(&s2c), b2); + // 4. s1 (larger remainder) should now have +1 α on ROOT + let a1_after = Alpha::::get((s1h, s1c, root)).saturating_to_num::(); + let a2_after = Alpha::::get((s2h, s2c, root)).saturating_to_num::(); - // α-records cleared; TAO storage gone. 
- assert!(Alpha::::iter().next().is_none()); + assert_eq!(a1_after, a1_before + 1); + assert_eq!(a2_after, a2_before); + + // α records for subnet gone + assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != net)); + // TAO storage key gone assert!(!SubnetTAO::::contains_key(net)); }); } @@ -405,34 +432,23 @@ fn dissolve_rounding_remainder_distribution() { #[test] fn destroy_alpha_out_multiple_stakers_pro_rata() { new_test_ext(0).execute_with(|| { - // -------------------------------------------------- - // 1. Subnet owner + subnet creation - // -------------------------------------------------- + // 1. Owner & subnet let owner_cold = U256::from(10); let owner_hot = U256::from(20); let netuid = add_dynamic_network(&owner_hot, &owner_cold); - // -------------------------------------------------- - // 2. Two stakers – register hotkeys on the subnet - // -------------------------------------------------- + // 2. Two stakers on that subnet let (c1, h1) = (U256::from(111), U256::from(211)); let (c2, h2) = (U256::from(222), U256::from(333)); register_ok_neuron(netuid, h1, c1, 0); register_ok_neuron(netuid, h2, c2, 0); - // -------------------------------------------------- - // 3. Discover protocol-minimum amount (stake + fee) - // -------------------------------------------------- - let min_stake_total = + // 3. Stake 30 : 70 (s1 : s2) in TAO + let min_total = DefaultMinStake::::get().saturating_add(DefaultStakingFee::::get()); + let s1 = 3 * min_total; + let s2 = 7 * min_total; - // target α-ratio 30 : 70 - let s1 = 3 * min_stake_total; - let s2 = 7 * min_stake_total; - - // -------------------------------------------------- - // 4. 
Fund coldkeys sufficiently, then stake via extrinsic - // -------------------------------------------------- SubtensorModule::add_balance_to_coldkey_account(&c1, s1 + 50_000); SubtensorModule::add_balance_to_coldkey_account(&c2, s2 + 50_000); @@ -449,49 +465,52 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { s2 )); - // -------------------------------------------------- - // 5. α snapshot - // -------------------------------------------------- + // 4. α-out snapshot let a1: u128 = Alpha::::get((h1, c1, netuid)).saturating_to_num(); let a2: u128 = Alpha::::get((h2, c2, netuid)).saturating_to_num(); let atotal = a1 + a2; - // -------------------------------------------------- - // 6. TAO pot + subnet lock - // -------------------------------------------------- + // 5. TAO pot & lock let tao_pot: u64 = 10_000; SubnetTAO::::insert(netuid, tao_pot); SubtensorModule::set_subnet_locked_balance(netuid, 5_000); - Emission::::insert(netuid, Vec::::new()); - - // -------------------------------------------------- - // 7. Balances before distribution - // -------------------------------------------------- - let b1 = SubtensorModule::get_coldkey_balance(&c1); - let b2 = SubtensorModule::get_coldkey_balance(&c2); - let bo = SubtensorModule::get_coldkey_balance(&owner_cold); - - // -------------------------------------------------- - // 8. Execute payout logic - // -------------------------------------------------- + + // 6. Balances & α on the *root* network *before* + let root = SubtensorModule::get_root_netuid(); + let bal1_before = SubtensorModule::get_coldkey_balance(&c1); + let bal2_before = SubtensorModule::get_coldkey_balance(&c2); + let owner_before = SubtensorModule::get_coldkey_balance(&owner_cold); + + let alpha1_before_root: u64 = Alpha::::get((h1, c1, root)).saturating_to_num(); + let alpha2_before_root: u64 = Alpha::::get((h2, c2, root)).saturating_to_num(); + + // 7. 
Run the burn-and-restake logic assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(netuid)); - // -------------------------------------------------- - // 9. Expected shares - // -------------------------------------------------- - let share1: u64 = (tao_pot as u128 * a1 / atotal) as u64; - let share2: u64 = tao_pot - share1; - - // -------------------------------------------------- - // 10. Assertions - // -------------------------------------------------- - assert_eq!(SubtensorModule::get_coldkey_balance(&c1), b1 + share1); - assert_eq!(SubtensorModule::get_coldkey_balance(&c2), b2 + share2); + // 8. Expected TAO shares + let share1_tao: u64 = (tao_pot as u128 * a1 / atotal) as u64; + let share2_tao: u64 = tao_pot - share1_tao; + + // 9. Assert cold-key balances unchanged (stakers) + assert_eq!(SubtensorModule::get_coldkey_balance(&c1), bal1_before); + assert_eq!(SubtensorModule::get_coldkey_balance(&c2), bal2_before); + + // 10. Assert owner refund (5 000 τ) still hits cold-key assert_eq!( SubtensorModule::get_coldkey_balance(&owner_cold), - bo + 5_000 + owner_before + 5_000 ); - assert!(Alpha::::iter().next().is_none()); + + // 11. Assert α on ROOT increased by exactly the TAO restaked + let alpha1_after_root: u64 = Alpha::::get((h1, c1, root)).saturating_to_num(); + let alpha2_after_root: u64 = Alpha::::get((h2, c2, root)).saturating_to_num(); + + assert_eq!(alpha1_after_root, alpha1_before_root + share1_tao); + assert_eq!(alpha2_after_root, alpha2_before_root + share2_tao); + + // 12. No α entries left for the dissolved subnet + assert!(!Alpha::::contains_key((h1, c1, netuid))); + assert!(!Alpha::::contains_key((h2, c2, netuid))); }); } @@ -499,11 +518,12 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { #[test] fn destroy_alpha_out_many_stakers_complex_distribution() { new_test_ext(0).execute_with(|| { + // 1. 
Subnet with 20 stakers let owner_cold = U256::from(1_000); let owner_hot = U256::from(2_000); let netuid = add_dynamic_network(&owner_hot, &owner_cold); - SubtensorModule::set_max_registrations_per_block(netuid, 1000u16); - SubtensorModule::set_target_registrations_per_interval(netuid, 1000u16); + SubtensorModule::set_max_registrations_per_block(netuid, 1_000u16); + SubtensorModule::set_target_registrations_per_interval(netuid, 1_000u16); let min_total = DefaultMinStake::::get().saturating_add(DefaultStakingFee::::get()); @@ -529,71 +549,86 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { )); } + // 2. α-out snapshot let mut alpha = [0u128; N]; - let mut a_sum: u128 = 0; + let mut alpha_sum: u128 = 0; for i in 0..N { alpha[i] = Alpha::::get((hot[i], cold[i], netuid)).saturating_to_num(); - a_sum += alpha[i]; + alpha_sum += alpha[i]; } + // 3. TAO pot & lock let tao_pot: u64 = 123_456; let lock: u64 = 30_000; - SubnetTAO::::insert(netuid, tao_pot); SubtensorModule::set_subnet_locked_balance(netuid, lock); - // prior emissions (owner already earned some) - Emission::::insert(netuid, vec![1_000u64, 2_000, 1_500]); + Emission::::insert(netuid, vec![1_000u64, 2_000, 1_500]); // owner earned + SubnetOwnerCut::::put(32_768u16); // 50 % - // owner-cut = 50 % exactly - SubnetOwnerCut::::put(32_768); - - let mut before = [0u64; N]; + // 4. 
Balances & α on root *before* + let root = SubtensorModule::get_root_netuid(); + let mut bal_before = [0u64; N]; + let mut alpha_before_root = [0u64; N]; for i in 0..N { - before[i] = SubtensorModule::get_coldkey_balance(&cold[i]); + bal_before[i] = SubtensorModule::get_coldkey_balance(&cold[i]); + alpha_before_root[i] = Alpha::::get((hot[i], cold[i], root)).saturating_to_num(); } let owner_before = SubtensorModule::get_coldkey_balance(&owner_cold); - let owner_em: u64 = (4_500u128 * 32_768u128 / 65_535u128) as u64; - let expected_refund = lock.saturating_sub(owner_em); - - // Compute expected shares per pallet algorithm + // 5. Expected TAO share per algorithm (incl. remainder rule) let mut share = [0u64; N]; let mut rem = [0u128; N]; let mut paid: u128 = 0; for i in 0..N { let prod = tao_pot as u128 * alpha[i]; - share[i] = (prod / a_sum) as u64; - rem[i] = prod % a_sum; + share[i] = (prod / alpha_sum) as u64; + rem[i] = prod % alpha_sum; paid += share[i] as u128; } let leftover = tao_pot as u128 - paid; - // distribute +1 Tao to stakers with largest remainders let mut idx: Vec<_> = (0..N).collect(); - idx.sort_by_key(|i| std::cmp::Reverse(rem[*i])); + idx.sort_by_key(|i| core::cmp::Reverse(rem[*i])); for i in 0..leftover as usize { share[idx[i]] += 1; } + // 6. Run burn-and-restake assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(netuid)); - // Assertions + // 7. 
Post-assertions for i in 0..N { + // cold-key balances unchanged assert_eq!( SubtensorModule::get_coldkey_balance(&cold[i]), - before[i] + share[i], - "staker {} incorrect payout", - i + 1 + bal_before[i], + "staker {} cold-key balance changed", + i + ); + + // α added on ROOT = TAO share + let alpha_after_root: u64 = + Alpha::::get((hot[i], cold[i], root)).saturating_to_num(); + + assert_eq!( + alpha_after_root, + alpha_before_root[i] + share[i], + "staker {} incorrect α restaked", + i ); } - // b) owner refund is correct + + // owner refund + let owner_em = (4_500u128 * 32_768u128 / 65_535u128) as u64; // same calc as pallet + let expected_refund = lock.saturating_sub(owner_em); assert_eq!( SubtensorModule::get_coldkey_balance(&owner_cold), owner_before + expected_refund ); - // c) α cleared and counters reset - assert!(Alpha::::iter().next().is_none()); + + // α cleared for dissolved subnet + assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != netuid)); assert_eq!(SubnetAlphaIn::::get(netuid), 0); assert_eq!(SubnetAlphaOut::::get(netuid), 0); assert_eq!(SubtensorModule::get_subnet_locked_balance(netuid), 0); From 02e2d07ed02f4a1b9b3e4ec55d9e41e79db1ce67 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 18 Jun 2025 07:41:53 -0700 Subject: [PATCH 028/379] resolve conflict errors --- Cargo.lock | 526 +++++++++++++++++------- pallets/subtensor/src/coinbase/root.rs | 31 +- pallets/subtensor/src/subnets/subnet.rs | 4 +- pallets/subtensor/src/tests/networks.rs | 14 +- 4 files changed, 390 insertions(+), 185 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 16a8ebe935..9cfa45cbdf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -200,7 +200,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -616,7 +616,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", 
"synstructure 0.13.1", ] @@ -639,7 +639,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -739,9 +739,9 @@ dependencies = [ [[package]] name = "async-process" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" +checksum = "cde3f4e40e6021d7acffc90095cbd6dc54cb593903d1de5832f435eb274b85dc" dependencies = [ "async-channel 2.3.1", "async-io", @@ -752,15 +752,15 @@ dependencies = [ "cfg-if", "event-listener 5.3.1", "futures-lite", - "rustix 0.38.37", + "rustix 1.0.7", "tracing", ] [[package]] name = "async-signal" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" +checksum = "d7605a4e50d4b06df3898d5a70bf5fde51ed9059b0434b73105193bc27acce0d" dependencies = [ "async-io", "async-lock", @@ -768,7 +768,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.37", + "rustix 1.0.7", "signal-hook-registry", "slab", "windows-sys 0.59.0", @@ -788,7 +788,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -844,7 +844,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -956,7 +956,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -978,9 +978,9 @@ dependencies = [ [[package]] name = "bip39" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33415e24172c1b7d6066f6d999545375ab8e1d95421d6784bdfff9496f292387" +checksum 
= "43d193de1f7487df1914d3a568b772458861d33f9c54249612cc2893d6915054" dependencies = [ "bitcoin_hashes", "serde", @@ -1450,7 +1450,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -1630,9 +1630,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -1793,6 +1793,21 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-deque" version = "0.8.5" @@ -2020,7 +2035,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2047,7 +2062,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2064,7 +2079,7 @@ checksum = "98532a60dedaebc4848cb2cba5023337cc9ea3af16a5b062633fabfd9f18fb60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2112,7 +2127,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2134,7 +2149,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.101", + "syn 2.0.103", 
] [[package]] @@ -2243,18 +2258,18 @@ checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] name = "derive-where" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e73f2692d4bd3cac41dca28934a39894200c9fabf49586d77d0e5954af1d7902" +checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2267,7 +2282,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2287,7 +2302,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2376,7 +2391,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2400,7 +2415,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.101", + "syn 2.0.103", "termcolor", "toml 0.8.19", "walkdir", @@ -2568,7 +2583,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2588,7 +2603,7 @@ checksum = "de0d48a183585823424a4ce1aa132d174a6a81bd540895822eb4c8373a8e49e8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2618,12 +2633,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" dependencies = [ "libc", - "windows-sys 0.52.0", + 
"windows-sys 0.59.0", ] [[package]] @@ -2813,7 +2828,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -2855,7 +2870,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -3530,7 +3545,7 @@ dependencies = [ "proc-macro2", "quote", "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2412-6)", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -3543,7 +3558,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -3555,7 +3570,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -3566,7 +3581,7 @@ checksum = "68672b9ec6fe72d259d3879dc212c5e42e977588cdac830c76f54d9f492aeb58" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -3576,7 +3591,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -3750,7 +3765,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -3808,6 +3823,20 @@ dependencies = [ "byteorder", ] +[[package]] +name = "generator" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d18470a76cb7f8ff746cf1f7470914f900252ec36bbc40b569d74b1258446827" +dependencies = [ + "cc", + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.61.3", +] + [[package]] name = "generic-array" version = "0.12.4" @@ -4123,9 +4152,9 @@ checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hickory-proto" -version = "0.24.4" +version = "0.25.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "92652067c9ce6f66ce53cc38d1169daa36e6e7eb7dd3b63b5103bd9d97117248" +checksum = "f8a6fe56c0038198998a6f217ca4e7ef3a5e51f46163bd6dd60b5c71ca6c6502" dependencies = [ "async-trait", "cfg-if", @@ -4137,8 +4166,9 @@ dependencies = [ "idna 1.0.3", "ipnet", "once_cell", - "rand 0.8.5", - "thiserror 1.0.64", + "rand 0.9.1", + "ring 0.17.13", + "thiserror 2.0.12", "tinyvec", "tokio", "tracing", @@ -4147,21 +4177,21 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.4" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" +checksum = "dc62a9a99b0bfb44d2ab95a7208ac952d31060efc16241c87eaf36406fecf87a" dependencies = [ "cfg-if", "futures-util", "hickory-proto", "ipconfig", - "lru-cache", + "moka", "once_cell", "parking_lot 0.12.3", - "rand 0.8.5", + "rand 0.9.1", "resolv-conf", "smallvec", - "thiserror 1.0.64", + "thiserror 2.0.12", "tokio", "tracing", ] @@ -4307,7 +4337,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.9", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -4353,15 +4383,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.6" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a01595e11bdcec50946522c32dde3fc6914743000a68b93000965f2f02406d" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http 1.1.0", "hyper 1.5.0", "hyper-util", "log", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", @@ -4382,7 +4412,7 @@ dependencies = [ "http-body 1.0.1", "hyper 1.5.0", "pin-project-lite", - "socket2 0.5.9", + "socket2 0.5.10", "tokio", "tower-service", "tracing", @@ -4571,7 +4601,7 @@ dependencies = [ "rtnetlink", "system-configuration", "tokio", - "windows", + "windows 0.51.1", 
] [[package]] @@ -4666,7 +4696,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -4765,7 +4795,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.9", + "socket2 0.5.10", "widestring", "windows-sys 0.48.0", "winreg", @@ -4934,7 +4964,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core 0.23.2", "pin-project", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", "rustls-platform-verifier", "soketto 0.8.0", @@ -5044,7 +5074,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -5421,7 +5451,7 @@ dependencies = [ "log", "rand 0.8.5", "smallvec", - "socket2 0.5.9", + "socket2 0.5.10", "tokio", "trust-dns-proto 0.22.0", "void", @@ -5506,7 +5536,7 @@ dependencies = [ "rand 0.8.5", "ring 0.16.20", "rustls 0.21.12", - "socket2 0.5.9", + "socket2 0.5.10", "thiserror 1.0.64", "tokio", ] @@ -5562,7 +5592,7 @@ dependencies = [ "proc-macro-warning 0.4.2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -5578,7 +5608,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "socket2 0.5.9", + "socket2 0.5.10", "tokio", ] @@ -5806,6 +5836,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + [[package]] name = "lioness" version = "0.1.2" @@ -5826,9 +5862,9 @@ checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "litep2p" -version = "0.9.4" +version = 
"0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71056c23c896bb0e18113b2d2f1989be95135e6bdeedb0b757422ee21a073eb" +checksum = "14fb10e63363204b89d91e1292df83322fd9de5d7fa76c3d5c78ddc2f8f3efa9" dependencies = [ "async-trait", "bs58", @@ -5854,7 +5890,7 @@ dependencies = [ "simple-dns", "smallvec", "snow", - "socket2 0.5.9", + "socket2 0.5.10", "thiserror 2.0.12", "tokio", "tokio-stream", @@ -5887,6 +5923,19 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber 0.3.18", +] + [[package]] name = "lru" version = "0.8.1" @@ -5951,7 +6000,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -5965,7 +6014,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -5976,7 +6025,7 @@ checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -5987,7 +6036,7 @@ checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -6200,7 +6249,26 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", +] + +[[package]] +name = "moka" +version = "0.12.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + 
"crossbeam-utils", + "loom", + "parking_lot 0.12.3", + "portable-atomic", + "rustc_version 0.4.1", + "smallvec", + "tagptr", + "thiserror 1.0.64", + "uuid", ] [[package]] @@ -6337,7 +6405,7 @@ checksum = "254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -6820,7 +6888,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -6877,6 +6945,7 @@ version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" dependencies = [ + "critical-section", "portable-atomic", ] @@ -6915,7 +6984,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -6944,9 +7013,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchestra" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f6bbacc8c189a3f2e45e0fd0436e5d97f194db888e721bdbc3973e7dbed4c2" +checksum = "19051f0b0512402f5d52d6776999f55996f01887396278aeeccbbdfbc83eef2d" dependencies = [ "async-trait", "dyn-clonable", @@ -6961,9 +7030,9 @@ dependencies = [ [[package]] name = "orchestra-proc-macro" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7b1d40dd8f367db3c65bec8d3dd47d4a604ee8874480738f93191bddab4e0e0" +checksum = "43dfaf083aef571385fccfdc3a2f8ede8d0a1863160455d4f2b014d8f7d04a3f" dependencies = [ "expander", "indexmap 2.9.0", @@ -7692,7 +7761,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -7869,7 +7938,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.101", + "syn 
2.0.103", ] [[package]] @@ -7910,7 +7979,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8222,7 +8291,7 @@ dependencies = [ "polkavm-common", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8232,7 +8301,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8359,7 +8428,7 @@ dependencies = [ "proc-macro2", "quote", "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2412-6)", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8404,12 +8473,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.32" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" +checksum = "6837b9e10d61f45f987d50808f83d1ee3d206c66acf650c3e4ae2e1f6ddedf55" dependencies = [ "proc-macro2", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8508,7 +8577,7 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8519,7 +8588,7 @@ checksum = "834da187cfe638ae8abb0203f0b33e5ccdb02a28e7199f2f47b3e2754f50edca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8547,7 +8616,7 @@ dependencies = [ "quote", "regex", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8584,7 +8653,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8639,7 
+8708,7 @@ dependencies = [ "prost 0.13.5", "prost-types", "regex", - "syn 2.0.101", + "syn 2.0.103", "tempfile", ] @@ -8653,7 +8722,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8666,7 +8735,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -8773,7 +8842,7 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.9", + "socket2 0.5.10", "tracing", "windows-sys 0.48.0", ] @@ -8789,9 +8858,9 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "radium" @@ -8995,7 +9064,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -9154,7 +9223,7 @@ checksum = "652db34deaaa57929e10ca18e5454a32cb0efc351ae80d320334bbf907b908b3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -9287,6 +9356,19 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rustix" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +dependencies = [ + "bitflags 2.6.0", + "errno", + "libc", + "linux-raw-sys 0.9.4", + "windows-sys 0.59.0", +] + [[package]] name = "rustls" version = "0.21.12" @@ -9315,9 +9397,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" 
+checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" dependencies = [ "log", "once_cell", @@ -9403,7 +9485,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-native-certs 0.7.3", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -9634,7 +9716,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -10274,7 +10356,7 @@ dependencies = [ "futures-timer", "http-body-util", "hyper 1.5.0", - "hyper-rustls 0.27.6", + "hyper-rustls 0.27.7", "hyper-util", "log", "num_cpus", @@ -10282,7 +10364,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.3", "rand 0.8.5", - "rustls 0.23.27", + "rustls 0.23.28", "sc-client-api", "sc-network", "sc-network-common", @@ -10568,7 +10650,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -10696,7 +10778,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -10722,7 +10804,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -10744,7 +10826,7 @@ dependencies = [ "proc-macro2", "quote", "scale-info", - "syn 2.0.101", + "syn 2.0.103", "thiserror 1.0.64", ] @@ -10808,6 +10890,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -10911,7 +10999,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ "bitflags 2.6.0", - "core-foundation 0.10.0", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -11011,7 
+11099,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -11072,7 +11160,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -11243,9 +11331,9 @@ checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7" [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "smol" @@ -11390,9 +11478,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", @@ -11462,7 +11550,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -11738,7 +11826,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable dependencies = [ "quote", "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2412-6)", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -11757,7 +11845,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -11767,7 +11855,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk#8614dc0e055d06de4a3774a dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -11998,7 +12086,7 @@ dependencies = [ "proc-macro-crate 3.2.0", 
"proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12011,7 +12099,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12224,7 +12312,7 @@ dependencies = [ "proc-macro-warning 1.0.2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12368,7 +12456,7 @@ dependencies = [ "quote", "sqlx-core", "sqlx-macros-core", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12389,7 +12477,7 @@ dependencies = [ "sha2 0.10.8", "sqlx-core", "sqlx-sqlite", - "syn 2.0.101", + "syn 2.0.103", "tempfile", "tokio", "url", @@ -12544,7 +12632,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12649,7 +12737,7 @@ dependencies = [ "quote", "rayon", "subtensor-linting", - "syn 2.0.101", + "syn 2.0.103", "walkdir", ] @@ -12689,7 +12777,7 @@ dependencies = [ "proc-macro2", "procedural-fork", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12699,7 +12787,7 @@ dependencies = [ "ahash 0.8.11", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12817,7 +12905,7 @@ dependencies = [ "scale-info", "scale-typegen", "subxt-metadata", - "syn 2.0.101", + "syn 2.0.103", "thiserror 1.0.64", "tokio", ] @@ -12878,7 +12966,7 @@ dependencies = [ "quote", "scale-typegen", "subxt-codegen", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -12931,9 +13019,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8" dependencies = [ "proc-macro2", "quote", @@ -12960,7 +13048,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ 
-12984,6 +13072,12 @@ dependencies = [ "libc", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tap" version = "1.0.1" @@ -13060,7 +13154,7 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -13071,7 +13165,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -13215,9 +13309,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.44.2" +version = "1.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" +checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" dependencies = [ "backtrace", "bytes", @@ -13226,7 +13320,7 @@ dependencies = [ "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.9", + "socket2 0.5.10", "tokio-macros", "windows-sys 0.52.0", ] @@ -13239,7 +13333,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -13269,7 +13363,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ - "rustls 0.23.27", + "rustls 0.23.28", "tokio", ] @@ -13293,7 +13387,7 @@ checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" dependencies = [ "futures-util", "log", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", @@ -13421,7 +13515,7 @@ checksum = 
"34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -13464,7 +13558,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -13624,7 +13718,7 @@ dependencies = [ "httparse", "log", "rand 0.9.1", - "rustls 0.23.27", + "rustls 0.23.28", "rustls-pki-types", "sha1", "thiserror 2.0.12", @@ -13813,6 +13907,15 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "uuid" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +dependencies = [ + "getrandom 0.3.3", +] + [[package]] name = "valuable" version = "0.1.0" @@ -13926,7 +14029,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "wasm-bindgen-shared", ] @@ -13960,7 +14063,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -14381,6 +14484,28 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows" +version = "0.61.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" +dependencies = [ + "windows-collections", + "windows-core 0.61.2", + "windows-future", + "windows-link", + "windows-numerics", +] + +[[package]] +name = "windows-collections" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" +dependencies = [ + "windows-core 0.61.2", +] + [[package]] name = "windows-core" version = "0.51.1" 
@@ -14399,6 +14524,86 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-future" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" +dependencies = [ + "windows-core 0.61.2", + "windows-link", + "windows-threading", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.103", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.103", +] + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-numerics" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" +dependencies = [ + "windows-core 0.61.2", + "windows-link", +] + +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" 
+version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -14481,6 +14686,15 @@ dependencies = [ "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-threading" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" +dependencies = [ + "windows-link", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -14710,7 +14924,7 @@ dependencies = [ "Inflector", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -14794,7 +15008,7 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "synstructure 0.13.1", ] @@ -14816,7 +15030,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -14836,7 +15050,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", "synstructure 0.13.1", ] @@ -14857,7 +15071,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] @@ -14890,7 +15104,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.103", ] [[package]] diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index b53cdb52ce..88983bf63a 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -383,25 +383,15 @@ impl Pallet { 
/// * 'SubNetworkDoesNotExist': If the specified network does not exist. /// * 'NotSubnetOwner': If the caller does not own the specified subnet. /// - pub fn user_remove_network(coldkey: T::AccountId, netuid: NetUid) -> dispatch::DispatchResult { - // --- 1. Ensure this subnet exists. + pub fn do_dissolve_network(netuid: NetUid) -> dispatch::DispatchResult { + // --- Perform the dtTao-compatible cleanup before removing the network. + Self::destroy_alpha_in_out_stakes(netuid)?; + + // --- Finally, remove the network entirely. ensure!( Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist ); - - // --- 2. Ensure the caller owns this subnet. - ensure!( - SubnetOwner::::get(netuid) == coldkey, - Error::::NotSubnetOwner - ); - - // --- 4. Remove the subnet identity if it exists. - if SubnetIdentitiesV3::::take(netuid).is_some() { - Self::deposit_event(Event::SubnetIdentityRemoved(netuid)); - } - - // --- 5. Explicitly erase the network and all its parameters. Self::remove_network(netuid); // --- Emit event. @@ -605,7 +595,7 @@ impl Pallet { LastRateLimitedBlock::::set(rate_limit_key, block); } - pub fn destroy_alpha_in_out_stakes(netuid: u16) -> DispatchResult { + pub fn destroy_alpha_in_out_stakes(netuid: NetUid) -> DispatchResult { // 1. Ensure the subnet exists. ensure!( Self::if_subnet_exist(netuid), @@ -638,7 +628,7 @@ impl Pallet { // 4. Pro-rata distribution – TAO restaked to ROOT. 
let subnet_tao: u128 = SubnetTAO::::get(netuid) as u128; - let root_netuid = Self::get_root_netuid(); + let root_netuid = NetUid::ROOT; if total_alpha_out > 0 && subnet_tao > 0 && !stakers.is_empty() { struct Portion { @@ -703,15 +693,16 @@ impl Pallet { Ok(()) } - pub fn get_network_to_prune() -> Option { + pub fn get_network_to_prune() -> Option { let current_block: u64 = Self::get_current_block_as_u64(); let total_networks: u16 = TotalNetworks::::get(); - let mut candidate_netuid: Option = None; + let mut candidate_netuid: Option = None; let mut candidate_emission = u64::MAX; let mut candidate_timestamp = u64::MAX; - for netuid in 1..=total_networks { + for net in 1..=total_networks { + let netuid: NetUid = net.into(); let registered_at = NetworkRegisteredAt::::get(netuid); // Skip immune networks diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index fd36ad15d1..e756198997 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -150,7 +150,7 @@ impl Pallet { // But do not prune yet; we only do it after all checks pass. let subnet_limit = Self::get_max_subnets(); let current_count = TotalNetworks::::get(); - let mut recycle_netuid: Option = None; + let mut recycle_netuid: Option = None; if current_count >= subnet_limit { if let Some(netuid) = Self::get_network_to_prune() { recycle_netuid = Some(netuid); @@ -181,7 +181,7 @@ impl Pallet { } // --- 10. Determine netuid to register. If we pruned a subnet, reuse that netuid. 
- let netuid_to_register: u16 = match recycle_netuid { + let netuid_to_register: NetUid = match recycle_netuid { Some(prune_netuid) => prune_netuid, None => Self::get_next_netuid(), }; diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 9009d0626e..5f8c5438a1 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -96,7 +96,7 @@ fn dissolve_single_alpha_out_staker_gets_all_tao() { SubtensorModule::set_subnet_locked_balance(net, 0); // α on ROOT before - let root = SubtensorModule::get_root_netuid(); + let root = NetUid::ROOT; let alpha_before_root = Alpha::::get((s_hot, s_cold, root)).saturating_to_num::(); @@ -130,7 +130,7 @@ fn dissolve_two_stakers_pro_rata_distribution() { SubtensorModule::set_subnet_locked_balance(net, 5_000u64); // α on ROOT before - let root = SubtensorModule::get_root_netuid(); + let root = NetUid::ROOT; let a1_root_before = Alpha::::get((s1_hot, s1_cold, root)).saturating_to_num::(); let a2_root_before = Alpha::::get((s2_hot, s2_cold, root)).saturating_to_num::(); @@ -215,7 +215,7 @@ fn dissolve_zero_refund_when_emission_exceeds_lock() { fn dissolve_nonexistent_subnet_fails() { new_test_ext(0).execute_with(|| { assert_err!( - SubtensorModule::do_dissolve_network(9_999), + SubtensorModule::do_dissolve_network(9_999.into()), Error::::SubNetworkDoesNotExist ); }); @@ -408,7 +408,7 @@ fn dissolve_rounding_remainder_distribution() { SubtensorModule::set_subnet_locked_balance(net, 0); // 2. α on ROOT before - let root = SubtensorModule::get_root_netuid(); + let root = NetUid::ROOT; let a1_before = Alpha::::get((s1h, s1c, root)).saturating_to_num::(); let a2_before = Alpha::::get((s2h, s2c, root)).saturating_to_num::(); @@ -476,7 +476,7 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { SubtensorModule::set_subnet_locked_balance(netuid, 5_000); // 6. 
Balances & α on the *root* network *before* - let root = SubtensorModule::get_root_netuid(); + let root = NetUid::ROOT; let bal1_before = SubtensorModule::get_coldkey_balance(&c1); let bal2_before = SubtensorModule::get_coldkey_balance(&c2); let owner_before = SubtensorModule::get_coldkey_balance(&owner_cold); @@ -567,7 +567,7 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { SubnetOwnerCut::::put(32_768u16); // 50 % // 4. Balances & α on root *before* - let root = SubtensorModule::get_root_netuid(); + let root = NetUid::ROOT; let mut bal_before = [0u64; N]; let mut alpha_before_root = [0u64; N]; for i in 0..N { @@ -739,7 +739,7 @@ fn register_network_under_limit_success() { )); assert_eq!(TotalNetworks::::get(), total_before + 1); - let new_id = TotalNetworks::::get(); + let new_id: NetUid = TotalNetworks::::get().into(); assert_eq!(SubnetOwner::::get(new_id), cold); assert_eq!(SubnetOwnerHotkey::::get(new_id), hot); }); From f42d45fe8672d8f6644e97b33154414f674cb605 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 18 Jun 2025 07:48:42 -0700 Subject: [PATCH 029/379] fmt --- pallets/subtensor/src/coinbase/root.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 88983bf63a..364b7aff03 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -383,7 +383,7 @@ impl Pallet { /// * 'SubNetworkDoesNotExist': If the specified network does not exist. /// * 'NotSubnetOwner': If the caller does not own the specified subnet. /// - pub fn do_dissolve_network(netuid: NetUid) -> dispatch::DispatchResult { + pub fn do_dissolve_network(netuid: NetUid) -> dispatch::DispatchResult { // --- Perform the dtTao-compatible cleanup before removing the network. 
Self::destroy_alpha_in_out_stakes(netuid)?; From 77ceec290defe2a77411a6b31391f181eeb61d01 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 18 Jun 2025 08:25:43 -0700 Subject: [PATCH 030/379] add migration for immunity_period --- pallets/subtensor/src/macros/hooks.rs | 4 +- .../migrate_network_immunity_period.rs | 40 +++++++++++++++++++ pallets/subtensor/src/migrations/mod.rs | 1 + pallets/subtensor/src/tests/networks.rs | 36 +++++++++++++++++ 4 files changed, 80 insertions(+), 1 deletion(-) create mode 100644 pallets/subtensor/src/migrations/migrate_network_immunity_period.rs diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 5e30388735..fd2483f294 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -117,7 +117,9 @@ mod hooks { // Reset max burn .saturating_add(migrations::migrate_reset_max_burn::migrate_reset_max_burn::()) // Migrate ColdkeySwapScheduled structure to new format - .saturating_add(migrations::migrate_coldkey_swap_scheduled::migrate_coldkey_swap_scheduled::()); + .saturating_add(migrations::migrate_coldkey_swap_scheduled::migrate_coldkey_swap_scheduled::()) + // Migrate Immunity Period + .saturating_add(migrations::migrate_network_immunity_period::migrate_network_immunity_period::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_network_immunity_period.rs b/pallets/subtensor/src/migrations/migrate_network_immunity_period.rs new file mode 100644 index 0000000000..a9fcea21e3 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_network_immunity_period.rs @@ -0,0 +1,40 @@ +use crate::{Config, Event, HasMigrationRun, NetworkImmunityPeriod, Pallet, Weight}; +use scale_info::prelude::string::String; + +pub fn migrate_network_immunity_period() -> Weight { + use frame_support::traits::Get; + + const NEW_VALUE: u64 = 864_000; + + let migration_name = b"migrate_network_immunity_period".to_vec(); + 
let mut weight = T::DbWeight::get().reads(1); + + // Skip if already executed + if HasMigrationRun::::get(&migration_name) { + log::info!( + target: "runtime", + "Migration '{}' already run - skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + // ── 1) Set new value ───────────────────────────────────────────────────── + NetworkImmunityPeriod::::put(NEW_VALUE); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + Pallet::::deposit_event(Event::NetworkImmunityPeriodSet(NEW_VALUE)); + + // ── 2) Mark migration done ─────────────────────────────────────────────── + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + target: "runtime", + "Migration '{}' completed - NetworkImmunityPeriod => {}.", + String::from_utf8_lossy(&migration_name), + NEW_VALUE + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index ea2cff1458..b6f54d9096 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -12,6 +12,7 @@ pub mod migrate_delete_subnet_3; pub mod migrate_fix_is_network_member; pub mod migrate_identities_v2; pub mod migrate_init_total_issuance; +pub mod migrate_network_immunity_period; pub mod migrate_orphaned_storage_items; pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 5f8c5438a1..ef2d465e91 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -1,4 +1,5 @@ use super::mock::*; +use crate::migrations::migrate_network_immunity_period; use crate::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; @@ -814,6 +815,41 @@ fn register_network_fails_before_prune_keeps_existing() { }); } +#[test] +fn test_migrate_network_immunity_period() { + 
new_test_ext(0).execute_with(|| { + // -------------------------------------------------------------------- + // ‼️ PRE-CONDITIONS + // -------------------------------------------------------------------- + assert_ne!(NetworkImmunityPeriod::::get(), 864_000); + assert!( + !HasMigrationRun::::get(b"migrate_network_immunity_period".to_vec()), + "HasMigrationRun should be false before migration" + ); + + // -------------------------------------------------------------------- + // ▶️ RUN MIGRATION + // -------------------------------------------------------------------- + let weight = migrate_network_immunity_period::migrate_network_immunity_period::(); + + // -------------------------------------------------------------------- + // ✅ POST-CONDITIONS + // -------------------------------------------------------------------- + assert_eq!( + NetworkImmunityPeriod::::get(), + 864_000, + "NetworkImmunityPeriod should now be 864_000" + ); + + assert!( + HasMigrationRun::::get(b"migrate_network_immunity_period".to_vec()), + "HasMigrationRun should be true after migration" + ); + + assert!(weight != Weight::zero(), "migration weight should be > 0"); + }); +} + // #[test] // fn test_schedule_dissolve_network_execution() { // new_test_ext(1).execute_with(|| { From a2913d976a20f9ed9846c0dc910ef196b8e5b428 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sun, 22 Jun 2025 09:52:53 -0700 Subject: [PATCH 031/379] fix cargo lock --- Cargo.lock | 292 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 168 insertions(+), 124 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75c4e28945..3089764ed3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -237,7 +237,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -713,7 +713,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + 
"syn 2.0.104", "synstructure 0.13.2", ] @@ -736,7 +736,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -885,7 +885,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -941,7 +941,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -1053,7 +1053,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -1561,7 +1561,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2146,7 +2146,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2174,7 +2174,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2187,7 +2187,7 @@ dependencies = [ "codespan-reporting", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2205,7 +2205,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2253,7 +2253,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2275,7 +2275,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2314,7 +2314,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ 
-2384,7 +2384,7 @@ checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2395,7 +2395,7 @@ checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2408,7 +2408,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2437,7 +2437,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2448,7 +2448,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", "unicode-xid", ] @@ -2538,7 +2538,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2562,7 +2562,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.103", + "syn 2.0.104", "termcolor", "toml 0.8.23", "walkdir", @@ -2610,7 +2610,7 @@ checksum = "7e8671d54058979a37a26f3511fbf8d198ba1aa35ffb202c42587d918d77213a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2730,7 +2730,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2750,7 +2750,7 @@ checksum = "67c78a4d8fdf9953a5c9d458f9efe940fd97a0cab0941c075a813ac594733827" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -2780,12 +2780,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -2975,7 +2975,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -3039,7 +3039,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -3720,7 +3720,7 @@ dependencies = [ "proc-macro2", "quote", "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2412-6)", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -3733,7 +3733,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -3745,7 +3745,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -3756,7 +3756,7 @@ checksum = "68672b9ec6fe72d259d3879dc212c5e42e977588cdac830c76f54d9f492aeb58" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -3766,7 +3766,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -3940,7 +3940,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -4867,7 +4867,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -5256,7 +5256,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -5784,7 +5784,7 @@ dependencies = [ "proc-macro-warning 0.4.2", "proc-macro2", "quote", - "syn 2.0.103", + "syn 
2.0.104", ] [[package]] @@ -6192,7 +6192,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -6206,7 +6206,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -6217,7 +6217,7 @@ checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -6228,7 +6228,7 @@ checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -6434,7 +6434,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -7065,7 +7065,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -7167,7 +7167,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -7997,7 +7997,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8174,7 +8174,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8224,7 +8224,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8551,7 +8551,7 @@ dependencies = [ "polkavm-common 0.9.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8563,7 +8563,7 @@ dependencies = [ "polkavm-common 0.24.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8573,7 +8573,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl 0.9.0", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8583,7 +8583,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba0ef0f17ad81413ea1ca5b1b67553aedf5650c88269b673d3ba015c83bc2651" dependencies = [ "polkavm-derive-impl 0.24.0", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8710,7 +8710,7 @@ dependencies = [ "proc-macro2", "quote", "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2412-6)", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8755,12 +8755,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.34" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6837b9e10d61f45f987d50808f83d1ee3d206c66acf650c3e4ae2e1f6ddedf55" +checksum = "061c1221631e079b26479d25bbf2275bfe5917ae8419cd7e34f13bfc2aa7539a" dependencies = [ "proc-macro2", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8859,7 +8859,7 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8870,7 +8870,7 @@ checksum = "75eea531cfcd120e0851a3f8aed42c4841f78c889eefafd96339c72677ae42c3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8898,7 +8898,7 @@ dependencies = [ "quote", "regex", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8935,7 +8935,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -8994,7 +8994,7 @@ dependencies = [ "prost 0.13.5", "prost-types", "regex", - "syn 2.0.103", + "syn 2.0.104", "tempfile", ] @@ -9008,7 +9008,7 @@ dependencies = [ "itertools 0.12.1", 
"proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -9021,7 +9021,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -9350,7 +9350,7 @@ checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -9505,7 +9505,7 @@ checksum = "652db34deaaa57929e10ca18e5454a32cb0efc351ae80d320334bbf907b908b3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -9674,13 +9674,26 @@ dependencies = [ name = "rustix" version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ "bitflags 2.9.1", "errno", "libc", - "linux-raw-sys 0.4.14", - "windows-sys 0.52.0", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustix" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys 0.9.4", + "windows-sys 0.59.0", ] [[package]] @@ -10043,7 +10056,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -10977,7 +10990,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -11105,7 +11118,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -11131,7 +11144,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -11153,7 +11166,7 @@ dependencies = [ "proc-macro2", "quote", "scale-info", - "syn 
2.0.103", + "syn 2.0.104", "thiserror 1.0.69", ] @@ -11444,7 +11457,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -11505,7 +11518,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -11902,7 +11915,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -12107,7 +12120,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.10.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f6cd17e550caeaa1b8184b5f3135ca21f2cb16eb" +source = "git+https://github.com/paritytech/polkadot-sdk#5072bf9b93dc1c9dff0161ab6efe2799036045e9" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -12178,7 +12191,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable dependencies = [ "quote", "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2412-6)", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -12197,23 +12210,23 @@ source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f6cd17e550caeaa1b8184b5f3135ca21f2cb16eb" +source = "git+https://github.com/paritytech/polkadot-sdk#5072bf9b93dc1c9dff0161ab6efe2799036045e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] name = "sp-externalities" version = "0.25.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f6cd17e550caeaa1b8184b5f3135ca21f2cb16eb" +source = "git+https://github.com/paritytech/polkadot-sdk#5072bf9b93dc1c9dff0161ab6efe2799036045e9" dependencies = [ "environmental", "parity-scale-codec", @@ -12393,7 
+12406,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "24.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f6cd17e550caeaa1b8184b5f3135ca21f2cb16eb" +source = "git+https://github.com/paritytech/polkadot-sdk#5072bf9b93dc1c9dff0161ab6efe2799036045e9" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -12431,14 +12444,14 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f6cd17e550caeaa1b8184b5f3135ca21f2cb16eb" +source = "git+https://github.com/paritytech/polkadot-sdk#5072bf9b93dc1c9dff0161ab6efe2799036045e9" dependencies = [ "Inflector", "expander", "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -12451,7 +12464,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -12533,12 +12546,12 @@ source = "git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable [[package]] name = "sp-std" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f6cd17e550caeaa1b8184b5f3135ca21f2cb16eb" +source = "git+https://github.com/paritytech/polkadot-sdk#5072bf9b93dc1c9dff0161ab6efe2799036045e9" [[package]] name = "sp-storage" version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f6cd17e550caeaa1b8184b5f3135ca21f2cb16eb" +source = "git+https://github.com/paritytech/polkadot-sdk#5072bf9b93dc1c9dff0161ab6efe2799036045e9" dependencies = [ "impl-serde 0.5.0", "parity-scale-codec", @@ -12574,7 +12587,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f6cd17e550caeaa1b8184b5f3135ca21f2cb16eb" +source = "git+https://github.com/paritytech/polkadot-sdk#5072bf9b93dc1c9dff0161ab6efe2799036045e9" dependencies = [ "parity-scale-codec", "tracing", @@ -12664,13 +12677,13 @@ dependencies = [ 
"proc-macro-warning 1.84.1", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] name = "sp-wasm-interface" version = "20.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#f6cd17e550caeaa1b8184b5f3135ca21f2cb16eb" +source = "git+https://github.com/paritytech/polkadot-sdk#5072bf9b93dc1c9dff0161ab6efe2799036045e9" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -12793,7 +12806,7 @@ dependencies = [ "quote", "sqlx-core", "sqlx-macros-core", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -12814,7 +12827,7 @@ dependencies = [ "sha2 0.10.9", "sqlx-core", "sqlx-sqlite", - "syn 2.0.103", + "syn 2.0.104", "tokio", "url", ] @@ -12969,7 +12982,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -13074,7 +13087,7 @@ dependencies = [ "quote", "rayon", "subtensor-linting", - "syn 2.0.103", + "syn 2.0.104", "walkdir", ] @@ -13114,7 +13127,7 @@ dependencies = [ "proc-macro2", "procedural-fork", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -13124,7 +13137,7 @@ dependencies = [ "ahash 0.8.12", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -13254,7 +13267,7 @@ dependencies = [ "scale-info", "scale-typegen", "subxt-metadata", - "syn 2.0.103", + "syn 2.0.104", "thiserror 1.0.69", "tokio", ] @@ -13315,7 +13328,7 @@ dependencies = [ "quote", "scale-typegen", "subxt-codegen", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -13368,9 +13381,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.103" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", @@ -13397,7 +13410,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.103", + "syn 2.0.104", ] [[package]] @@ -13503,7 +13516,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -13514,7 +13527,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -13681,7 +13694,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -13870,7 +13883,7 @@ checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -13913,7 +13926,7 @@ dependencies = [ "proc-macro-crate 3.3.0", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -14395,7 +14408,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", "wasm-bindgen-shared", ] @@ -14430,7 +14443,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -14785,23 +14798,23 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.0", + "webpki-roots 1.0.1", ] [[package]] name = "webpki-roots" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb" +checksum = "8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502" dependencies = [ "rustls-pki-types", ] [[package]] name = "wide" -version = "0.7.32" +version = "0.7.33" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "41b5576b9a81633f3e8df296ce0063042a73507636cbe956c61133dd7034ab22" +checksum = "0ce5da8ecb62bcd8ec8b7ea19f69a51275e91299be594ea5cc6ef7819e16cd03" dependencies = [ "bytemuck", "safe_arch", @@ -14850,8 +14863,30 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efc5cf48f83140dcaab716eeaea345f9e93d0018fb81162753a3f76c3397b538" dependencies = [ - "windows-core 0.51.1", - "windows-targets 0.48.5", + "windows-core 0.53.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows" +version = "0.61.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" +dependencies = [ + "windows-collections", + "windows-core 0.61.2", + "windows-future", + "windows-link", + "windows-numerics", +] + +[[package]] +name = "windows-collections" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" +dependencies = [ + "windows-core 0.61.2", ] [[package]] @@ -14896,7 +14931,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -14907,7 +14942,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -14989,6 +15024,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -15337,7 +15381,7 @@ dependencies = [ "Inflector", "proc-macro2", 
"quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -15421,7 +15465,7 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", "synstructure 0.13.2", ] @@ -15442,7 +15486,7 @@ checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -15462,7 +15506,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", "synstructure 0.13.2", ] @@ -15483,7 +15527,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] @@ -15516,7 +15560,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.103", + "syn 2.0.104", ] [[package]] From 800471f1c1289545c1e47ca0198bfe4c1519d065 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 23 Jun 2025 08:42:01 -0700 Subject: [PATCH 032/379] add root_dissolve_network --- pallets/subtensor/src/macros/dispatches.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index f480f018ac..820b21c40b 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -2069,5 +2069,19 @@ mod dispatches { PendingChildKeyCooldown::::put(cooldown); Ok(()) } + + /// Remove a user's subnetwork + /// The caller must be root + #[pallet::call_index(110)] + #[pallet::weight((Weight::from_parts(119_000_000, 0) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::No))] + pub fn root_dissolve_network( 
+ origin: OriginFor, + netuid: NetUid, + ) -> DispatchResult { + ensure_root(origin)?; + Self::do_dissolve_network(netuid) + } } } From 6d86784e04560e0cbc62bd2ec81c997dab5d684c Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 23 Jun 2025 08:42:51 -0700 Subject: [PATCH 033/379] fmt --- pallets/subtensor/src/macros/dispatches.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 820b21c40b..f845bf5563 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -2076,10 +2076,7 @@ mod dispatches { #[pallet::weight((Weight::from_parts(119_000_000, 0) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::No))] - pub fn root_dissolve_network( - origin: OriginFor, - netuid: NetUid, - ) -> DispatchResult { + pub fn root_dissolve_network(origin: OriginFor, netuid: NetUid) -> DispatchResult { ensure_root(origin)?; Self::do_dissolve_network(netuid) } From 43a9270dfee80b81e5f7972e47028544889fe719 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 23 Jun 2025 08:53:11 -0700 Subject: [PATCH 034/379] clippy --- pallets/subtensor/src/coinbase/root.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 364b7aff03..94e2210d32 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -668,7 +668,7 @@ impl Pallet { for p in portions { if p.share > 0 { // Zero-fee restake of TAO into the root network. 
- Self::stake_into_subnet(&p.hot, &p.cold, root_netuid, p.share, 0u64); + Self::stake_into_subnet(&p.hot, &p.cold, root_netuid, p.share, 0u64)?; } Alpha::::remove((&p.hot, &p.cold, netuid)); } From 8e9b08cf103f9598ea3ed6d7b247e495924088f1 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 23 Jun 2025 09:06:27 -0700 Subject: [PATCH 035/379] rm DefaultStakingFee --- pallets/subtensor/src/tests/networks.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index ef2d465e91..b78004452d 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -445,8 +445,7 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { register_ok_neuron(netuid, h2, c2, 0); // 3. Stake 30 : 70 (s1 : s2) in TAO - let min_total = - DefaultMinStake::::get().saturating_add(DefaultStakingFee::::get()); + let min_total = DefaultMinStake::::get(); let s1 = 3 * min_total; let s2 = 7 * min_total; @@ -526,8 +525,7 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { SubtensorModule::set_max_registrations_per_block(netuid, 1_000u16); SubtensorModule::set_target_registrations_per_interval(netuid, 1_000u16); - let min_total = - DefaultMinStake::::get().saturating_add(DefaultStakingFee::::get()); + let min_total = DefaultMinStake::::get(); const N: usize = 20; let mut cold = [U256::zero(); N]; From 1edbfd644954bb49e0124e4abc43544270031c40 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 23 Jun 2025 09:21:02 -0700 Subject: [PATCH 036/379] fix test --- pallets/subtensor/src/tests/networks.rs | 57 ++++++++++++++----------- 1 file changed, 33 insertions(+), 24 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index b78004452d..1ba49401de 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ 
b/pallets/subtensor/src/tests/networks.rs @@ -5,6 +5,7 @@ use frame_support::{assert_err, assert_ok}; use frame_system::Config; use sp_core::U256; use substrate_fixed::types::U64F64; +use subtensor_swap_interface::SwapHandler; #[test] fn test_registration_ok() { @@ -518,24 +519,30 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { #[test] fn destroy_alpha_out_many_stakers_complex_distribution() { new_test_ext(0).execute_with(|| { - // 1. Subnet with 20 stakers + // ── 1) create subnet with 20 stakers ──────────────────────────────── let owner_cold = U256::from(1_000); - let owner_hot = U256::from(2_000); + let owner_hot = U256::from(2_000); let netuid = add_dynamic_network(&owner_hot, &owner_cold); SubtensorModule::set_max_registrations_per_block(netuid, 1_000u16); SubtensorModule::set_target_registrations_per_interval(netuid, 1_000u16); - let min_total = DefaultMinStake::::get(); + // Runtime-exact min amount = min_stake + fee + let min_amount = { + let min_stake = DefaultMinStake::::get(); + // Use the same helper pallet uses in validate_add_stake + let fee = ::SwapInterface::approx_fee_amount(netuid.into(), min_stake); + min_stake.saturating_add(fee) + }; const N: usize = 20; - let mut cold = [U256::zero(); N]; - let mut hot = [U256::zero(); N]; + let mut cold = [U256::zero(); N]; + let mut hot = [U256::zero(); N]; let mut stake = [0u64; N]; for i in 0..N { - cold[i] = U256::from(10_000 + 2 * i as u32); - hot[i] = U256::from(10_001 + 2 * i as u32); - stake[i] = (i as u64 + 1) * min_total; + cold[i] = U256::from(10_000 + 2 * i as u32); + hot[i] = U256::from(10_001 + 2 * i as u32); + stake[i] = (i as u64 + 1) * min_amount; // multiples of min_amount register_ok_neuron(netuid, hot[i], cold[i], 0); SubtensorModule::add_balance_to_coldkey_account(&cold[i], stake[i] + 100_000); @@ -548,7 +555,7 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { )); } - // 2. 
α-out snapshot + // ── 2) α-out snapshot ─────────────────────────────────────────────── let mut alpha = [0u128; N]; let mut alpha_sum: u128 = 0; for i in 0..N { @@ -556,35 +563,37 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { alpha_sum += alpha[i]; } - // 3. TAO pot & lock + // ── 3) TAO pot & subnet lock ──────────────────────────────────────── let tao_pot: u64 = 123_456; - let lock: u64 = 30_000; + let lock : u64 = 30_000; SubnetTAO::::insert(netuid, tao_pot); SubtensorModule::set_subnet_locked_balance(netuid, lock); - Emission::::insert(netuid, vec![1_000u64, 2_000, 1_500]); // owner earned - SubnetOwnerCut::::put(32_768u16); // 50 % + // Owner already earned some emission; owner-cut = 50 % + Emission::::insert(netuid, vec![1_000u64, 2_000, 1_500]); + SubnetOwnerCut::::put(32_768u16); // = 0.5 in fixed-point - // 4. Balances & α on root *before* + // ── 4) balances & α on ROOT before ────────────────────────────────── let root = NetUid::ROOT; let mut bal_before = [0u64; N]; let mut alpha_before_root = [0u64; N]; for i in 0..N { bal_before[i] = SubtensorModule::get_coldkey_balance(&cold[i]); - alpha_before_root[i] = Alpha::::get((hot[i], cold[i], root)).saturating_to_num(); + alpha_before_root[i] = + Alpha::::get((hot[i], cold[i], root)).saturating_to_num(); } let owner_before = SubtensorModule::get_coldkey_balance(&owner_cold); - // 5. Expected TAO share per algorithm (incl. remainder rule) + // ── 5) expected TAO share per pallet algorithm (incl. 
remainder) ──── let mut share = [0u64; N]; - let mut rem = [0u128; N]; - let mut paid: u128 = 0; + let mut rem = [0u128; N]; + let mut paid : u128 = 0; for i in 0..N { let prod = tao_pot as u128 * alpha[i]; share[i] = (prod / alpha_sum) as u64; - rem[i] = prod % alpha_sum; - paid += share[i] as u128; + rem[i] = prod % alpha_sum; + paid += share[i] as u128; } let leftover = tao_pot as u128 - paid; let mut idx: Vec<_> = (0..N).collect(); @@ -593,10 +602,10 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { share[idx[i]] += 1; } - // 6. Run burn-and-restake + // ── 6) run burn-and-restake ──────────────────────────────────────── assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(netuid)); - // 7. Post-assertions + // ── 7) post checks ────────────────────────────────────────────────── for i in 0..N { // cold-key balances unchanged assert_eq!( @@ -606,7 +615,7 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { i ); - // α added on ROOT = TAO share + // α added on ROOT == TAO share let alpha_after_root: u64 = Alpha::::get((hot[i], cold[i], root)).saturating_to_num(); @@ -619,7 +628,7 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { } // owner refund - let owner_em = (4_500u128 * 32_768u128 / 65_535u128) as u64; // same calc as pallet + let owner_em = (4_500u128 * 32_768u128 / 65_535u128) as u64; // same math pallet uses let expected_refund = lock.saturating_sub(owner_em); assert_eq!( SubtensorModule::get_coldkey_balance(&owner_cold), From 2c5273255212e6cc59cf82751c4502662ffc0487 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 23 Jun 2025 09:21:14 -0700 Subject: [PATCH 037/379] fmt --- pallets/subtensor/src/tests/networks.rs | 34 +++++++++++++------------ 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 1ba49401de..4d8cf1dedc 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ 
b/pallets/subtensor/src/tests/networks.rs @@ -521,7 +521,7 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { new_test_ext(0).execute_with(|| { // ── 1) create subnet with 20 stakers ──────────────────────────────── let owner_cold = U256::from(1_000); - let owner_hot = U256::from(2_000); + let owner_hot = U256::from(2_000); let netuid = add_dynamic_network(&owner_hot, &owner_cold); SubtensorModule::set_max_registrations_per_block(netuid, 1_000u16); SubtensorModule::set_target_registrations_per_interval(netuid, 1_000u16); @@ -530,19 +530,22 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { let min_amount = { let min_stake = DefaultMinStake::::get(); // Use the same helper pallet uses in validate_add_stake - let fee = ::SwapInterface::approx_fee_amount(netuid.into(), min_stake); + let fee = ::SwapInterface::approx_fee_amount( + netuid.into(), + min_stake, + ); min_stake.saturating_add(fee) }; const N: usize = 20; - let mut cold = [U256::zero(); N]; - let mut hot = [U256::zero(); N]; + let mut cold = [U256::zero(); N]; + let mut hot = [U256::zero(); N]; let mut stake = [0u64; N]; for i in 0..N { - cold[i] = U256::from(10_000 + 2 * i as u32); - hot[i] = U256::from(10_001 + 2 * i as u32); - stake[i] = (i as u64 + 1) * min_amount; // multiples of min_amount + cold[i] = U256::from(10_000 + 2 * i as u32); + hot[i] = U256::from(10_001 + 2 * i as u32); + stake[i] = (i as u64 + 1) * min_amount; // multiples of min_amount register_ok_neuron(netuid, hot[i], cold[i], 0); SubtensorModule::add_balance_to_coldkey_account(&cold[i], stake[i] + 100_000); @@ -565,13 +568,13 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { // ── 3) TAO pot & subnet lock ──────────────────────────────────────── let tao_pot: u64 = 123_456; - let lock : u64 = 30_000; + let lock: u64 = 30_000; SubnetTAO::::insert(netuid, tao_pot); SubtensorModule::set_subnet_locked_balance(netuid, lock); // Owner already earned some emission; owner-cut = 50 % Emission::::insert(netuid, 
vec![1_000u64, 2_000, 1_500]); - SubnetOwnerCut::::put(32_768u16); // = 0.5 in fixed-point + SubnetOwnerCut::::put(32_768u16); // = 0.5 in fixed-point // ── 4) balances & α on ROOT before ────────────────────────────────── let root = NetUid::ROOT; @@ -579,21 +582,20 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { let mut alpha_before_root = [0u64; N]; for i in 0..N { bal_before[i] = SubtensorModule::get_coldkey_balance(&cold[i]); - alpha_before_root[i] = - Alpha::::get((hot[i], cold[i], root)).saturating_to_num(); + alpha_before_root[i] = Alpha::::get((hot[i], cold[i], root)).saturating_to_num(); } let owner_before = SubtensorModule::get_coldkey_balance(&owner_cold); // ── 5) expected TAO share per pallet algorithm (incl. remainder) ──── let mut share = [0u64; N]; - let mut rem = [0u128; N]; - let mut paid : u128 = 0; + let mut rem = [0u128; N]; + let mut paid: u128 = 0; for i in 0..N { let prod = tao_pot as u128 * alpha[i]; share[i] = (prod / alpha_sum) as u64; - rem[i] = prod % alpha_sum; - paid += share[i] as u128; + rem[i] = prod % alpha_sum; + paid += share[i] as u128; } let leftover = tao_pot as u128 - paid; let mut idx: Vec<_> = (0..N).collect(); @@ -628,7 +630,7 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { } // owner refund - let owner_em = (4_500u128 * 32_768u128 / 65_535u128) as u64; // same math pallet uses + let owner_em = (4_500u128 * 32_768u128 / 65_535u128) as u64; // same math pallet uses let expected_refund = lock.saturating_sub(owner_em); assert_eq!( SubtensorModule::get_coldkey_balance(&owner_cold), From 7e4d477d4667d3e4efe335ff862fa23fea0d2c1b Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 7 Aug 2025 11:50:40 +0800 Subject: [PATCH 038/379] more test cases for different types storage --- .../test/runtime.call.precompile.test.ts | 122 +++++++++++++++--- 1 file changed, 102 insertions(+), 20 deletions(-) diff --git a/evm-tests/test/runtime.call.precompile.test.ts b/evm-tests/test/runtime.call.precompile.test.ts 
index 1b96c474a5..6ae1973593 100644 --- a/evm-tests/test/runtime.call.precompile.test.ts +++ b/evm-tests/test/runtime.call.precompile.test.ts @@ -1,26 +1,40 @@ import * as assert from "assert"; -import { getAliceSigner, getDevnetApi } from "../src/substrate" +import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" import { generateRandomEthersWallet, getPublicClient } from "../src/utils"; import { IDISPATCH_ADDRESS, ISTORAGE_QUERY_ADDRESS, ETH_LOCAL_URL } from "../src/config"; import { devnet, MultiAddress } from "@polkadot-api/descriptors" -import { hexToNumber, PublicClient } from "viem"; -import { PolkadotSigner, TypedApi } from "polkadot-api"; +import { PublicClient } from "viem"; +import { PolkadotSigner, TypedApi, getTypedCodecs } from "polkadot-api"; import { convertPublicKeyToSs58 } from "../src/address-utils" -import { forceSetBalanceToEthAddress, setMaxChildkeyTake } from "../src/subtensor"; -import { xxhashAsU8a } from '@polkadot/util-crypto'; -import { u8aToHex } from '@polkadot/util'; +import { forceSetBalanceToEthAddress, setMaxChildkeyTake, burnedRegister, forceSetBalanceToSs58Address, addStake, setTxRateLimit, addNewSubnetwork, startCall, setTempo } from "../src/subtensor"; describe("Test the dispatch precompile", () => { let publicClient: PublicClient; const wallet1 = generateRandomEthersWallet(); let api: TypedApi let alice: PolkadotSigner; + const hotkey = getRandomSubstrateKeypair(); + const coldkey = getRandomSubstrateKeypair(); + let netuid: number; before(async () => { publicClient = await getPublicClient(ETH_LOCAL_URL) api = await getDevnetApi() alice = await getAliceSigner() await forceSetBalanceToEthAddress(api, wallet1.address) + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + + + netuid = await addNewSubnetwork(api, hotkey, coldkey) + // set tempo big enough to avoid stake value 
updated with fast block feature + await setTempo(api, netuid, 10000) + await startCall(api, netuid, coldkey) + await setTxRateLimit(api, BigInt(0)) + + await burnedRegister(api, netuid, convertPublicKeyToSs58(hotkey.publicKey), coldkey) + await addStake(api, netuid, convertPublicKeyToSs58(hotkey.publicKey), BigInt(1_000_000_000), coldkey) }) it("Dispatch transfer call via precompile contract works correctly", async () => { @@ -49,13 +63,8 @@ describe("Test the dispatch precompile", () => { }) - it("Storage query call via precompile contract works correctly", async () => { - const palletPrefixBytes = xxhashAsU8a("SubtensorModule", 128); - const storageItemPrefixBytes = xxhashAsU8a("MaxChildkeyTake", 128); - const fullStorageKeyBytes = new Uint8Array([...palletPrefixBytes, ...storageItemPrefixBytes]); - // 0x658faa385070e074c85bf6b568cf0555dba018859cab7e989f77669457b394be - // key for max child key take - const fullStorageKeyHex = u8aToHex(fullStorageKeyBytes); + it("Value type storage query call via precompile contract works correctly", async () => { + const key = await api.query.SubtensorModule.MaxChildkeyTake.getKey(); let maxChildkeyTake = 257; await setMaxChildkeyTake(api, maxChildkeyTake) @@ -63,13 +72,86 @@ describe("Test the dispatch precompile", () => { api.query.SubtensorModule.MaxChildkeyTake.getValue(); const rawCallResponse = await publicClient.call({ to: ISTORAGE_QUERY_ADDRESS, - data: fullStorageKeyHex, + data: key.toString() as `0x${string}`, + }) + const rawResultData = rawCallResponse.data ?? 
""; + + const codec = await getTypedCodecs(devnet); + const maxChildkeyTakeCodec = codec.query.SubtensorModule.MaxChildkeyTake.value; + const maxChildkeyTakeFromContract = maxChildkeyTakeCodec.dec(rawResultData); + assert.equal(maxChildkeyTakeFromContract, maxChildkeyTake, "value should be 257") + }) + + it("Map type storage query call via precompile contract works correctly", async () => { + + const key = await api.query.SubtensorModule.Tempo.getKey(netuid); + + const tempoOnChain = await api.query.SubtensorModule.Tempo.getValue(netuid); + const rawCallResponse = await publicClient.call({ + to: ISTORAGE_QUERY_ADDRESS, + data: key.toString() as `0x${string}`, + }) + const rawResultData = rawCallResponse.data ?? ""; + + const codec = await getTypedCodecs(devnet); + const maxChildkeyTakeValueCodec = codec.query.SubtensorModule.Tempo.value; + const decodedValue = maxChildkeyTakeValueCodec.dec(rawResultData); + assert.equal(tempoOnChain, decodedValue, "value should be the same as on chain") + }) + + it("Double map type storage query call via precompile contract works correctly", async () => { + const key = await api.query.SubtensorModule.TotalHotkeyAlpha.getKey(convertPublicKeyToSs58(hotkey.publicKey), netuid); + const totalHotkeyAlphaOnChain = await api.query.SubtensorModule.TotalHotkeyAlpha.getValue(convertPublicKeyToSs58(hotkey.publicKey), netuid); + + const rawCallResponse = await publicClient.call({ + to: ISTORAGE_QUERY_ADDRESS, + data: key.toString() as `0x${string}`, }) - const rawResultData = rawCallResponse.data; - if (rawResultData === undefined) { - throw new Error("rawResultData is undefined"); - } - let value = hexToNumber(rawResultData); - assert.equal(value, maxChildkeyTake, "value should be 257") + const rawResultData = rawCallResponse.data ?? 
""; + const codec = await getTypedCodecs(devnet); + const totalHotkeyAlphaValueCodec = codec.query.SubtensorModule.TotalHotkeyAlpha.value; + const decodedValue = totalHotkeyAlphaValueCodec.dec(rawResultData); + assert.equal(totalHotkeyAlphaOnChain, decodedValue, "value should be the same as on chain") + }) + + // Polkadot api can't decode the boolean type for now. + // it("Double map type storage query call via precompile contract works correctly", async () => { + + // const storageItemPrefixBytes = xxhashAsU8a("IsNetworkMember", 128); + // const codec = await getTypedCodecs(devnet); + // const isNetworkMemberArgsCodec = codec.query.SubtensorModule.IsNetworkMember.args; + // const encodedArgs = isNetworkMemberArgsCodec.enc([convertPublicKeyToSs58(alice.publicKey), netuid]); + + // console.log(encodedArgs) + + // const fullStorageKeyBytes = new Uint8Array([...palletPrefixBytes, ...storageItemPrefixBytes, ...encodedArgs]); + + // console.log(fullStorageKeyBytes) + + // const fullStorageKeyHex = u8aToHex(fullStorageKeyBytes); + + // console.log(fullStorageKeyHex) + + // const isNetworkMemberOnChain = await api.query.SubtensorModule.IsNetworkMember.getValue(convertPublicKeyToSs58(alice.publicKey), netuid); + + // console.log(isNetworkMemberOnChain) + + // const rawCallResponse = await publicClient.call({ + // to: ISTORAGE_QUERY_ADDRESS, + // data: fullStorageKeyHex, + // }) + + // console.log(rawCallResponse) + + // const rawResultData = rawCallResponse.data ?? 
""; + + // const isNetworkMemberValueCodec = codec.query.SubtensorModule.IsNetworkMember.value; + // const decodedValue = isNetworkMemberValueCodec.dec(rawResultData); + + // console.log(decodedValue) + + // assert.equal(isNetworkMemberOnChain, decodedValue, "value should be the same as on chain") + // }) + }); From be146d375b68c9143c842b912ae170eff8effa83 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 7 Aug 2025 11:52:11 +0800 Subject: [PATCH 039/379] more test cases for different types storage --- .../test/runtime.call.precompile.test.ts | 28 ++----------------- 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/evm-tests/test/runtime.call.precompile.test.ts b/evm-tests/test/runtime.call.precompile.test.ts index 6ae1973593..dd91ce7f79 100644 --- a/evm-tests/test/runtime.call.precompile.test.ts +++ b/evm-tests/test/runtime.call.precompile.test.ts @@ -117,40 +117,18 @@ describe("Test the dispatch precompile", () => { // Polkadot api can't decode the boolean type for now. 
// it("Double map type storage query call via precompile contract works correctly", async () => { - - // const storageItemPrefixBytes = xxhashAsU8a("IsNetworkMember", 128); - // const codec = await getTypedCodecs(devnet); - // const isNetworkMemberArgsCodec = codec.query.SubtensorModule.IsNetworkMember.args; - // const encodedArgs = isNetworkMemberArgsCodec.enc([convertPublicKeyToSs58(alice.publicKey), netuid]); - - // console.log(encodedArgs) - - // const fullStorageKeyBytes = new Uint8Array([...palletPrefixBytes, ...storageItemPrefixBytes, ...encodedArgs]); - - // console.log(fullStorageKeyBytes) - - // const fullStorageKeyHex = u8aToHex(fullStorageKeyBytes); - - // console.log(fullStorageKeyHex) + // const key = await api.query.SubtensorModule.IsNetworkMember.getKey(convertPublicKeyToSs58(alice.publicKey), netuid); // const isNetworkMemberOnChain = await api.query.SubtensorModule.IsNetworkMember.getValue(convertPublicKeyToSs58(alice.publicKey), netuid); - - // console.log(isNetworkMemberOnChain) - // const rawCallResponse = await publicClient.call({ // to: ISTORAGE_QUERY_ADDRESS, - // data: fullStorageKeyHex, + // data: key.toString() as `0x${string}`, // }) - // console.log(rawCallResponse) - // const rawResultData = rawCallResponse.data ?? 
""; - + // const codec = await getTypedCodecs(devnet); // const isNetworkMemberValueCodec = codec.query.SubtensorModule.IsNetworkMember.value; // const decodedValue = isNetworkMemberValueCodec.dec(rawResultData); - - // console.log(decodedValue) - // assert.equal(isNetworkMemberOnChain, decodedValue, "value should be the same as on chain") // }) From a99244f910d0d816abf9d78f4bd290e8971eeeb6 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 7 Aug 2025 11:57:22 +0800 Subject: [PATCH 040/379] bump version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index db35faf7e0..64f48294e4 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -218,7 +218,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 299, + spec_version: 300, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 332701a7cf72de6ad103434208fc7413c8f12669 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 7 Aug 2025 12:42:04 +0800 Subject: [PATCH 041/379] upgrade polkadot api version --- evm-tests/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evm-tests/package.json b/evm-tests/package.json index 9970967a88..8ac57b6e93 100644 --- a/evm-tests/package.json +++ b/evm-tests/package.json @@ -15,7 +15,7 @@ "dotenv": "16.4.7", "ethers": "^6.13.5", "mocha": "^11.1.0", - "polkadot-api": "^1.9.5", + "polkadot-api": "^1.11.0", "scale-ts": "^1.6.1", "viem": "2.23.4", "ws": "^8.18.2" From ea4e39f2d220c7c9bc7add3f17ac5704c64da60f Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 12 Aug 2025 05:57:01 +0000 Subject: [PATCH 042/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 1f9d85d08b..549a87d28d 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -693,7 +693,7 @@ mod dispatches { /// - Attempting to set prometheus information withing the rate limit min. /// #[pallet::call_index(4)] - #[pallet::weight((Weight::from_parts(28_150_000, 0) + #[pallet::weight((Weight::from_parts(43_000_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_axon( @@ -827,7 +827,7 @@ mod dispatches { /// - The ip type v4 or v6. 
/// #[pallet::call_index(5)] - #[pallet::weight((Weight::from_parts(30_170_000, 0) + #[pallet::weight((Weight::from_parts(22_480_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_prometheus( @@ -915,7 +915,7 @@ mod dispatches { /// Attempt to adjust the senate membership to include a hotkey #[pallet::call_index(63)] - #[pallet::weight((Weight::from_parts(48_160_000, 0) + #[pallet::weight((Weight::from_parts(63_690_000, 0) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)), DispatchClass::Normal, Pays::Yes))] pub fn adjust_senate(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { @@ -924,7 +924,7 @@ mod dispatches { /// User register a new subnetwork via burning token #[pallet::call_index(7)] - #[pallet::weight((Weight::from_parts(278_400_000, 0) + #[pallet::weight((Weight::from_parts(378_800_000, 0) .saturating_add(T::DbWeight::get().reads(49)) .saturating_add(T::DbWeight::get().writes(43)), DispatchClass::Normal, Pays::No))] pub fn burned_register( @@ -1956,7 +1956,7 @@ mod dispatches { /// Emits a `FirstEmissionBlockNumberSet` event on success. #[pallet::call_index(92)] #[pallet::weight(( - Weight::from_parts(24_370_000, 0).saturating_add(T::DbWeight::get().reads_writes(4, 2)), + Weight::from_parts(32_710_000, 0).saturating_add(T::DbWeight::get().reads_writes(4, 2)), DispatchClass::Operational, Pays::Yes ))] @@ -2020,7 +2020,7 @@ mod dispatches { /// Emits a `TokensRecycled` event on success. #[pallet::call_index(101)] #[pallet::weight(( - Weight::from_parts(76_470_000, 0).saturating_add(T::DbWeight::get().reads_writes(7, 4)), + Weight::from_parts(100_700_000, 0).saturating_add(T::DbWeight::get().reads_writes(7, 4)), DispatchClass::Operational, Pays::Yes ))] @@ -2045,7 +2045,7 @@ mod dispatches { /// Emits a `TokensBurned` event on success. 
#[pallet::call_index(102)] #[pallet::weight(( - Weight::from_parts(74_650_000, 0).saturating_add(T::DbWeight::get().reads_writes(7, 3)), + Weight::from_parts(97_500_000, 0).saturating_add(T::DbWeight::get().reads_writes(7, 3)), DispatchClass::Operational, Pays::Yes ))] From ebfba9434796cf1394f64095ca968b1d35fa6e55 Mon Sep 17 00:00:00 2001 From: open-junius Date: Tue, 12 Aug 2025 21:34:16 +0800 Subject: [PATCH 043/379] add remove case in wrap contract --- evm-tests/src/contracts/stakeWrap.sol | 15 ++++++++++ evm-tests/src/contracts/stakeWrap.ts | 29 ++++++++++++++++++- .../test/staking.precompile.wrap.test.ts | 23 ++++++++++++++- 3 files changed, 65 insertions(+), 2 deletions(-) diff --git a/evm-tests/src/contracts/stakeWrap.sol b/evm-tests/src/contracts/stakeWrap.sol index 78f3e9f999..c5c9f7ae53 100644 --- a/evm-tests/src/contracts/stakeWrap.sol +++ b/evm-tests/src/contracts/stakeWrap.sol @@ -62,4 +62,19 @@ contract StakeWrap { (bool success, ) = ISTAKING_ADDRESS.call{gas: gasleft()}(data); require(success, "addStakeLimit call failed"); } + + function removeStake( + bytes32 hotkey, + uint256 netuid, + uint256 amount + ) external { + bytes memory data = abi.encodeWithSelector( + Staking.removeStake.selector, + hotkey, + amount, + netuid + ); + (bool success, ) = ISTAKING_ADDRESS.call{gas: gasleft()}(data); + require(success, "addStake call failed"); + } } diff --git a/evm-tests/src/contracts/stakeWrap.ts b/evm-tests/src/contracts/stakeWrap.ts index 16434addc8..a6a24bdb21 100644 --- a/evm-tests/src/contracts/stakeWrap.ts +++ b/evm-tests/src/contracts/stakeWrap.ts @@ -4,6 +4,29 @@ export const abi = [ "stateMutability": "nonpayable", "type": "constructor" }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "hotkey", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "netuid", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "removeStake", + "outputs": [], + 
"stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { @@ -59,8 +82,12 @@ export const abi = [ "outputs": [], "stateMutability": "nonpayable", "type": "function" + }, + { + "stateMutability": "payable", + "type": "receive" } ]; // compiled with 0.8.20 -export const bytecode = "608060405234801561000f575f80fd5b5061069e8061001d5f395ff3fe60806040526004361061002c575f3560e01c80632daedd521461003757806390b9d5341461005f57610033565b3661003357005b5f80fd5b348015610042575f80fd5b5061005d60048036038101906100589190610357565b610087565b005b34801561006a575f80fd5b50610085600480360381019061008091906103dc565b6101b7565b005b5f631fc9b14160e01b8483856040516024016100a593929190610471565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a8360405161012d9190610512565b5f604051808303815f8787f1925050503d805f8114610167576040519150601f19603f3d011682016040523d82523d5f602084013e61016c565b606091505b50509050806101b0576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016101a790610582565b60405180910390fd5b5050505050565b5f635beb6b7460e01b86848685896040516024016101d99594939291906105af565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a836040516102619190610512565b5f604051808303815f8787f1925050503d805f811461029b576040519150601f19603f3d011682016040523d82523d5f602084013e6102a0565b606091505b50509050806102e4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016102db9061064a565b60405180910390fd5b50505050505050565b5f80fd5b5f819050919050565b610303816102f1565b811461030d575f80fd5b50565b5f8135905061031e816102fa565b92915050565b5f819050919050565b6103368161032456
5b8114610340575f80fd5b50565b5f813590506103518161032d565b92915050565b5f805f6060848603121561036e5761036d6102ed565b5b5f61037b86828701610310565b935050602061038c86828701610343565b925050604061039d86828701610343565b9150509250925092565b5f8115159050919050565b6103bb816103a7565b81146103c5575f80fd5b50565b5f813590506103d6816103b2565b92915050565b5f805f805f60a086880312156103f5576103f46102ed565b5b5f61040288828901610310565b955050602061041388828901610343565b945050604061042488828901610343565b935050606061043588828901610343565b9250506080610446888289016103c8565b9150509295509295909350565b61045c816102f1565b82525050565b61046b81610324565b82525050565b5f6060820190506104845f830186610453565b6104916020830185610462565b61049e6040830184610462565b949350505050565b5f81519050919050565b5f81905092915050565b5f5b838110156104d75780820151818401526020810190506104bc565b5f8484015250505050565b5f6104ec826104a6565b6104f681856104b0565b93506105068185602086016104ba565b80840191505092915050565b5f61051d82846104e2565b915081905092915050565b5f82825260208201905092915050565b7f6164645374616b652063616c6c206661696c65640000000000000000000000005f82015250565b5f61056c601483610528565b915061057782610538565b602082019050919050565b5f6020820190508181035f83015261059981610560565b9050919050565b6105a9816103a7565b82525050565b5f60a0820190506105c25f830188610453565b6105cf6020830187610462565b6105dc6040830186610462565b6105e960608301856105a0565b6105f66080830184610462565b9695505050505050565b7f6164645374616b654c696d69742063616c6c206661696c6564000000000000005f82015250565b5f610634601983610528565b915061063f82610600565b602082019050919050565b5f6020820190508181035f83015261066181610628565b905091905056fea264697066735822122083351bec20bd75de90a1b6e405922bedadf9ff260c02f34ef9dbb5ee1bda11cd64736f6c63430008140033" +export const bytecode = 
"6080604052348015600e575f5ffd5b506107e78061001c5f395ff3fe608060405260043610610037575f3560e01c80632daedd52146100425780637d691e301461006a57806390b9d534146100925761003e565b3661003e57005b5f5ffd5b34801561004d575f5ffd5b50610068600480360381019061006391906104ba565b6100ba565b005b348015610075575f5ffd5b50610090600480360381019061008b91906104ba565b6101ea565b005b34801561009d575f5ffd5b506100b860048036038101906100b3919061053f565b61031a565b005b5f631fc9b14160e01b8483856040516024016100d8939291906105d4565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a83604051610160919061065b565b5f604051808303815f8787f1925050503d805f811461019a576040519150601f19603f3d011682016040523d82523d5f602084013e61019f565b606091505b50509050806101e3576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016101da906106cb565b60405180910390fd5b5050505050565b5f637d691e3060e01b848385604051602401610208939291906105d4565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a83604051610290919061065b565b5f604051808303815f8787f1925050503d805f81146102ca576040519150601f19603f3d011682016040523d82523d5f602084013e6102cf565b606091505b5050905080610313576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161030a906106cb565b60405180910390fd5b5050505050565b5f635beb6b7460e01b868486858960405160240161033c9594939291906106f8565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a836040516103c4919061065b565b5f60405180830
3815f8787f1925050503d805f81146103fe576040519150601f19603f3d011682016040523d82523d5f602084013e610403565b606091505b5050905080610447576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161043e90610793565b60405180910390fd5b50505050505050565b5f5ffd5b5f819050919050565b61046681610454565b8114610470575f5ffd5b50565b5f813590506104818161045d565b92915050565b5f819050919050565b61049981610487565b81146104a3575f5ffd5b50565b5f813590506104b481610490565b92915050565b5f5f5f606084860312156104d1576104d0610450565b5b5f6104de86828701610473565b93505060206104ef868287016104a6565b9250506040610500868287016104a6565b9150509250925092565b5f8115159050919050565b61051e8161050a565b8114610528575f5ffd5b50565b5f8135905061053981610515565b92915050565b5f5f5f5f5f60a0868803121561055857610557610450565b5b5f61056588828901610473565b9550506020610576888289016104a6565b9450506040610587888289016104a6565b9350506060610598888289016104a6565b92505060806105a98882890161052b565b9150509295509295909350565b6105bf81610454565b82525050565b6105ce81610487565b82525050565b5f6060820190506105e75f8301866105b6565b6105f460208301856105c5565b61060160408301846105c5565b949350505050565b5f81519050919050565b5f81905092915050565b8281835e5f83830152505050565b5f61063582610609565b61063f8185610613565b935061064f81856020860161061d565b80840191505092915050565b5f610666828461062b565b915081905092915050565b5f82825260208201905092915050565b7f6164645374616b652063616c6c206661696c65640000000000000000000000005f82015250565b5f6106b5601483610671565b91506106c082610681565b602082019050919050565b5f6020820190508181035f8301526106e2816106a9565b9050919050565b6106f28161050a565b82525050565b5f60a08201905061070b5f8301886105b6565b61071860208301876105c5565b61072560408301866105c5565b61073260608301856106e9565b61073f60808301846105c5565b9695505050505050565b7f6164645374616b654c696d69742063616c6c206661696c6564000000000000005f82015250565b5f61077d601983610671565b915061078882610749565b602082019050919050565b5f6020820190508181035f8301526107aa81610771565b905091905056fea26
4697066735822122071b79d865c1a277a5c7a45dcea9fe84a941f14667654af515610dfd55d3a6fe764736f6c634300081e0033" diff --git a/evm-tests/test/staking.precompile.wrap.test.ts b/evm-tests/test/staking.precompile.wrap.test.ts index 01b605beb5..077f78a42b 100644 --- a/evm-tests/test/staking.precompile.wrap.test.ts +++ b/evm-tests/test/staking.precompile.wrap.test.ts @@ -76,10 +76,31 @@ describe("Test staking precompile add from deployed contract", () => { const tx = await deployedContract.stake( hotkey.publicKey, netuid, - tao(2000), + tao(2), ); await tx.wait(); + const stake = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + convertH160ToSS58(contract.target.toString()), + netuid + ) + console.log(" == before remove stake is ", stake) + + const tx2 = await deployedContract.removeStake( + hotkey.publicKey, + netuid, + tao(1), + ); + await tx2.wait(); + + const stake2 = await api.query.SubtensorModule.Alpha.getValue( + convertPublicKeyToSs58(hotkey.publicKey), + convertH160ToSS58(contract.target.toString()), + netuid + ) + console.log(" == after remove stake is ", stake2) + }); it("Staker add stake limit", async () => { From 501e6e354c6e9543c25bf415e10773811904e714 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 12 Aug 2025 15:20:40 +0000 Subject: [PATCH 044/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 1f9d85d08b..510dc06ca9 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -827,7 +827,7 @@ mod dispatches { /// - The ip type v4 or v6. 
/// #[pallet::call_index(5)] - #[pallet::weight((Weight::from_parts(30_170_000, 0) + #[pallet::weight((Weight::from_parts(22_850_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_prometheus( @@ -1045,7 +1045,7 @@ mod dispatches { /// #[pallet::call_index(69)] #[pallet::weight(( - Weight::from_parts(3_630_000, 0) + Weight::from_parts(4_770_000, 0) .saturating_add(T::DbWeight::get().reads(0)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Operational, From d404554a9f0ecf079ef8bc81f52d4f96765f1f1c Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 12 Aug 2025 12:47:17 -0700 Subject: [PATCH 045/379] post-merge compilation fixes --- Cargo.lock | 17 --- pallets/subtensor/src/coinbase/root.rs | 88 +++++++------ pallets/subtensor/src/macros/dispatches.rs | 2 +- pallets/subtensor/src/macros/hooks.rs | 4 +- pallets/subtensor/src/subnets/subnet.rs | 15 +-- pallets/subtensor/src/tests/networks.rs | 136 +++++++++++---------- 6 files changed, 136 insertions(+), 126 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8e2df0a48f..f70c3dc20c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -238,7 +238,6 @@ dependencies = [ "proc-macro2", "quote", "syn 2.0.104", - "syn 2.0.104", ] [[package]] @@ -8725,22 +8724,6 @@ dependencies = [ "quote", "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6)", "syn 2.0.104", - "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2412-6)", - "syn 2.0.104", -] - -[[package]] -name = "predicates" -version = "2.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" -dependencies = [ - "difflib", - "float-cmp", - "itertools 0.10.5", - "normalize-line-endings", - "predicates-core", - "regex", ] [[package]] diff 
--git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 8a83298529..7618cf7c48 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -462,6 +462,10 @@ impl Pallet { SubnetVolume::::remove(netuid); SubnetMovingPrice::::remove(netuid); + // --- 12. Add the balance back to the owner. + SubnetOwner::::remove(netuid); + + // --- 13. Remove subnet identity if it exists. if SubnetIdentitiesV3::::contains_key(netuid) { SubnetIdentitiesV3::::remove(netuid); Self::deposit_event(Event::SubnetIdentityRemoved(netuid)); @@ -584,49 +588,54 @@ impl Pallet { // 2. Basic info. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); - let lock_cost: u64 = Self::get_subnet_locked_balance(netuid); + let lock_cost_u64: u64 = Self::get_subnet_locked_balance(netuid).into(); // Owner-cut already received from emissions. - let total_emission: u64 = Emission::::get(netuid).iter().sum(); + let total_emission_u64: u64 = Emission::::get(netuid) + .into_iter() + .map(Into::::into) + .sum(); let owner_fraction = Self::get_float_subnet_owner_cut(); - let owner_received_emission = U96F32::from_num(total_emission) + let owner_received_emission_u64 = U96F32::from_num(total_emission_u64) .saturating_mul(owner_fraction) .floor() .saturating_to_num::(); - // 3. Gather α-out stakers. - let mut total_alpha_out: u128 = 0; + // 3. Gather α-out stakers (U64F64 -> use raw bits as weights). + let mut total_alpha_bits: u128 = 0; let mut stakers: Vec<(T::AccountId, T::AccountId, u128)> = Vec::new(); for ((hot, cold, this_netuid), alpha) in Alpha::::iter() { if this_netuid == netuid { - let a = alpha.saturating_to_num::(); - total_alpha_out = total_alpha_out.saturating_add(a); - stakers.push((hot, cold, a)); + let a_bits: u128 = alpha.to_bits(); // <- was `alpha.into()`; that doesn't exist + total_alpha_bits = total_alpha_bits.saturating_add(a_bits); + stakers.push((hot, cold, a_bits)); } } - // 4. 
Pro-rata distribution – TAO restaked to ROOT. - let subnet_tao: u128 = SubnetTAO::::get(netuid) as u128; + // 4. Pro‑rata distribution – TAO restaked to ROOT. + let subnet_tao_u64: u64 = SubnetTAO::::get(netuid).into(); let root_netuid = NetUid::ROOT; - if total_alpha_out > 0 && subnet_tao > 0 && !stakers.is_empty() { + if total_alpha_bits > 0 && subnet_tao_u64 > 0 && !stakers.is_empty() { struct Portion { hot: A, cold: C, share: u64, rem: u128, } + + let pot_u128 = subnet_tao_u64 as u128; let mut portions: Vec> = Vec::with_capacity(stakers.len()); let mut distributed: u128 = 0; - for (hot, cold, a) in &stakers { - let prod = subnet_tao.saturating_mul(*a); - let share_u128 = prod.checked_div(total_alpha_out).unwrap_or_default(); + for (hot, cold, a_bits) in &stakers { + let prod = pot_u128.saturating_mul(*a_bits); + let share_u128 = prod.checked_div(total_alpha_bits).unwrap_or_default(); let share_u64 = share_u128.min(u64::MAX as u128) as u64; distributed = distributed.saturating_add(share_u64 as u128); - let rem = prod.checked_rem(total_alpha_out).unwrap_or_default(); + let rem = prod.checked_rem(total_alpha_bits).unwrap_or_default(); portions.push(Portion { hot: hot.clone(), cold: cold.clone(), @@ -635,11 +644,12 @@ impl Pallet { }); } - // Handle leftover (< stakers.len()). - let leftover = subnet_tao.saturating_sub(distributed); + // Largest‑remainder method; clamp for wasm32 (usize = 32‑bit). + let leftover = pot_u128.saturating_sub(distributed); if leftover > 0 { portions.sort_by(|a, b| b.rem.cmp(&a.rem)); - for p in portions.iter_mut().take(leftover as usize) { + let give = core::cmp::min(leftover, portions.len() as u128) as usize; + for p in portions.iter_mut().take(give) { p.share = p.share.saturating_add(1); } } @@ -647,8 +657,14 @@ impl Pallet { // Restake into root and clean α records. for p in portions { if p.share > 0 { - // Zero-fee restake of TAO into the root network. 
- Self::stake_into_subnet(&p.hot, &p.cold, root_netuid, p.share, 0u64)?; + Self::stake_into_subnet( + &p.hot, + &p.cold, + root_netuid, + p.share.into(), + TaoCurrency::from(0), + false, + )?; } Alpha::::remove((&p.hot, &p.cold, netuid)); } @@ -659,42 +675,44 @@ impl Pallet { } } - // 5. Reset α in/out counters. - SubnetAlphaIn::::insert(netuid, 0); - SubnetAlphaOut::::insert(netuid, 0); + // 5. Reset α in/out counters — use typed zeros (no inference issues). + SubnetAlphaIn::::insert(netuid, AlphaCurrency::from(0)); + SubnetAlphaOut::::insert(netuid, AlphaCurrency::from(0)); // 6. Refund remaining lock to subnet owner. - let refund = lock_cost.saturating_sub(owner_received_emission); - Self::set_subnet_locked_balance(netuid, 0); - if refund > 0 { - Self::add_balance_to_coldkey_account(&owner_coldkey, refund); + let refund_u64 = lock_cost_u64.saturating_sub(owner_received_emission_u64); + Self::set_subnet_locked_balance(netuid, TaoCurrency::from(0)); + if refund_u64 > 0 { + // This helper expects runtime Balance (u64), not TaoCurrency. + Self::add_balance_to_coldkey_account(&owner_coldkey, refund_u64); } Ok(()) } - pub fn get_network_to_prune() -> Option { let current_block: u64 = Self::get_current_block_as_u64(); let total_networks: u16 = TotalNetworks::::get(); let mut candidate_netuid: Option = None; - let mut candidate_emission = u64::MAX; - let mut candidate_timestamp = u64::MAX; + let mut candidate_emission: u64 = u64::MAX; + let mut candidate_timestamp: u64 = u64::MAX; for net in 1..=total_networks { let netuid: NetUid = net.into(); let registered_at = NetworkRegisteredAt::::get(netuid); - // Skip immune networks + // Skip immune networks. if current_block < registered_at.saturating_add(Self::get_network_immunity_period()) { continue; } - // We want total emission across all UIDs in this subnet: - let emission_vec = Emission::::get(netuid); - let total_emission = emission_vec.iter().sum::(); + // Sum AlphaCurrency as u64 for comparison. 
+ let total_emission: u64 = Emission::::get(netuid) + .into_iter() + .map(Into::::into) + .sum(); - // If tie on total_emission, earliest registration wins + // If tie on total_emission, earliest registration wins. if total_emission < candidate_emission || (total_emission == candidate_emission && registered_at < candidate_timestamp) { diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 4453dbaae6..e9cf2443fb 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -2222,7 +2222,7 @@ mod dispatches { /// Remove a user's subnetwork /// The caller must be root - #[pallet::call_index(110)] + #[pallet::call_index(114)] #[pallet::weight((Weight::from_parts(119_000_000, 0) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::No))] diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 7083c24e5d..8f300bd8f8 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -129,7 +129,9 @@ mod hooks { // Migrate subnet symbols to fix the shift after subnet 81 .saturating_add(migrations::migrate_subnet_symbols::migrate_subnet_symbols::()) // Migrate CRV3 add commit_block - .saturating_add(migrations::migrate_crv3_commits_add_block::migrate_crv3_commits_add_block::()); + .saturating_add(migrations::migrate_crv3_commits_add_block::migrate_crv3_commits_add_block::()) + // Migrate Immunity Period + .saturating_add(migrations::migrate_network_immunity_period::migrate_network_immunity_period::()); weight } diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index bbe197c3bc..e7c9d5d084 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -167,15 +167,11 @@ impl Pallet { Error::::NotEnoughBalanceToStake ); - // --- 6. 
Determine the netuid to register. - let netuid_to_register = Self::get_next_netuid(); - // --- 7. Perform the lock operation. let actual_tao_lock_amount = Self::remove_balance_from_coldkey_account(&coldkey, lock_amount.into())?; log::debug!("actual_tao_lock_amount: {actual_tao_lock_amount:?}"); - // --- 8. Set the lock amount for use to determine pricing. // --- 8. Set the lock amount for use to determine pricing. Self::set_network_last_lock(actual_tao_lock_amount); @@ -208,11 +204,11 @@ impl Pallet { NetworkLastRegistered::::set(current_block); NetworkRegisteredAt::::insert(netuid_to_register, current_block); - // --- 13. Set the symbol. + // --- 15. Set the symbol. let symbol = Self::get_next_available_symbol(netuid_to_register); TokenSymbol::::insert(netuid_to_register, symbol); - // --- 15. Init the pool by putting the lock as the initial alpha. + // --- 16. Init the pool by putting the lock as the initial alpha. TokenSymbol::::insert( netuid_to_register, Self::get_symbol_for_subnet(netuid_to_register), @@ -239,7 +235,7 @@ impl Pallet { Self::increase_total_stake(pool_initial_tao); } - // --- 16. Add the identity if it exists + // --- 17. Add the identity if it exists if let Some(identity_value) = identity { ensure!( Self::is_valid_subnet_identity(&identity_value), @@ -250,12 +246,11 @@ impl Pallet { Self::deposit_event(Event::SubnetIdentitySet(netuid_to_register)); } - - // --- 17. Emit the NetworkAdded event. + // --- 18. Emit the NetworkAdded event. log::info!("NetworkAdded( netuid:{netuid_to_register:?}, mechanism:{mechid:?} )"); Self::deposit_event(Event::NetworkAdded(netuid_to_register, mechid)); - // --- 18. Return success. + // --- 19. Return success. 
Ok(()) } diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index e80021ad1a..ba107c73a4 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -4,8 +4,8 @@ use crate::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; use sp_core::U256; -use subtensor_runtime_common::TaoCurrency; use substrate_fixed::types::U64F64; +use subtensor_runtime_common::TaoCurrency; use subtensor_swap_interface::SwapHandler; #[test] @@ -49,9 +49,9 @@ fn dissolve_no_stakers_no_alpha_no_emission() { let hot = U256::from(2); let net = add_dynamic_network(&hot, &cold); - SubtensorModule::set_subnet_locked_balance(net, 0); - SubnetTAO::::insert(net, 0); - Emission::::insert(net, Vec::::new()); + SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(0)); + SubnetTAO::::insert(net, TaoCurrency::from(0)); + Emission::::insert(net, Vec::::new()); let before = SubtensorModule::get_coldkey_balance(&cold); assert_ok!(SubtensorModule::do_dissolve_network(net)); @@ -70,16 +70,16 @@ fn dissolve_refunds_full_lock_cost_when_no_emission() { let hot = U256::from(4); let net = add_dynamic_network(&hot, &cold); - let lock = 1_000_000u64; + let lock: TaoCurrency = TaoCurrency::from(1_000_000); SubtensorModule::set_subnet_locked_balance(net, lock); - SubnetTAO::::insert(net, 0); - Emission::::insert(net, Vec::::new()); + SubnetTAO::::insert(net, TaoCurrency::from(0)); + Emission::::insert(net, Vec::::new()); let before = SubtensorModule::get_coldkey_balance(&cold); assert_ok!(SubtensorModule::do_dissolve_network(net)); let after = SubtensorModule::get_coldkey_balance(&cold); - assert_eq!(after, before + lock); + assert_eq!(TaoCurrency::from(after), TaoCurrency::from(before) + lock); }); } @@ -95,8 +95,8 @@ fn dissolve_single_alpha_out_staker_gets_all_tao() { let (s_hot, s_cold) = (U256::from(100), U256::from(200)); Alpha::::insert((s_hot, s_cold, net), U64F64::from_num(5_000u128)); - 
SubnetTAO::::insert(net, 99_999u64); - SubtensorModule::set_subnet_locked_balance(net, 0); + SubnetTAO::::insert(net, TaoCurrency::from(99_999)); + SubtensorModule::set_subnet_locked_balance(net, 0.into()); // α on ROOT before let root = NetUid::ROOT; @@ -129,8 +129,8 @@ fn dissolve_two_stakers_pro_rata_distribution() { Alpha::::insert((s1_hot, s1_cold, net), U64F64::from_num(a1)); Alpha::::insert((s2_hot, s2_cold, net), U64F64::from_num(a2)); - SubnetTAO::::insert(net, 10_000u64); - SubtensorModule::set_subnet_locked_balance(net, 5_000u64); + SubnetTAO::::insert(net, TaoCurrency::from(10_000)); + SubtensorModule::set_subnet_locked_balance(net, 5_000.into()); // α on ROOT before let root = NetUid::ROOT; @@ -174,24 +174,27 @@ fn dissolve_owner_cut_refund_logic() { let sh = U256::from(77); let sc = U256::from(88); Alpha::::insert((sh, sc, net), U64F64::from_num(100u128)); - SubnetTAO::::insert(net, 1_000); + SubnetTAO::::insert(net, TaoCurrency::from(1_000)); // lock & emission - let lock = 2_000; + let lock: TaoCurrency = TaoCurrency::from(2_000); SubtensorModule::set_subnet_locked_balance(net, lock); - Emission::::insert(net, vec![200u64, 600]); + Emission::::insert( + net, + vec![AlphaCurrency::from(200), AlphaCurrency::from(600)], + ); // 18 % owner-cut SubnetOwnerCut::::put(11_796u16); let frac = 11_796f64 / 65_535f64; - let owner_em = (800f64 * frac).floor() as u64; + let owner_em: TaoCurrency = TaoCurrency::from((800f64 * frac).floor() as u64); let expect = lock.saturating_sub(owner_em); let before = SubtensorModule::get_coldkey_balance(&oc); assert_ok!(SubtensorModule::do_dissolve_network(net)); let after = SubtensorModule::get_coldkey_balance(&oc); - assert_eq!(after, before + expect); + assert_eq!(TaoCurrency::from(after), TaoCurrency::from(before) + expect); }); } @@ -202,9 +205,9 @@ fn dissolve_zero_refund_when_emission_exceeds_lock() { let oh = U256::from(2_000); let net = add_dynamic_network(&oh, &oc); - SubtensorModule::set_subnet_locked_balance(net, 
1_000); + SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(1_000)); SubnetOwnerCut::::put(u16::MAX); // 100 % - Emission::::insert(net, vec![2_000u64]); + Emission::::insert(net, vec![AlphaCurrency::from(2_000)]); let before = SubtensorModule::get_coldkey_balance(&oc); assert_ok!(SubtensorModule::do_dissolve_network(net)); @@ -243,7 +246,7 @@ fn dissolve_clears_all_per_subnet_storages() { Rank::::insert(net, vec![1u16]); Trust::::insert(net, vec![1u16]); Active::::insert(net, vec![true]); - Emission::::insert(net, vec![1u64]); + Emission::::insert(net, vec![AlphaCurrency::from(1)]); Incentive::::insert(net, vec![1u16]); Consensus::::insert(net, vec![1u16]); Dividends::::insert(net, vec![1u16]); @@ -267,15 +270,15 @@ fn dissolve_clears_all_per_subnet_storages() { POWRegistrationsThisInterval::::insert(net, 1u16); BurnRegistrationsThisInterval::::insert(net, 1u16); - SubnetTAO::::insert(net, 1u64); - SubnetAlphaInEmission::::insert(net, 1u64); - SubnetAlphaOutEmission::::insert(net, 1u64); - SubnetTaoInEmission::::insert(net, 1u64); + SubnetTAO::::insert(net, TaoCurrency::from(1)); + SubnetAlphaInEmission::::insert(net, AlphaCurrency::from(1)); + SubnetAlphaOutEmission::::insert(net, AlphaCurrency::from(1)); + SubnetTaoInEmission::::insert(net, TaoCurrency::from(1)); SubnetVolume::::insert(net, 1u128); // Fields that will be ZEROED (not removed) - SubnetAlphaIn::::insert(net, 2u64); - SubnetAlphaOut::::insert(net, 3u64); + SubnetAlphaIn::::insert(net, AlphaCurrency::from(2)); + SubnetAlphaOut::::insert(net, AlphaCurrency::from(3)); // Prefix / double-map collections Keys::::insert(net, 0u16, owner_hot); @@ -333,8 +336,8 @@ fn dissolve_clears_all_per_subnet_storages() { // ------------------------------------------------------------------ // Items expected to be PRESENT but ZERO // ------------------------------------------------------------------ - assert_eq!(SubnetAlphaIn::::get(net), 0); - assert_eq!(SubnetAlphaOut::::get(net), 0); + 
assert_eq!(SubnetAlphaIn::::get(net), 0.into()); + assert_eq!(SubnetAlphaOut::::get(net), 0.into()); // ------------------------------------------------------------------ // Collections fully cleared @@ -361,10 +364,10 @@ fn dissolve_alpha_out_but_zero_tao_no_rewards() { let sh = U256::from(23); let sc = U256::from(24); - Alpha::::insert((sh, sc, net), U64F64::from_num(1_000u128)); - SubnetTAO::::insert(net, 0u64); // zero TAO - SubtensorModule::set_subnet_locked_balance(net, 0); - Emission::::insert(net, Vec::::new()); + Alpha::::insert((sh, sc, net), U64F64::from_num(1_000u64)); + SubnetTAO::::insert(net, TaoCurrency::from(0)); // zero TAO + SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(0)); + Emission::::insert(net, Vec::::new()); let before = SubtensorModule::get_coldkey_balance(&sc); assert_ok!(SubtensorModule::do_dissolve_network(net)); @@ -407,8 +410,8 @@ fn dissolve_rounding_remainder_distribution() { Alpha::::insert((s1h, s1c, net), U64F64::from_num(3u128)); Alpha::::insert((s2h, s2c, net), U64F64::from_num(2u128)); - SubnetTAO::::insert(net, 1u64); // TAO pot = 1 - SubtensorModule::set_subnet_locked_balance(net, 0); + SubnetTAO::::insert(net, TaoCurrency::from(1)); // TAO pot = 1 + SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(0)); // 2. α on ROOT before let root = NetUid::ROOT; @@ -448,8 +451,9 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { // 3. 
Stake 30 : 70 (s1 : s2) in TAO let min_total = DefaultMinStake::::get(); - let s1 = 3 * min_total; - let s2 = 7 * min_total; + let min_total_u64: u64 = min_total.into(); + let s1: u64 = 3u64 * min_total_u64; + let s2: u64 = 7u64 * min_total_u64; SubtensorModule::add_balance_to_coldkey_account(&c1, s1 + 50_000); SubtensorModule::add_balance_to_coldkey_account(&c2, s2 + 50_000); @@ -458,13 +462,13 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { RuntimeOrigin::signed(c1), h1, netuid, - s1 + s1.into() )); assert_ok!(SubtensorModule::do_add_stake( RuntimeOrigin::signed(c2), h2, netuid, - s2 + s2.into() )); // 4. α-out snapshot @@ -474,8 +478,8 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { // 5. TAO pot & lock let tao_pot: u64 = 10_000; - SubnetTAO::::insert(netuid, tao_pot); - SubtensorModule::set_subnet_locked_balance(netuid, 5_000); + SubnetTAO::::insert(netuid, TaoCurrency::from(tao_pot)); + SubtensorModule::set_subnet_locked_balance(netuid, TaoCurrency::from(5_000)); // 6. Balances & α on the *root* network *before* let root = NetUid::ROOT; @@ -533,9 +537,9 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { // Use the same helper pallet uses in validate_add_stake let fee = ::SwapInterface::approx_fee_amount( netuid.into(), - min_stake, + min_stake.into(), ); - min_stake.saturating_add(fee) + min_stake.saturating_add(fee.into()) }; const N: usize = 20; @@ -543,10 +547,11 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { let mut hot = [U256::zero(); N]; let mut stake = [0u64; N]; + let min_amount_u64: u64 = min_amount.into(); for i in 0..N { cold[i] = U256::from(10_000 + 2 * i as u32); hot[i] = U256::from(10_001 + 2 * i as u32); - stake[i] = (i as u64 + 1) * min_amount; // multiples of min_amount + stake[i] = (i as u64 + 1u64) * min_amount_u64; // multiples of min_amount register_ok_neuron(netuid, hot[i], cold[i], 0); SubtensorModule::add_balance_to_coldkey_account(&cold[i], stake[i] + 100_000); @@ -555,7 +560,7 @@ fn 
destroy_alpha_out_many_stakers_complex_distribution() { RuntimeOrigin::signed(cold[i]), hot[i], netuid, - stake[i] + stake[i].into() )); } @@ -570,11 +575,18 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { // ── 3) TAO pot & subnet lock ──────────────────────────────────────── let tao_pot: u64 = 123_456; let lock: u64 = 30_000; - SubnetTAO::::insert(netuid, tao_pot); - SubtensorModule::set_subnet_locked_balance(netuid, lock); + SubnetTAO::::insert(netuid, TaoCurrency::from(tao_pot)); + SubtensorModule::set_subnet_locked_balance(netuid, TaoCurrency::from(lock)); // Owner already earned some emission; owner-cut = 50 % - Emission::::insert(netuid, vec![1_000u64, 2_000, 1_500]); + Emission::::insert( + netuid, + vec![ + AlphaCurrency::from(1_000), + AlphaCurrency::from(2_000), + AlphaCurrency::from(1_500), + ], + ); SubnetOwnerCut::::put(32_768u16); // = 0.5 in fixed-point // ── 4) balances & α on ROOT before ────────────────────────────────── @@ -640,9 +652,9 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { // α cleared for dissolved subnet assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != netuid)); - assert_eq!(SubnetAlphaIn::::get(netuid), 0); - assert_eq!(SubnetAlphaOut::::get(netuid), 0); - assert_eq!(SubtensorModule::get_subnet_locked_balance(netuid), 0); + assert_eq!(SubnetAlphaIn::::get(netuid), 0.into()); + assert_eq!(SubnetAlphaOut::::get(netuid), 0.into()); + assert_eq!(SubtensorModule::get_subnet_locked_balance(netuid), 0.into()); }); } @@ -661,7 +673,7 @@ fn prune_none_when_all_networks_immune() { let _n2 = add_dynamic_network(&U256::from(4), &U256::from(3)); // emissions don’t matter while immune - Emission::::insert(n1, vec![10u64]); + Emission::::insert(n1, vec![AlphaCurrency::from(10)]); assert_eq!(SubtensorModule::get_network_to_prune(), None); }); @@ -678,8 +690,8 @@ fn prune_selects_network_with_lowest_emission() { System::set_block_number(imm + 10); // n1 has lower total emission - Emission::::insert(n1, vec![5u64]); - 
Emission::::insert(n2, vec![100u64]); + Emission::::insert(n1, vec![AlphaCurrency::from(5)]); + Emission::::insert(n2, vec![AlphaCurrency::from(100)]); assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); }); @@ -698,8 +710,8 @@ fn prune_ignores_immune_network_even_if_lower_emission() { let n2 = add_dynamic_network(&U256::from(44), &U256::from(33)); // emissions: n1 bigger, n2 smaller but immune - Emission::::insert(n1, vec![50u64]); - Emission::::insert(n2, vec![1u64]); + Emission::::insert(n1, vec![AlphaCurrency::from(50)]); + Emission::::insert(n2, vec![AlphaCurrency::from(1)]); System::set_block_number(imm + 10); // still immune for n2 assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); @@ -721,8 +733,8 @@ fn prune_tie_on_emission_earlier_registration_wins() { System::set_block_number(imm + 20); // identical emissions → tie - Emission::::insert(n1, vec![123u64]); - Emission::::insert(n2, vec![123u64]); + Emission::::insert(n1, vec![AlphaCurrency::from(123)]); + Emission::::insert(n2, vec![AlphaCurrency::from(123)]); // earlier (n1) must be chosen assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); @@ -739,7 +751,7 @@ fn register_network_under_limit_success() { let cold = U256::from(10); let hot = U256::from(11); - let lock_now = SubtensorModule::get_network_lock_cost(); + let lock_now: u64 = SubtensorModule::get_network_lock_cost().into(); SubtensorModule::add_balance_to_coldkey_account(&cold, lock_now.saturating_mul(10)); assert_ok!(SubtensorModule::do_register_network( @@ -772,12 +784,12 @@ fn register_network_prunes_and_recycles_netuid() { let imm = SubtensorModule::get_network_immunity_period(); System::set_block_number(imm + 100); - Emission::::insert(n1, vec![1u64]); - Emission::::insert(n2, vec![1_000u64]); + Emission::::insert(n1, vec![AlphaCurrency::from(1)]); + Emission::::insert(n2, vec![AlphaCurrency::from(1_000)]); let new_cold = U256::from(30); let new_hot = U256::from(31); - let needed = 
SubtensorModule::get_network_lock_cost(); + let needed: u64 = SubtensorModule::get_network_lock_cost().into(); SubtensorModule::add_balance_to_coldkey_account(&new_cold, needed.saturating_mul(10)); assert_ok!(SubtensorModule::do_register_network( @@ -805,7 +817,7 @@ fn register_network_fails_before_prune_keeps_existing() { let imm = SubtensorModule::get_network_immunity_period(); System::set_block_number(imm + 50); - Emission::::insert(net, vec![10u64]); + Emission::::insert(net, vec![AlphaCurrency::from(10)]); let caller_cold = U256::from(50); let caller_hot = U256::from(51); From 5bda41ed33e804afdb0a80f1da2b1389833cf3c8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 12 Aug 2025 21:36:55 +0000 Subject: [PATCH 046/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index e9cf2443fb..228d279778 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -827,7 +827,7 @@ mod dispatches { /// - The ip type v4 or v6. 
/// #[pallet::call_index(5)] - #[pallet::weight((Weight::from_parts(30_170_000, 0) + #[pallet::weight((Weight::from_parts(21_890_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_prometheus( @@ -1193,8 +1193,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] - #[pallet::weight((Weight::from_parts(260_500_000, 0) - .saturating_add(T::DbWeight::get().reads(34)) + #[pallet::weight((Weight::from_parts(200_400_000, 0) + .saturating_add(T::DbWeight::get().reads(37_u64)) .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Operational, Pays::No))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) @@ -1538,8 +1538,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] - #[pallet::weight((Weight::from_parts(239_700_000, 0) - .saturating_add(T::DbWeight::get().reads(33)) + #[pallet::weight((Weight::from_parts(180_900_000, 0) + .saturating_add(T::DbWeight::get().reads(36_u64)) .saturating_add(T::DbWeight::get().writes(50)), DispatchClass::Operational, Pays::No))] pub fn register_network_with_identity( origin: OriginFor, From a6307b17f1682e7f58c3b758084bd59ec3d56cc8 Mon Sep 17 00:00:00 2001 From: open-junius Date: Wed, 13 Aug 2025 19:56:08 +0800 Subject: [PATCH 047/379] remove stake is ok --- .../test/staking.precompile.wrap.test.ts | 21 +------------------ 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/evm-tests/test/staking.precompile.wrap.test.ts b/evm-tests/test/staking.precompile.wrap.test.ts index 077f78a42b..e4d666adf1 100644 --- a/evm-tests/test/staking.precompile.wrap.test.ts +++ b/evm-tests/test/staking.precompile.wrap.test.ts @@ -18,7 +18,6 @@ import { } from "../src/subtensor"; import { ethers } from "ethers"; import { generateRandomEthersWallet } from "../src/utils"; -import { log } from "console"; 
import { abi, bytecode } from "../src/contracts/stakeWrap"; @@ -47,14 +46,13 @@ describe("Test staking precompile add from deployed contract", () => { console.log("will test in subnet: ", netuid); }); - it("Staker add stake", async () => { + it("Staker add and remove stake", async () => { let netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1; const contractFactory = new ethers.ContractFactory(abi, bytecode, wallet1) const contract = await contractFactory.deploy() await contract.waitForDeployment() - // stake will remove the balance from contract, need transfer token to deployed contract const ethTransfer = { to: contract.target.toString(), @@ -64,9 +62,6 @@ describe("Test staking precompile add from deployed contract", () => { const txResponse = await wallet1.sendTransaction(ethTransfer) await txResponse.wait(); - const balance = await api.query.System.Account.getValue(convertH160ToSS58(contract.target.toString())) - console.log(" == balance is ", balance.data.free) - const deployedContract = new ethers.Contract( contract.target.toString(), abi, @@ -80,13 +75,6 @@ describe("Test staking precompile add from deployed contract", () => { ); await tx.wait(); - const stake = await api.query.SubtensorModule.Alpha.getValue( - convertPublicKeyToSs58(hotkey.publicKey), - convertH160ToSS58(contract.target.toString()), - netuid - ) - console.log(" == before remove stake is ", stake) - const tx2 = await deployedContract.removeStake( hotkey.publicKey, netuid, @@ -94,13 +82,6 @@ describe("Test staking precompile add from deployed contract", () => { ); await tx2.wait(); - const stake2 = await api.query.SubtensorModule.Alpha.getValue( - convertPublicKeyToSs58(hotkey.publicKey), - convertH160ToSS58(contract.target.toString()), - netuid - ) - console.log(" == after remove stake is ", stake2) - }); it("Staker add stake limit", async () => { From 67908b67a71b7577cc2b560f586d842f24f67766 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 13 Aug 2025 
13:44:47 +0000 Subject: [PATCH 048/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index d7e9b9ab1e..c53bb8a078 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -413,7 +413,7 @@ mod dispatches { /// - Attempting to set weights with max value exceeding limit. /// #[pallet::call_index(8)] - #[pallet::weight((Weight::from_parts(2_684_000, 0) + #[pallet::weight((Weight::from_parts(2_083_000, 0) .saturating_add(T::DbWeight::get().reads(0_u64)) .saturating_add(T::DbWeight::get().writes(0_u64)), DispatchClass::Normal, Pays::No))] pub fn set_tao_weights( @@ -1575,7 +1575,7 @@ mod dispatches { /// * `TxRateLimitExceeded`: /// - Thrown if key has hit transaction rate limit #[pallet::call_index(83)] - #[pallet::weight((Weight::from_parts(28_910_000, 0) + #[pallet::weight((Weight::from_parts(22_070_000, 0) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(0)), DispatchClass::Operational, Pays::Yes))] pub fn unstake_all(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { @@ -1608,7 +1608,7 @@ mod dispatches { /// * `TxRateLimitExceeded`: /// - Thrown if key has hit transaction rate limit #[pallet::call_index(84)] - #[pallet::weight((Weight::from_parts(395_800_000, 0) + #[pallet::weight((Weight::from_parts(291_900_000, 0) .saturating_add(T::DbWeight::get().reads(33)) .saturating_add(T::DbWeight::get().writes(16)), DispatchClass::Operational, Pays::Yes))] pub fn unstake_all_alpha(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { @@ -2075,7 +2075,7 @@ mod dispatches { /// at which or better (higher) the staking should execute. 
/// Without limit_price it remove all the stake similar to `remove_stake` extrinsic #[pallet::call_index(103)] - #[pallet::weight((Weight::from_parts(421_500_000, 10142) + #[pallet::weight((Weight::from_parts(313_200_000, 10142) .saturating_add(T::DbWeight::get().reads(30_u64)) .saturating_add(T::DbWeight::get().writes(14_u64)), DispatchClass::Normal, Pays::Yes))] pub fn remove_stake_full_limit( @@ -2156,7 +2156,7 @@ mod dispatches { /// Emits a `SymbolUpdated` event on success. #[pallet::call_index(112)] #[pallet::weight(( - Weight::from_parts(28_910_000, 0).saturating_add(T::DbWeight::get().reads_writes(4, 1)), + Weight::from_parts(19_600_000, 0).saturating_add(T::DbWeight::get().reads_writes(4, 1)), DispatchClass::Operational, Pays::Yes ))] From 219f1b6dc76699f651134be30219366fe81da16f Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 13 Aug 2025 11:36:02 -0400 Subject: [PATCH 049/379] wip --- .../subtensor/src/coinbase/run_coinbase.rs | 26 +++++++--- pallets/subtensor/src/lib.rs | 49 +++++++++++++++++++ 2 files changed, 69 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 551883653b..9a966104e5 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -453,16 +453,30 @@ impl Pallet { } } + let maybe_owner_hotkey = SubnetOwnerHotkey::::try_get(netuid); + // Distribute mining incentives. for (hotkey, incentive) in incentives { log::debug!("incentives: hotkey: {incentive:?}"); - if let Ok(owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) { - if hotkey == owner_hotkey { - log::debug!( - "incentives: hotkey: {hotkey:?} is SN owner hotkey, skipping {incentive:?}" - ); - continue; // Skip/burn miner-emission for SN owner hotkey. 
+ if maybe_owner_hotkey.is_ok_and(|owner_hotkey| hotkey == owner_hotkey) { + log::debug!("incentives: hotkey: {hotkey:?} is SN owner hotkey"); + match RecycleOrBurn::::try_get(netuid) { + Ok(RecycleOrBurn::Recycle) => { + log::debug!("recycling {incentive:?}"); + // recycle the incentive + + // Recycle means we should decrease the alpha issuance tracker. + SubnetAlphaOut::::mutate(netuid, |total| { + *total = total.saturating_sub(incentive); + }); + + continue; + } + Ok(RecycleOrBurn::Burn) | Err(_) => { + log::debug!("burning {incentive:?}"); // Skip/burn miner-emission for SN owner hotkey. + continue; + } } } // Increase stake for miner. diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 17f7c69672..92f4b137ff 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -304,6 +304,46 @@ pub mod pallet { pub additional: Vec, } + /// Enum for recycle or burn for the owner_uid(s) + /// + /// Can specify + #[derive(TypeInfo, Clone, PartialEq, Eq, Debug)] + #[default = Self::Burn(U16::MAX)] // default to burn everything + pub enum RecycleOrBurnEnum { + Burn(u16), // u16-normalized weight + Recycle(u16), + } + impl codec::EncodeLike for RecycleOrBurnEnum { + fn encode_to(&self, e: &mut E) -> Result<(), E::Error> { + match self { + Self::Burn(weight) => { + e.encode_u8(0)?; + e.encode_u16(*weight) + } + Self::Recycle(weight) => { + e.encode_u8(1)?; + e.encode_u16(*weight) + } + } + } + } + impl codec::DecodeLike for RecycleOrBurnEnum { + fn decode(d: &mut D) -> Result { + let tag = d.read_byte()?; + match tag { + 0 => { + let weight = d.read_u16()?; + Ok(Self::Burn(weight)) + } + 1 => { + let weight = d.read_u16()?; + Ok(Self::Recycle(weight)) + } + _ => Err(codec::Error::from("invalid tag")), + } + } + } + /// ============================ /// ==== Staking + Accounts ==== /// ============================ @@ -552,6 +592,11 @@ pub mod pallet { T::InitialSubnetOwnerCut::get() } #[pallet::type_value] + /// Default value for 
recycle or burn. + pub fn DefaultRecycleOrBurn() -> RecycleOrBurnEnum { + RecycleOrBurnEnum::Burn(U16::MAX) // default to burn + } + #[pallet::type_value] /// Default value for network rate limit. pub fn DefaultNetworkRateLimit() -> u64 { if cfg!(feature = "pow-faucet") { @@ -1303,6 +1348,10 @@ pub mod pallet { pub type SubnetOwnerHotkey = StorageMap<_, Identity, NetUid, T::AccountId, ValueQuery, DefaultSubnetOwner>; #[pallet::storage] + /// --- MAP ( netuid ) --> recycle_or_burn + pub type RecycleOrBurn = + StorageMap<_, Identity, NetUid, RecycleOrBurnEnum, ValueQuery, DefaultRecycleOrBurn>; + #[pallet::storage] /// --- MAP ( netuid ) --> serving_rate_limit pub type ServingRateLimit = StorageMap<_, Identity, NetUid, u64, ValueQuery, DefaultServingRateLimit>; From bb1f5e97bb1fe50b71feb4920e6be664bd5f1580 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 14 Aug 2025 10:14:17 +0000 Subject: [PATCH 050/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 76ae53905b..550c25fc1f 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -693,7 +693,7 @@ mod dispatches { /// - Attempting to set prometheus information withing the rate limit min. 
/// #[pallet::call_index(4)] - #[pallet::weight((Weight::from_parts(36_090_000, 0) + #[pallet::weight((Weight::from_parts(25_630_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_axon( @@ -1609,8 +1609,8 @@ mod dispatches { /// - Thrown if key has hit transaction rate limit #[pallet::call_index(84)] #[pallet::weight((Weight::from_parts(294_800_000, 0) - .saturating_add(T::DbWeight::get().reads(33)) - .saturating_add(T::DbWeight::get().writes(16)), DispatchClass::Operational, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(38_u64)) + .saturating_add(T::DbWeight::get().writes(21_u64)), DispatchClass::Operational, Pays::Yes))] pub fn unstake_all_alpha(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_unstake_all_alpha(origin, hotkey) } @@ -1723,8 +1723,8 @@ mod dispatches { #[pallet::call_index(87)] #[pallet::weight(( Weight::from_parts(274_400_000, 0) - .saturating_add(T::DbWeight::get().reads(32)) - .saturating_add(T::DbWeight::get().writes(17)), + .saturating_add(T::DbWeight::get().reads(37_u64)) + .saturating_add(T::DbWeight::get().writes(22_u64)), DispatchClass::Operational, Pays::Yes ))] @@ -1896,8 +1896,8 @@ mod dispatches { #[pallet::call_index(90)] #[pallet::weight(( Weight::from_parts(330_400_000, 0) - .saturating_add(T::DbWeight::get().reads(32)) - .saturating_add(T::DbWeight::get().writes(17)), + .saturating_add(T::DbWeight::get().reads(37_u64)) + .saturating_add(T::DbWeight::get().writes(22_u64)), DispatchClass::Operational, Pays::Yes ))] From b3a730ffc88210cce7e7bf92b8995807d4307129 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 14 Aug 2025 18:17:46 +0000 Subject: [PATCH 051/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs 
b/pallets/subtensor/src/macros/dispatches.rs index 4b1d677728..5069bbc36f 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1194,8 +1194,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) - .saturating_add(T::DbWeight::get().reads(36)) - .saturating_add(T::DbWeight::get().writes(52)), DispatchClass::Operational, Pays::No))] + .saturating_add(T::DbWeight::get().reads(37_u64)) + .saturating_add(T::DbWeight::get().writes(51_u64)), DispatchClass::Operational, Pays::No))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1539,8 +1539,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) - .saturating_add(T::DbWeight::get().reads(35)) - .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Operational, Pays::No))] + .saturating_add(T::DbWeight::get().reads(36_u64)) + .saturating_add(T::DbWeight::get().writes(50_u64)), DispatchClass::Operational, Pays::No))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, @@ -2201,7 +2201,7 @@ mod dispatches { /// * commit_reveal_version (`u16`): /// - The client (bittensor-drand) version #[pallet::call_index(113)] - #[pallet::weight((Weight::from_parts(65_780_000, 0) + #[pallet::weight((Weight::from_parts(80_450_000, 0) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( From d44fd56d696f8d529c097eac9f623a5b345eb76d Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 14 Aug 2025 13:26:57 -0700 Subject: [PATCH 052/379] fix dynamic symbol --- pallets/subtensor/src/subnets/subnet.rs | 6 ------ pallets/subtensor/src/tests/subnet.rs 
| 2 ++ 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index e7c9d5d084..21df71e8d6 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -208,12 +208,6 @@ impl Pallet { let symbol = Self::get_next_available_symbol(netuid_to_register); TokenSymbol::::insert(netuid_to_register, symbol); - // --- 16. Init the pool by putting the lock as the initial alpha. - TokenSymbol::::insert( - netuid_to_register, - Self::get_symbol_for_subnet(netuid_to_register), - ); // Set subnet token symbol. - // Put initial TAO from lock into subnet TAO and produce numerically equal amount of Alpha // The initial TAO is the locked amount, with a minimum of 1 RAO and a cap of 100 TAO. let pool_initial_tao = Self::get_network_min_lock(); diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs index b60f3ffa41..6bf4a6873b 100644 --- a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -231,6 +231,7 @@ fn test_register_network_min_burn_at_default() { #[test] fn test_register_network_use_symbol_for_subnet_if_available() { new_test_ext(1).execute_with(|| { + SubtensorModule::set_max_subnets(SYMBOLS.len() as u16); for i in 0..(SYMBOLS.len() - 1) { let coldkey = U256::from(1_000_000 + i); let hotkey = U256::from(2_000_000 + i); @@ -317,6 +318,7 @@ fn test_register_network_use_next_available_symbol_if_symbol_for_subnet_is_taken fn test_register_network_use_default_symbol_if_all_symbols_are_taken() { new_test_ext(1).execute_with(|| { // Register networks until we have exhausted all symbols + SubtensorModule::set_max_subnets(SYMBOLS.len() as u16); for i in 0..(SYMBOLS.len() - 1) { let coldkey = U256::from(1_000_000 + i); let hotkey = U256::from(2_000_000 + i); From 64dc420f542bfbaa9f42e8ab5f88061b94c463bf Mon Sep 17 00:00:00 2001 From: open-junius Date: Mon, 18 Aug 2025 09:51:51 +0800 
Subject: [PATCH 053/379] add only owner modifier --- evm-tests/src/contracts/stakeWrap.sol | 27 +++++++++++++++++++++++---- evm-tests/src/contracts/stakeWrap.ts | 15 ++++++++++++++- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/evm-tests/src/contracts/stakeWrap.sol b/evm-tests/src/contracts/stakeWrap.sol index c5c9f7ae53..c7d11d42a0 100644 --- a/evm-tests/src/contracts/stakeWrap.sol +++ b/evm-tests/src/contracts/stakeWrap.sol @@ -15,13 +15,32 @@ interface Staking { ) external; function addStake(bytes32 hotkey, uint256 amount, uint256 netuid) external; + + function removeStake( + bytes32 hotkey, + uint256 amount, + uint256 netuid + ) external; } contract StakeWrap { - constructor() {} + address public owner; + constructor() { + owner = msg.sender; + } + + modifier onlyOwner() { + require(msg.sender == owner, "Only owner can call this function"); + _; + } + receive() external payable {} - function stake(bytes32 hotkey, uint256 netuid, uint256 amount) external { + function stake( + bytes32 hotkey, + uint256 netuid, + uint256 amount + ) external onlyOwner { // can't call precompile like this way, the call never go to runtime precompile //Staking(ISTAKING_ADDRESS).addStake(hotkey, amount, netuid); @@ -41,7 +60,7 @@ contract StakeWrap { uint256 limitPrice, uint256 amount, bool allowPartial - ) external { + ) external onlyOwner { // can't call precompile like this way, the call never go to runtime precompile // Staking(ISTAKING_ADDRESS).addStakeLimit( // hotkey, @@ -67,7 +86,7 @@ contract StakeWrap { bytes32 hotkey, uint256 netuid, uint256 amount - ) external { + ) external onlyOwner { bytes memory data = abi.encodeWithSelector( Staking.removeStake.selector, hotkey, diff --git a/evm-tests/src/contracts/stakeWrap.ts b/evm-tests/src/contracts/stakeWrap.ts index a6a24bdb21..07853470a1 100644 --- a/evm-tests/src/contracts/stakeWrap.ts +++ b/evm-tests/src/contracts/stakeWrap.ts @@ -4,6 +4,19 @@ export const abi = [ "stateMutability": "nonpayable", "type": 
"constructor" }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -90,4 +103,4 @@ export const abi = [ ]; // compiled with 0.8.20 -export const bytecode = "6080604052348015600e575f5ffd5b506107e78061001c5f395ff3fe608060405260043610610037575f3560e01c80632daedd52146100425780637d691e301461006a57806390b9d534146100925761003e565b3661003e57005b5f5ffd5b34801561004d575f5ffd5b50610068600480360381019061006391906104ba565b6100ba565b005b348015610075575f5ffd5b50610090600480360381019061008b91906104ba565b6101ea565b005b34801561009d575f5ffd5b506100b860048036038101906100b3919061053f565b61031a565b005b5f631fc9b14160e01b8483856040516024016100d8939291906105d4565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a83604051610160919061065b565b5f604051808303815f8787f1925050503d805f811461019a576040519150601f19603f3d011682016040523d82523d5f602084013e61019f565b606091505b50509050806101e3576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016101da906106cb565b60405180910390fd5b5050505050565b5f637d691e3060e01b848385604051602401610208939291906105d4565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a83604051610290919061065b565b5f604051808303815f8787f1925050503d805f81146102ca576040519150601f19603f3d011682016040523d82523d5f602084013e6102cf565b606091505b5050905080610313576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161030a906106cb565b60405180910390fd5b5050505050565b5f635beb6b7460e01b8684868589604051602401610
33c9594939291906106f8565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a836040516103c4919061065b565b5f604051808303815f8787f1925050503d805f81146103fe576040519150601f19603f3d011682016040523d82523d5f602084013e610403565b606091505b5050905080610447576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161043e90610793565b60405180910390fd5b50505050505050565b5f5ffd5b5f819050919050565b61046681610454565b8114610470575f5ffd5b50565b5f813590506104818161045d565b92915050565b5f819050919050565b61049981610487565b81146104a3575f5ffd5b50565b5f813590506104b481610490565b92915050565b5f5f5f606084860312156104d1576104d0610450565b5b5f6104de86828701610473565b93505060206104ef868287016104a6565b9250506040610500868287016104a6565b9150509250925092565b5f8115159050919050565b61051e8161050a565b8114610528575f5ffd5b50565b5f8135905061053981610515565b92915050565b5f5f5f5f5f60a0868803121561055857610557610450565b5b5f61056588828901610473565b9550506020610576888289016104a6565b9450506040610587888289016104a6565b9350506060610598888289016104a6565b92505060806105a98882890161052b565b9150509295509295909350565b6105bf81610454565b82525050565b6105ce81610487565b82525050565b5f6060820190506105e75f8301866105b6565b6105f460208301856105c5565b61060160408301846105c5565b949350505050565b5f81519050919050565b5f81905092915050565b8281835e5f83830152505050565b5f61063582610609565b61063f8185610613565b935061064f81856020860161061d565b80840191505092915050565b5f610666828461062b565b915081905092915050565b5f82825260208201905092915050565b7f6164645374616b652063616c6c206661696c65640000000000000000000000005f82015250565b5f6106b5601483610671565b91506106c082610681565b602082019050919050565b5f6020820190508181035f8301526106e2816106a9565b9050919050565b6106f28161050a565b82525050565b5f60a08201905061070b5f8301886105b6565b61071860208301876105c5565
b61072560408301866105c5565b61073260608301856106e9565b61073f60808301846105c5565b9695505050505050565b7f6164645374616b654c696d69742063616c6c206661696c6564000000000000005f82015250565b5f61077d601983610671565b915061078882610749565b602082019050919050565b5f6020820190508181035f8301526107aa81610771565b905091905056fea264697066735822122071b79d865c1a277a5c7a45dcea9fe84a941f14667654af515610dfd55d3a6fe764736f6c634300081e0033" +export const bytecode = "6080604052348015600e575f5ffd5b50335f5f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550610ad08061005b5f395ff3fe608060405260043610610042575f3560e01c80632daedd521461004d5780637d691e30146100755780638da5cb5b1461009d57806390b9d534146100c757610049565b3661004957005b5f5ffd5b348015610058575f5ffd5b50610073600480360381019061006e91906106bd565b6100ef565b005b348015610080575f5ffd5b5061009b600480360381019061009691906106bd565b6102ad565b005b3480156100a8575f5ffd5b506100b161046b565b6040516100be919061074c565b60405180910390f35b3480156100d2575f5ffd5b506100ed60048036038101906100e8919061079a565b61048f565b005b5f5f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461017d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161017490610891565b60405180910390fd5b5f631fc9b14160e01b84838560405160240161019b939291906108cd565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a836040516102239190610954565b5f604051808303815f8787f1925050503d805f811461025d576040519150601f19603f3d011682016040523d82523d5f602084013e610262565b606091505b50509050806102a6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161029d906109b4565b60405180910390fd5b5050505050565b5f5f9
054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461033b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161033290610891565b60405180910390fd5b5f637d691e3060e01b848385604051602401610359939291906108cd565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a836040516103e19190610954565b5f604051808303815f8787f1925050503d805f811461041b576040519150601f19603f3d011682016040523d82523d5f602084013e610420565b606091505b5050905080610464576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161045b906109b4565b60405180910390fd5b5050505050565b5f5f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b5f5f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461051d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161051490610891565b60405180910390fd5b5f635beb6b7460e01b868486858960405160240161053f9594939291906109e1565b604051602081830303815290604052907bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff838183161783525050505090505f61080573ffffffffffffffffffffffffffffffffffffffff165a836040516105c79190610954565b5f604051808303815f8787f1925050503d805f8114610601576040519150601f19603f3d011682016040523d82523d5f602084013e610606565b606091505b505090508061064a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161064190610a7c565b60405180910390fd5b50505050505050565b5f5ffd5b5f819050919050565b61066981610657565b8114610673575f5ffd5b50565b5f8135905061068481610660565b92915050565b5f819050919050565b61069c816
1068a565b81146106a6575f5ffd5b50565b5f813590506106b781610693565b92915050565b5f5f5f606084860312156106d4576106d3610653565b5b5f6106e186828701610676565b93505060206106f2868287016106a9565b9250506040610703868287016106a9565b9150509250925092565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f6107368261070d565b9050919050565b6107468161072c565b82525050565b5f60208201905061075f5f83018461073d565b92915050565b5f8115159050919050565b61077981610765565b8114610783575f5ffd5b50565b5f8135905061079481610770565b92915050565b5f5f5f5f5f60a086880312156107b3576107b2610653565b5b5f6107c088828901610676565b95505060206107d1888289016106a9565b94505060406107e2888289016106a9565b93505060606107f3888289016106a9565b925050608061080488828901610786565b9150509295509295909350565b5f82825260208201905092915050565b7f4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f5f8201527f6e00000000000000000000000000000000000000000000000000000000000000602082015250565b5f61087b602183610811565b915061088682610821565b604082019050919050565b5f6020820190508181035f8301526108a88161086f565b9050919050565b6108b881610657565b82525050565b6108c78161068a565b82525050565b5f6060820190506108e05f8301866108af565b6108ed60208301856108be565b6108fa60408301846108be565b949350505050565b5f81519050919050565b5f81905092915050565b8281835e5f83830152505050565b5f61092e82610902565b610938818561090c565b9350610948818560208601610916565b80840191505092915050565b5f61095f8284610924565b915081905092915050565b7f6164645374616b652063616c6c206661696c65640000000000000000000000005f82015250565b5f61099e601483610811565b91506109a98261096a565b602082019050919050565b5f6020820190508181035f8301526109cb81610992565b9050919050565b6109db81610765565b82525050565b5f60a0820190506109f45f8301886108af565b610a0160208301876108be565b610a0e60408301866108be565b610a1b60608301856109d2565b610a2860808301846108be565b9695505050505050565b7f6164645374616b654c696d69742063616c6c206661696c6564000000000000005f82015250565b5f610a66601983610811565b9150610a7182610a32565b602082019050919050565b5f60208
20190508181035f830152610a9381610a5a565b905091905056fea2646970667358221220f8ad692d7919fb10f08e5311c64a0aa705a4e665689967633c2ddade5398076664736f6c634300081e0033" From 204e27562451d559e35aed5ac2ec519d8cb62f2d Mon Sep 17 00:00:00 2001 From: open-junius Date: Mon, 18 Aug 2025 09:57:13 +0800 Subject: [PATCH 054/379] bump runtime version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 16af31170e..d2b7ed9831 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -213,7 +213,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 302, + spec_version: 303, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 1e325af5cc8f776f4e7e410229f9f798aa0a1e35 Mon Sep 17 00:00:00 2001 From: open-junius Date: Mon, 18 Aug 2025 10:30:13 +0800 Subject: [PATCH 055/379] commit Cargo.lock --- pallets/subtensor/src/macros/dispatches.rs | 4 ---- pallets/subtensor/src/utils/misc.rs | 6 +++--- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index c266eed79e..414b5d9acf 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1895,11 +1895,7 @@ mod dispatches { /// May emit a `StakeSwapped` event on success. 
#[pallet::call_index(90)] #[pallet::weight(( -<<<<<<< HEAD - Weight::from_parts(330_400_000, 0) -======= Weight::from_parts(411_500_000, 0) ->>>>>>> devnet-ready .saturating_add(T::DbWeight::get().reads(37_u64)) .saturating_add(T::DbWeight::get().writes(22_u64)), DispatchClass::Operational, diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index c7b93535f3..007ede9389 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -1,7 +1,7 @@ use super::*; -use crate::{ - Error, - system::{ensure_root, ensure_signed, ensure_signed_or_root, pallet_prelude::BlockNumberFor}, +use crate::Error; +use crate::system::{ + ensure_root, ensure_signed, ensure_signed_or_root, pallet_prelude::BlockNumberFor, }; use safe_math::*; use sp_core::Get; From bb57d798678d031a5b20c3b6f984849275a85e64 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 18 Aug 2025 04:40:53 +0000 Subject: [PATCH 056/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 414b5d9acf..66bf481420 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -693,7 +693,7 @@ mod dispatches { /// - Attempting to set prometheus information withing the rate limit min. 
/// #[pallet::call_index(4)] - #[pallet::weight((Weight::from_parts(25_630_000, 0) + #[pallet::weight((Weight::from_parts(34_500_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_axon( From aa06e5a1951770b79a83f0a57ce77794bf5c0278 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 18 Aug 2025 10:17:44 -0700 Subject: [PATCH 057/379] update destroy_alpha_in_out_stakes --- pallets/subtensor/src/coinbase/root.rs | 138 ++++++++++++++++--------- 1 file changed, 91 insertions(+), 47 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 7618cf7c48..2a7b180584 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -580,62 +580,96 @@ impl Pallet { } pub fn destroy_alpha_in_out_stakes(netuid: NetUid) -> DispatchResult { - // 1. Ensure the subnet exists. + // 1) Ensure the subnet exists. ensure!( Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist ); - // 2. Basic info. + // 2) Owner / lock cost. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); - let lock_cost_u64: u64 = Self::get_subnet_locked_balance(netuid).into(); + let lock_cost: TaoCurrency = Self::get_subnet_locked_balance(netuid); - // Owner-cut already received from emissions. - let total_emission_u64: u64 = Emission::::get(netuid) + // 3) Compute owner's received emission cut with saturation. 
+ let total_emission_u128: u128 = Emission::::get(netuid) .into_iter() - .map(Into::::into) - .sum(); - let owner_fraction = Self::get_float_subnet_owner_cut(); - let owner_received_emission_u64 = U96F32::from_num(total_emission_u64) + .fold(0u128, |acc, e| { + acc.saturating_add(Into::::into(e) as u128) + }); + + let owner_fraction: U96F32 = Self::get_float_subnet_owner_cut(); + let owner_received_emission_u64: u64 = U96F32::from_num(total_emission_u128) .saturating_mul(owner_fraction) .floor() .saturating_to_num::(); - // 3. Gather α-out stakers (U64F64 -> use raw bits as weights). - let mut total_alpha_bits: u128 = 0; + // 4) Enumerate all alpha entries on this subnet: + // - collect keys to remove, + // - collect per-(hot,cold) actual alpha value for pro-rata (with fallback to raw share), + // - track hotkeys to clear pool totals. + let mut keys_to_remove: Vec<(T::AccountId, T::AccountId)> = Vec::new(); + let mut hotkeys_seen: Vec = Vec::new(); let mut stakers: Vec<(T::AccountId, T::AccountId, u128)> = Vec::new(); + let mut total_alpha_value_u128: u128 = 0; + + for ((hot, cold, this_netuid), share_u64f64) in Alpha::::iter() { + if this_netuid != netuid { + continue; + } + + keys_to_remove.push((hot.clone(), cold.clone())); + if !hotkeys_seen.contains(&hot) { + hotkeys_seen.push(hot.clone()); + } + + // Primary: actual alpha value via share pool. + let pool = Self::get_alpha_share_pool(hot.clone(), netuid); + let actual_val_u64 = pool.try_get_value(&cold).unwrap_or(0); + + // Fallback: if pool uninitialized (denominator/shared_value=0), treat raw Alpha share as value. 
+ let val_u64 = if actual_val_u64 == 0 { + share_u64f64.saturating_to_num::() + } else { + actual_val_u64 + }; - for ((hot, cold, this_netuid), alpha) in Alpha::::iter() { - if this_netuid == netuid { - let a_bits: u128 = alpha.to_bits(); // <- was `alpha.into()`; that doesn't exist - total_alpha_bits = total_alpha_bits.saturating_add(a_bits); - stakers.push((hot, cold, a_bits)); + if val_u64 > 0 { + let val_u128 = val_u64 as u128; + total_alpha_value_u128 = total_alpha_value_u128.saturating_add(val_u128); + stakers.push((hot, cold, val_u128)); } } - // 4. Pro‑rata distribution – TAO restaked to ROOT. - let subnet_tao_u64: u64 = SubnetTAO::::get(netuid).into(); - let root_netuid = NetUid::ROOT; + // 5) Determine the TAO pot and pre-adjust accounting to avoid double counting. + let pot_tao: TaoCurrency = SubnetTAO::::get(netuid); + let pot_u64: u64 = pot_tao.into(); + + if pot_u64 > 0 { + // Remove TAO from dissolving subnet BEFORE restaking to ROOT to keep TotalStake consistent. + SubnetTAO::::remove(netuid); + TotalStake::::mutate(|total| *total = total.saturating_sub(pot_tao)); + } - if total_alpha_bits > 0 && subnet_tao_u64 > 0 && !stakers.is_empty() { + // 6) Pro‑rata distribution of the pot by alpha value (largest‑remainder). 
+ if pot_u64 > 0 && total_alpha_value_u128 > 0 && !stakers.is_empty() { struct Portion { hot: A, cold: C, - share: u64, - rem: u128, + share: u64, // TAO to restake on ROOT + rem: u128, // remainder for largest‑remainder method } - let pot_u128 = subnet_tao_u64 as u128; + let pot_u128: u128 = pot_u64 as u128; let mut portions: Vec> = Vec::with_capacity(stakers.len()); let mut distributed: u128 = 0; - for (hot, cold, a_bits) in &stakers { - let prod = pot_u128.saturating_mul(*a_bits); - let share_u128 = prod.checked_div(total_alpha_bits).unwrap_or_default(); - let share_u64 = share_u128.min(u64::MAX as u128) as u64; - distributed = distributed.saturating_add(share_u64 as u128); + for (hot, cold, alpha_val) in &stakers { + let prod: u128 = pot_u128.saturating_mul(*alpha_val); + let share_u128: u128 = prod.checked_div(total_alpha_value_u128).unwrap_or_default(); + let share_u64: u64 = share_u128.min(u128::from(u64::MAX)) as u64; + distributed = distributed.saturating_add(u128::from(share_u64)); - let rem = prod.checked_rem(total_alpha_bits).unwrap_or_default(); + let rem: u128 = prod.checked_rem(total_alpha_value_u128).unwrap_or_default(); portions.push(Portion { hot: hot.clone(), cold: cold.clone(), @@ -644,46 +678,56 @@ impl Pallet { }); } - // Largest‑remainder method; clamp for wasm32 (usize = 32‑bit). - let leftover = pot_u128.saturating_sub(distributed); + let leftover: u128 = pot_u128.saturating_sub(distributed); if leftover > 0 { portions.sort_by(|a, b| b.rem.cmp(&a.rem)); - let give = core::cmp::min(leftover, portions.len() as u128) as usize; + let give: usize = core::cmp::min(leftover, portions.len() as u128) as usize; for p in portions.iter_mut().take(give) { p.share = p.share.saturating_add(1); } } - // Restake into root and clean α records. + // Restake each portion into ROOT (stable 1:1), no limit required. 
+ let root_netuid = NetUid::ROOT; for p in portions { if p.share > 0 { Self::stake_into_subnet( &p.hot, &p.cold, root_netuid, - p.share.into(), + TaoCurrency::from(p.share), TaoCurrency::from(0), false, )?; } - Alpha::::remove((&p.hot, &p.cold, netuid)); - } - } else { - // No α-out or no TAO – just clear α records. - for (hot, cold, _) in &stakers { - Alpha::::remove((hot.clone(), cold.clone(), netuid)); } } - // 5. Reset α in/out counters — use typed zeros (no inference issues). - SubnetAlphaIn::::insert(netuid, AlphaCurrency::from(0)); - SubnetAlphaOut::::insert(netuid, AlphaCurrency::from(0)); + // 7) Destroy all α-in/α-out state for this subnet. + // 7.a) Remove every (hot, cold, netuid) α entry. + for (hot, cold) in keys_to_remove { + Alpha::::remove((hot, cold, netuid)); + } + // 7.b) Clear share‑pool totals for each hotkey on this subnet. + for hot in hotkeys_seen { + TotalHotkeyAlpha::::remove(&hot, netuid); + TotalHotkeyShares::::remove(&hot, netuid); + } + // 7.c) Remove α‑in/α‑out counters (fully destroyed). + SubnetAlphaIn::::remove(netuid); + SubnetAlphaInProvided::::remove(netuid); + SubnetAlphaOut::::remove(netuid); + + // 8) Refund remaining lock to subnet owner: + // refund = max(0, lock_cost - owner_received_emission). + let refund_u64: u64 = + Into::::into(lock_cost).saturating_sub(owner_received_emission_u64); + + // Clear the locked balance on the subnet. + Self::set_subnet_locked_balance(netuid, TaoCurrency::ZERO); - // 6. Refund remaining lock to subnet owner. - let refund_u64 = lock_cost_u64.saturating_sub(owner_received_emission_u64); - Self::set_subnet_locked_balance(netuid, TaoCurrency::from(0)); if refund_u64 > 0 { - // This helper expects runtime Balance (u64), not TaoCurrency. + // Add back to owner’s coldkey free balance (expects runtime Balance, not TaoCurrency). 
Self::add_balance_to_coldkey_account(&owner_coldkey, refund_u64); } From ba1585502147b158f6ab074de4a49e759f576633 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 18 Aug 2025 12:46:38 -0700 Subject: [PATCH 058/379] convert alpha to tao --- pallets/subtensor/src/coinbase/root.rs | 57 ++++++++++++++++--------- pallets/subtensor/src/tests/networks.rs | 56 ++++++++++++++++++------ 2 files changed, 82 insertions(+), 31 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 2a7b180584..7e6f522c06 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -23,6 +23,7 @@ use safe_math::*; use sp_core::Get; use substrate_fixed::types::{I64F64, U96F32}; use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; +use subtensor_swap_interface::SwapHandler; impl Pallet { /// Fetches the total count of root network validators @@ -590,22 +591,40 @@ impl Pallet { let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); let lock_cost: TaoCurrency = Self::get_subnet_locked_balance(netuid); - // 3) Compute owner's received emission cut with saturation. - let total_emission_u128: u128 = Emission::::get(netuid) - .into_iter() - .fold(0u128, |acc, e| { - acc.saturating_add(Into::::into(e) as u128) - }); + // 3) Compute owner's received emission in TAO at current price. + // + // Emission:: is Vec. We: + // - sum emitted α, + // - apply owner fraction to get owner α, + // - convert owner α to τ using current price, + // - use that τ value for the refund formula. 
+ let total_emitted_alpha_u128: u128 = + Emission::::get(netuid) + .into_iter() + .fold(0u128, |acc, e_alpha| { + let e_u64: u64 = Into::::into(e_alpha); + acc.saturating_add(e_u64 as u128) + }); let owner_fraction: U96F32 = Self::get_float_subnet_owner_cut(); - let owner_received_emission_u64: u64 = U96F32::from_num(total_emission_u128) + let owner_alpha_u64: u64 = U96F32::from_num(total_emitted_alpha_u128) .saturating_mul(owner_fraction) .floor() .saturating_to_num::(); - // 4) Enumerate all alpha entries on this subnet: + // Current α→τ price (TAO per 1 α) for this subnet. + let cur_price: U96F32 = T::SwapInterface::current_alpha_price(netuid.into()); + + // Convert owner α to τ at current price; floor to integer τ. + let owner_emission_tau_u64: u64 = U96F32::from_num(owner_alpha_u64) + .saturating_mul(cur_price) + .floor() + .saturating_to_num::(); + let owner_emission_tau: TaoCurrency = owner_emission_tau_u64.into(); + + // 4) Enumerate all α entries on this subnet to build distribution weights and cleanup lists. // - collect keys to remove, - // - collect per-(hot,cold) actual alpha value for pro-rata (with fallback to raw share), + // - per (hot,cold) α VALUE (not shares) with fallback to raw share if pool uninitialized, // - track hotkeys to clear pool totals. let mut keys_to_remove: Vec<(T::AccountId, T::AccountId)> = Vec::new(); let mut hotkeys_seen: Vec = Vec::new(); @@ -622,11 +641,11 @@ impl Pallet { hotkeys_seen.push(hot.clone()); } - // Primary: actual alpha value via share pool. + // Primary: actual α value via share pool. let pool = Self::get_alpha_share_pool(hot.clone(), netuid); let actual_val_u64 = pool.try_get_value(&cold).unwrap_or(0); - // Fallback: if pool uninitialized (denominator/shared_value=0), treat raw Alpha share as value. + // Fallback: if pool uninitialized, treat raw Alpha share as value. 
let val_u64 = if actual_val_u64 == 0 { share_u64f64.saturating_to_num::() } else { @@ -650,7 +669,7 @@ impl Pallet { TotalStake::::mutate(|total| *total = total.saturating_sub(pot_tao)); } - // 6) Pro‑rata distribution of the pot by alpha value (largest‑remainder). + // 6) Pro‑rata distribution of the pot by α value (largest‑remainder). if pot_u64 > 0 && total_alpha_value_u128 > 0 && !stakers.is_empty() { struct Portion { hot: A, @@ -687,7 +706,7 @@ impl Pallet { } } - // Restake each portion into ROOT (stable 1:1), no limit required. + // Restake each portion into ROOT (stable 1:1), no price limit required. let root_netuid = NetUid::ROOT; for p in portions { if p.share > 0 { @@ -719,20 +738,20 @@ impl Pallet { SubnetAlphaOut::::remove(netuid); // 8) Refund remaining lock to subnet owner: - // refund = max(0, lock_cost - owner_received_emission). - let refund_u64: u64 = - Into::::into(lock_cost).saturating_sub(owner_received_emission_u64); + // refund = max(0, lock_cost(τ) − owner_received_emission_in_τ). + let refund: TaoCurrency = lock_cost.saturating_sub(owner_emission_tau); // Clear the locked balance on the subnet. Self::set_subnet_locked_balance(netuid, TaoCurrency::ZERO); - if refund_u64 > 0 { - // Add back to owner’s coldkey free balance (expects runtime Balance, not TaoCurrency). - Self::add_balance_to_coldkey_account(&owner_coldkey, refund_u64); + if !refund.is_zero() { + // Add back to owner’s coldkey free balance (expects runtime Balance u64). 
+ Self::add_balance_to_coldkey_account(&owner_coldkey, refund.to_u64()); } Ok(()) } + pub fn get_network_to_prune() -> Option { let current_block: u64 = Self::get_current_block_as_u64(); let total_networks: u16 = TotalNetworks::::get(); diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index ba107c73a4..1b216fdfca 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -4,7 +4,7 @@ use crate::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; use sp_core::U256; -use substrate_fixed::types::U64F64; +use substrate_fixed::types::{U64F64, U96F32}; use subtensor_runtime_common::TaoCurrency; use subtensor_swap_interface::SwapHandler; @@ -170,13 +170,13 @@ fn dissolve_owner_cut_refund_logic() { let oh = U256::from(71); let net = add_dynamic_network(&oh, &oc); - // staker + // One staker and a TAO pot (not relevant to refund amount). let sh = U256::from(77); let sc = U256::from(88); Alpha::::insert((sh, sc, net), U64F64::from_num(100u128)); SubnetTAO::::insert(net, TaoCurrency::from(1_000)); - // lock & emission + // Lock & emissions: total emitted α = 800. let lock: TaoCurrency = TaoCurrency::from(2_000); SubtensorModule::set_subnet_locked_balance(net, lock); Emission::::insert( @@ -184,17 +184,36 @@ fn dissolve_owner_cut_refund_logic() { vec![AlphaCurrency::from(200), AlphaCurrency::from(600)], ); - // 18 % owner-cut + // Owner cut = 11796 / 65535 (about 18%). SubnetOwnerCut::::put(11_796u16); - let frac = 11_796f64 / 65_535f64; - let owner_em: TaoCurrency = TaoCurrency::from((800f64 * frac).floor() as u64); - let expect = lock.saturating_sub(owner_em); + + // Compute expected refund with the SAME math as the pallet. 
+ let frac: U96F32 = SubtensorModule::get_float_subnet_owner_cut(); + let total_emitted_alpha: u64 = 800; + let owner_alpha_u64: u64 = U96F32::from_num(total_emitted_alpha) + .saturating_mul(frac) + .floor() + .saturating_to_num::(); + + // Current α→τ price for this subnet. + let price: U96F32 = + ::SwapInterface::current_alpha_price(net.into()); + let owner_emission_tau_u64: u64 = U96F32::from_num(owner_alpha_u64) + .saturating_mul(price) + .floor() + .saturating_to_num::(); + + let expected_refund: TaoCurrency = + lock.saturating_sub(TaoCurrency::from(owner_emission_tau_u64)); let before = SubtensorModule::get_coldkey_balance(&oc); assert_ok!(SubtensorModule::do_dissolve_network(net)); let after = SubtensorModule::get_coldkey_balance(&oc); - assert_eq!(TaoCurrency::from(after), TaoCurrency::from(before) + expect); + assert_eq!( + TaoCurrency::from(after), + TaoCurrency::from(before) + expected_refund + ); }); } @@ -534,7 +553,6 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { // Runtime-exact min amount = min_stake + fee let min_amount = { let min_stake = DefaultMinStake::::get(); - // Use the same helper pallet uses in validate_add_stake let fee = ::SwapInterface::approx_fee_amount( netuid.into(), min_stake.into(), @@ -587,7 +605,7 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { AlphaCurrency::from(1_500), ], ); - SubnetOwnerCut::::put(32_768u16); // = 0.5 in fixed-point + SubnetOwnerCut::::put(32_768u16); // ~ 0.5 in fixed-point // ── 4) balances & α on ROOT before ────────────────────────────────── let root = NetUid::ROOT; @@ -617,6 +635,22 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { share[idx[i]] += 1; } + // ── 5b) expected owner refund with price-aware emission deduction ─── + let frac: U96F32 = SubtensorModule::get_float_subnet_owner_cut(); + let total_emitted_alpha: u64 = 1_000 + 2_000 + 1_500; // 4500 α + let owner_alpha_u64: u64 = U96F32::from_num(total_emitted_alpha) + .saturating_mul(frac) + .floor() + 
.saturating_to_num::(); + + let price: U96F32 = + ::SwapInterface::current_alpha_price(netuid.into()); + let owner_emission_tao_u64: u64 = U96F32::from_num(owner_alpha_u64) + .saturating_mul(price) + .floor() + .saturating_to_num::(); + let expected_refund: u64 = lock.saturating_sub(owner_emission_tao_u64); + // ── 6) run burn-and-restake ──────────────────────────────────────── assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(netuid)); @@ -643,8 +677,6 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { } // owner refund - let owner_em = (4_500u128 * 32_768u128 / 65_535u128) as u64; // same math pallet uses - let expected_refund = lock.saturating_sub(owner_em); assert_eq!( SubtensorModule::get_coldkey_balance(&owner_cold), owner_before + expected_refund From fe780c33bbfbd6d55805a89240965f00c7226ae0 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 18 Aug 2025 13:21:18 -0700 Subject: [PATCH 059/379] rename symbol --- pallets/subtensor/src/coinbase/root.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 7e6f522c06..356126a828 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -616,11 +616,11 @@ impl Pallet { let cur_price: U96F32 = T::SwapInterface::current_alpha_price(netuid.into()); // Convert owner α to τ at current price; floor to integer τ. - let owner_emission_tau_u64: u64 = U96F32::from_num(owner_alpha_u64) + let owner_emission_tao_u64: u64 = U96F32::from_num(owner_alpha_u64) .saturating_mul(cur_price) .floor() .saturating_to_num::(); - let owner_emission_tau: TaoCurrency = owner_emission_tau_u64.into(); + let owner_emission_tau: TaoCurrency = owner_emission_tao_u64.into(); // 4) Enumerate all α entries on this subnet to build distribution weights and cleanup lists. 
// - collect keys to remove, From 7ddf95ed3497f30b3a22bf7962033f241d90ab36 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 18 Aug 2025 15:11:55 -0700 Subject: [PATCH 060/379] unwind liquidity providers --- pallets/subtensor/src/coinbase/root.rs | 187 +-------- pallets/subtensor/src/staking/remove_stake.rs | 173 +++++++++ pallets/swap-interface/src/lib.rs | 1 + pallets/swap/src/pallet/impls.rs | 136 ++++++- pallets/swap/src/pallet/tests.rs | 366 ++++++++++++++++++ 5 files changed, 684 insertions(+), 179 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 356126a828..477a42f3b4 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -21,7 +21,7 @@ use frame_support::storage::IterableStorageDoubleMap; use frame_support::weights::Weight; use safe_math::*; use sp_core::Get; -use substrate_fixed::types::{I64F64, U96F32}; +use substrate_fixed::types::I64F64; use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; use subtensor_swap_interface::SwapHandler; @@ -367,17 +367,20 @@ impl Pallet { /// * 'NotSubnetOwner': If the caller does not own the specified subnet. /// pub fn do_dissolve_network(netuid: NetUid) -> dispatch::DispatchResult { - // --- Perform the dtTao-compatible cleanup before removing the network. - Self::destroy_alpha_in_out_stakes(netuid)?; - - // --- Finally, remove the network entirely. + // 1. --- The network exists? ensure!( Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist ); + + // 2. --- Perform the cleanup before removing the network. + T::SwapInterface::liquidate_all_liquidity_providers(netuid)?; + Self::destroy_alpha_in_out_stakes(netuid)?; + + // 3. --- Remove the network Self::remove_network(netuid); - // --- 6. Emit the NetworkRemoved event. + // 4. 
--- Emit the NetworkRemoved event log::debug!("NetworkRemoved( netuid:{netuid:?} )"); Self::deposit_event(Event::NetworkRemoved(netuid)); @@ -580,178 +583,6 @@ impl Pallet { LastRateLimitedBlock::::set(rate_limit_key, block); } - pub fn destroy_alpha_in_out_stakes(netuid: NetUid) -> DispatchResult { - // 1) Ensure the subnet exists. - ensure!( - Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist - ); - - // 2) Owner / lock cost. - let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); - let lock_cost: TaoCurrency = Self::get_subnet_locked_balance(netuid); - - // 3) Compute owner's received emission in TAO at current price. - // - // Emission:: is Vec. We: - // - sum emitted α, - // - apply owner fraction to get owner α, - // - convert owner α to τ using current price, - // - use that τ value for the refund formula. - let total_emitted_alpha_u128: u128 = - Emission::::get(netuid) - .into_iter() - .fold(0u128, |acc, e_alpha| { - let e_u64: u64 = Into::::into(e_alpha); - acc.saturating_add(e_u64 as u128) - }); - - let owner_fraction: U96F32 = Self::get_float_subnet_owner_cut(); - let owner_alpha_u64: u64 = U96F32::from_num(total_emitted_alpha_u128) - .saturating_mul(owner_fraction) - .floor() - .saturating_to_num::(); - - // Current α→τ price (TAO per 1 α) for this subnet. - let cur_price: U96F32 = T::SwapInterface::current_alpha_price(netuid.into()); - - // Convert owner α to τ at current price; floor to integer τ. - let owner_emission_tao_u64: u64 = U96F32::from_num(owner_alpha_u64) - .saturating_mul(cur_price) - .floor() - .saturating_to_num::(); - let owner_emission_tau: TaoCurrency = owner_emission_tao_u64.into(); - - // 4) Enumerate all α entries on this subnet to build distribution weights and cleanup lists. - // - collect keys to remove, - // - per (hot,cold) α VALUE (not shares) with fallback to raw share if pool uninitialized, - // - track hotkeys to clear pool totals. 
- let mut keys_to_remove: Vec<(T::AccountId, T::AccountId)> = Vec::new(); - let mut hotkeys_seen: Vec = Vec::new(); - let mut stakers: Vec<(T::AccountId, T::AccountId, u128)> = Vec::new(); - let mut total_alpha_value_u128: u128 = 0; - - for ((hot, cold, this_netuid), share_u64f64) in Alpha::::iter() { - if this_netuid != netuid { - continue; - } - - keys_to_remove.push((hot.clone(), cold.clone())); - if !hotkeys_seen.contains(&hot) { - hotkeys_seen.push(hot.clone()); - } - - // Primary: actual α value via share pool. - let pool = Self::get_alpha_share_pool(hot.clone(), netuid); - let actual_val_u64 = pool.try_get_value(&cold).unwrap_or(0); - - // Fallback: if pool uninitialized, treat raw Alpha share as value. - let val_u64 = if actual_val_u64 == 0 { - share_u64f64.saturating_to_num::() - } else { - actual_val_u64 - }; - - if val_u64 > 0 { - let val_u128 = val_u64 as u128; - total_alpha_value_u128 = total_alpha_value_u128.saturating_add(val_u128); - stakers.push((hot, cold, val_u128)); - } - } - - // 5) Determine the TAO pot and pre-adjust accounting to avoid double counting. - let pot_tao: TaoCurrency = SubnetTAO::::get(netuid); - let pot_u64: u64 = pot_tao.into(); - - if pot_u64 > 0 { - // Remove TAO from dissolving subnet BEFORE restaking to ROOT to keep TotalStake consistent. - SubnetTAO::::remove(netuid); - TotalStake::::mutate(|total| *total = total.saturating_sub(pot_tao)); - } - - // 6) Pro‑rata distribution of the pot by α value (largest‑remainder). 
- if pot_u64 > 0 && total_alpha_value_u128 > 0 && !stakers.is_empty() { - struct Portion { - hot: A, - cold: C, - share: u64, // TAO to restake on ROOT - rem: u128, // remainder for largest‑remainder method - } - - let pot_u128: u128 = pot_u64 as u128; - let mut portions: Vec> = Vec::with_capacity(stakers.len()); - let mut distributed: u128 = 0; - - for (hot, cold, alpha_val) in &stakers { - let prod: u128 = pot_u128.saturating_mul(*alpha_val); - let share_u128: u128 = prod.checked_div(total_alpha_value_u128).unwrap_or_default(); - let share_u64: u64 = share_u128.min(u128::from(u64::MAX)) as u64; - distributed = distributed.saturating_add(u128::from(share_u64)); - - let rem: u128 = prod.checked_rem(total_alpha_value_u128).unwrap_or_default(); - portions.push(Portion { - hot: hot.clone(), - cold: cold.clone(), - share: share_u64, - rem, - }); - } - - let leftover: u128 = pot_u128.saturating_sub(distributed); - if leftover > 0 { - portions.sort_by(|a, b| b.rem.cmp(&a.rem)); - let give: usize = core::cmp::min(leftover, portions.len() as u128) as usize; - for p in portions.iter_mut().take(give) { - p.share = p.share.saturating_add(1); - } - } - - // Restake each portion into ROOT (stable 1:1), no price limit required. - let root_netuid = NetUid::ROOT; - for p in portions { - if p.share > 0 { - Self::stake_into_subnet( - &p.hot, - &p.cold, - root_netuid, - TaoCurrency::from(p.share), - TaoCurrency::from(0), - false, - )?; - } - } - } - - // 7) Destroy all α-in/α-out state for this subnet. - // 7.a) Remove every (hot, cold, netuid) α entry. - for (hot, cold) in keys_to_remove { - Alpha::::remove((hot, cold, netuid)); - } - // 7.b) Clear share‑pool totals for each hotkey on this subnet. - for hot in hotkeys_seen { - TotalHotkeyAlpha::::remove(&hot, netuid); - TotalHotkeyShares::::remove(&hot, netuid); - } - // 7.c) Remove α‑in/α‑out counters (fully destroyed). 
- SubnetAlphaIn::::remove(netuid); - SubnetAlphaInProvided::::remove(netuid); - SubnetAlphaOut::::remove(netuid); - - // 8) Refund remaining lock to subnet owner: - // refund = max(0, lock_cost(τ) − owner_received_emission_in_τ). - let refund: TaoCurrency = lock_cost.saturating_sub(owner_emission_tau); - - // Clear the locked balance on the subnet. - Self::set_subnet_locked_balance(netuid, TaoCurrency::ZERO); - - if !refund.is_zero() { - // Add back to owner’s coldkey free balance (expects runtime Balance u64). - Self::add_balance_to_coldkey_account(&owner_coldkey, refund.to_u64()); - } - - Ok(()) - } - pub fn get_network_to_prune() -> Option { let current_block: u64 = Self::get_current_block_as_u64(); let total_networks: u16 = TotalNetworks::::get(); diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index c0311f7f33..5249d6782e 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -1,6 +1,7 @@ use subtensor_swap_interface::{OrderType, SwapHandler}; use super::*; +use substrate_fixed::types::U96F32; use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; impl Pallet { @@ -438,4 +439,176 @@ impl Pallet { Self::do_remove_stake(origin, hotkey, netuid, alpha_unstaked) } } + + pub fn destroy_alpha_in_out_stakes(netuid: NetUid) -> DispatchResult { + // 1) Ensure the subnet exists. + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + // 2) Owner / lock cost. + let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); + let lock_cost: TaoCurrency = Self::get_subnet_locked_balance(netuid); + + // 3) Compute owner's received emission in TAO at current price. + // + // Emission:: is Vec. We: + // - sum emitted α, + // - apply owner fraction to get owner α, + // - convert owner α to τ using current price, + // - use that τ value for the refund formula. 
+ let total_emitted_alpha_u128: u128 = + Emission::::get(netuid) + .into_iter() + .fold(0u128, |acc, e_alpha| { + let e_u64: u64 = Into::::into(e_alpha); + acc.saturating_add(e_u64 as u128) + }); + + let owner_fraction: U96F32 = Self::get_float_subnet_owner_cut(); + let owner_alpha_u64: u64 = U96F32::from_num(total_emitted_alpha_u128) + .saturating_mul(owner_fraction) + .floor() + .saturating_to_num::(); + + // Current α→τ price (TAO per 1 α) for this subnet. + let cur_price: U96F32 = T::SwapInterface::current_alpha_price(netuid.into()); + + // Convert owner α to τ at current price; floor to integer τ. + let owner_emission_tao_u64: u64 = U96F32::from_num(owner_alpha_u64) + .saturating_mul(cur_price) + .floor() + .saturating_to_num::(); + let owner_emission_tau: TaoCurrency = owner_emission_tao_u64.into(); + + // 4) Enumerate all α entries on this subnet to build distribution weights and cleanup lists. + // - collect keys to remove, + // - per (hot,cold) α VALUE (not shares) with fallback to raw share if pool uninitialized, + // - track hotkeys to clear pool totals. + let mut keys_to_remove: Vec<(T::AccountId, T::AccountId)> = Vec::new(); + let mut hotkeys_seen: Vec = Vec::new(); + let mut stakers: Vec<(T::AccountId, T::AccountId, u128)> = Vec::new(); + let mut total_alpha_value_u128: u128 = 0; + + for ((hot, cold, this_netuid), share_u64f64) in Alpha::::iter() { + if this_netuid != netuid { + continue; + } + + keys_to_remove.push((hot.clone(), cold.clone())); + if !hotkeys_seen.contains(&hot) { + hotkeys_seen.push(hot.clone()); + } + + // Primary: actual α value via share pool. + let pool = Self::get_alpha_share_pool(hot.clone(), netuid); + let actual_val_u64 = pool.try_get_value(&cold).unwrap_or(0); + + // Fallback: if pool uninitialized, treat raw Alpha share as value. 
+ let val_u64 = if actual_val_u64 == 0 { + share_u64f64.saturating_to_num::() + } else { + actual_val_u64 + }; + + if val_u64 > 0 { + let val_u128 = val_u64 as u128; + total_alpha_value_u128 = total_alpha_value_u128.saturating_add(val_u128); + stakers.push((hot, cold, val_u128)); + } + } + + // 5) Determine the TAO pot and pre-adjust accounting to avoid double counting. + let pot_tao: TaoCurrency = SubnetTAO::::get(netuid); + let pot_u64: u64 = pot_tao.into(); + + if pot_u64 > 0 { + // Remove TAO from dissolving subnet BEFORE restaking to ROOT to keep TotalStake consistent. + SubnetTAO::::remove(netuid); + TotalStake::::mutate(|total| *total = total.saturating_sub(pot_tao)); + } + + // 6) Pro‑rata distribution of the pot by α value (largest‑remainder). + if pot_u64 > 0 && total_alpha_value_u128 > 0 && !stakers.is_empty() { + struct Portion { + hot: A, + cold: C, + share: u64, // TAO to restake on ROOT + rem: u128, // remainder for largest‑remainder method + } + + let pot_u128: u128 = pot_u64 as u128; + let mut portions: Vec> = Vec::with_capacity(stakers.len()); + let mut distributed: u128 = 0; + + for (hot, cold, alpha_val) in &stakers { + let prod: u128 = pot_u128.saturating_mul(*alpha_val); + let share_u128: u128 = prod.checked_div(total_alpha_value_u128).unwrap_or_default(); + let share_u64: u64 = share_u128.min(u128::from(u64::MAX)) as u64; + distributed = distributed.saturating_add(u128::from(share_u64)); + + let rem: u128 = prod.checked_rem(total_alpha_value_u128).unwrap_or_default(); + portions.push(Portion { + hot: hot.clone(), + cold: cold.clone(), + share: share_u64, + rem, + }); + } + + let leftover: u128 = pot_u128.saturating_sub(distributed); + if leftover > 0 { + portions.sort_by(|a, b| b.rem.cmp(&a.rem)); + let give: usize = core::cmp::min(leftover, portions.len() as u128) as usize; + for p in portions.iter_mut().take(give) { + p.share = p.share.saturating_add(1); + } + } + + // Restake each portion into ROOT (stable 1:1), no price limit required. 
+ let root_netuid = NetUid::ROOT; + for p in portions { + if p.share > 0 { + Self::stake_into_subnet( + &p.hot, + &p.cold, + root_netuid, + TaoCurrency::from(p.share), + TaoCurrency::from(0), + false, + )?; + } + } + } + + // 7) Destroy all α-in/α-out state for this subnet. + // 7.a) Remove every (hot, cold, netuid) α entry. + for (hot, cold) in keys_to_remove { + Alpha::::remove((hot, cold, netuid)); + } + // 7.b) Clear share‑pool totals for each hotkey on this subnet. + for hot in hotkeys_seen { + TotalHotkeyAlpha::::remove(&hot, netuid); + TotalHotkeyShares::::remove(&hot, netuid); + } + // 7.c) Remove α‑in/α‑out counters (fully destroyed). + SubnetAlphaIn::::remove(netuid); + SubnetAlphaInProvided::::remove(netuid); + SubnetAlphaOut::::remove(netuid); + + // 8) Refund remaining lock to subnet owner: + // refund = max(0, lock_cost(τ) − owner_received_emission_in_τ). + let refund: TaoCurrency = lock_cost.saturating_sub(owner_emission_tau); + + // Clear the locked balance on the subnet. + Self::set_subnet_locked_balance(netuid, TaoCurrency::ZERO); + + if !refund.is_zero() { + // Add back to owner’s coldkey free balance (expects runtime Balance u64). 
+ Self::add_balance_to_coldkey_account(&owner_coldkey, refund.to_u64()); + } + + Ok(()) + } } diff --git a/pallets/swap-interface/src/lib.rs b/pallets/swap-interface/src/lib.rs index a0b39e151f..f29357c741 100644 --- a/pallets/swap-interface/src/lib.rs +++ b/pallets/swap-interface/src/lib.rs @@ -34,6 +34,7 @@ pub trait SwapHandler { alpha_delta: AlphaCurrency, ); fn is_user_liquidity_enabled(netuid: NetUid) -> bool; + fn liquidate_all_liquidity_providers(netuid: NetUid) -> DispatchResult; } #[derive(Debug, PartialEq)] diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index deebabd673..ff715bede8 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -5,7 +5,7 @@ use frame_support::storage::{TransactionOutcome, transactional}; use frame_support::{ensure, pallet_prelude::DispatchError, traits::Get}; use safe_math::*; use sp_arithmetic::helpers_128bit; -use sp_runtime::traits::AccountIdConversion; +use sp_runtime::{DispatchResult, traits::AccountIdConversion}; use substrate_fixed::types::{I64F64, U64F64, U96F32}; use subtensor_runtime_common::{ AlphaCurrency, BalanceOps, Currency, NetUid, SubnetInfo, TaoCurrency, @@ -1212,6 +1212,137 @@ impl Pallet { pub fn protocol_account_id() -> T::AccountId { T::ProtocolId::get().into_account_truncating() } + /// Liquidate (force-close) all LPs for `netuid`, **refund** providers, and reset all swap state. + /// + /// - **V3 path** (mechanism==1 && SwapV3Initialized): + /// * Remove **all** positions (user + protocol) via `do_remove_liquidity`. + /// * **Refund** each owner: TAO = `rm.tao + rm.fee_tao`, ALPHA = `rm.alpha + rm.fee_alpha`, + /// using `T::BalanceOps::{deposit_tao, deposit_alpha}`. + /// * Clear ActiveTickIndexManager entries, ticks, fee globals, price, tick, liquidity, + /// init flag, bitmap words, fee rate knob, and user LP flag. 
+ /// + /// - **V2 / non‑V3 path**: + /// * No per‑position records exist; still defensively clear the same V3 storages + /// (safe no‑ops) so the subnet leaves **no swap residue**. + pub fn do_liquidate_all_liquidity_providers(netuid: NetUid) -> DispatchResult { + let mechid = T::SubnetInfo::mechanism(netuid.into()); + let v3_initialized = SwapV3Initialized::::get(netuid); + let user_lp_enabled = + >::is_user_liquidity_enabled(netuid); + + let is_v3_mode = mechid == 1 && v3_initialized; + + if is_v3_mode { + // -------- V3: close every position, REFUND owners, then clear all V3 state -------- + + // 1) Snapshot all (owner, position_id) under this netuid to avoid iterator aliasing. + let mut to_close: sp_std::vec::Vec<(T::AccountId, PositionId)> = + sp_std::vec::Vec::new(); + for ((n, owner, pos_id), _pos) in Positions::::iter() { + if n == netuid { + to_close.push((owner, pos_id)); + } + } + + let protocol_account = Self::protocol_account_id(); + + // 2) Remove all positions (user + protocol) and REFUND both legs to the owner. + for (owner, pos_id) in to_close.into_iter() { + let rm = Self::do_remove_liquidity(netuid, &owner, pos_id)?; + + // Refund TAO: principal + accrued TAO fees. + let tao_refund = rm.tao.saturating_add(rm.fee_tao); + if tao_refund > TaoCurrency::ZERO { + T::BalanceOps::increase_balance(&owner, tao_refund); + } + + // Refund ALPHA: principal + accrued ALPHA fees. + let alpha_refund = rm.alpha.saturating_add(rm.fee_alpha); + if !alpha_refund.is_zero() { + // Credit ALPHA back to the provider on (coldkey=owner, hotkey=owner). + T::BalanceOps::increase_stake(&owner, &owner, netuid.into(), alpha_refund)?; + } + + // Mirror `remove_liquidity`: update **user-provided** reserves by principal only. + // Skip for protocol-owned liquidity which never contributed to provided reserves. 
+ if owner != protocol_account { + T::BalanceOps::decrease_provided_tao_reserve(netuid.into(), rm.tao); + T::BalanceOps::decrease_provided_alpha_reserve(netuid.into(), rm.alpha); + } + } + + // 3) Clear active tick index set by walking ticks we are about to clear. + let active_ticks: sp_std::vec::Vec = + Ticks::::iter_prefix(netuid).map(|(ti, _)| ti).collect(); + for ti in active_ticks { + ActiveTickIndexManager::::remove(netuid, ti); + } + + // 4) Clear storage: + // Positions (StorageNMap) – prefix is **(netuid,)** not just netuid. + let _ = Positions::::clear_prefix((netuid,), u32::MAX, None); + + // Ticks (DoubleMap) – OK to pass netuid as first key. + let _ = Ticks::::clear_prefix(netuid, u32::MAX, None); + + // Fee globals, price/tick/liquidity, v3 init flag. + FeeGlobalTao::::remove(netuid); + FeeGlobalAlpha::::remove(netuid); + CurrentLiquidity::::remove(netuid); + CurrentTick::::remove(netuid); + AlphaSqrtPrice::::remove(netuid); + SwapV3Initialized::::remove(netuid); + + // Active tick bitmap words (StorageNMap) – prefix is **(netuid,)**. + let _ = TickIndexBitmapWords::::clear_prefix((netuid,), u32::MAX, None); + + // Remove knobs (safe on deregistration). 
+ FeeRate::::remove(netuid); + EnabledUserLiquidity::::remove(netuid); + + log::debug!( + "liquidate_all_liquidity_providers: netuid={:?}, mode=V3, user_lp_enabled={}, v3_state_cleared + refunds", + netuid, + user_lp_enabled + ); + + return Ok(()); + } + + // -------- V2 / non‑V3: no positions to close; still nuke any V3 residues -------- + + // Positions (StorageNMap) – prefix is (netuid,) + let _ = Positions::::clear_prefix((netuid,), u32::MAX, None); + + // Active ticks set via ticks present (if any) + let active_ticks: sp_std::vec::Vec = + Ticks::::iter_prefix(netuid).map(|(ti, _)| ti).collect(); + for ti in active_ticks { + ActiveTickIndexManager::::remove(netuid, ti); + } + + let _ = Ticks::::clear_prefix(netuid, u32::MAX, None); + + FeeGlobalTao::::remove(netuid); + FeeGlobalAlpha::::remove(netuid); + CurrentLiquidity::::remove(netuid); + CurrentTick::::remove(netuid); + AlphaSqrtPrice::::remove(netuid); + SwapV3Initialized::::remove(netuid); + + let _ = TickIndexBitmapWords::::clear_prefix((netuid,), u32::MAX, None); + + FeeRate::::remove(netuid); + EnabledUserLiquidity::::remove(netuid); + + log::debug!( + "liquidate_all_liquidity_providers: netuid={:?}, mode=V2-or-nonV3, user_lp_enabled={}, state_cleared", + netuid, + user_lp_enabled + ); + + Ok(()) + } } impl SwapHandler for Pallet { @@ -1297,6 +1428,9 @@ impl SwapHandler for Pallet { fn is_user_liquidity_enabled(netuid: NetUid) -> bool { EnabledUserLiquidity::::get(netuid) } + fn liquidate_all_liquidity_providers(netuid: NetUid) -> DispatchResult { + Self::do_liquidate_all_liquidity_providers(netuid) + } } #[derive(Debug, PartialEq)] diff --git a/pallets/swap/src/pallet/tests.rs b/pallets/swap/src/pallet/tests.rs index 4c3a890c9b..6b5204f536 100644 --- a/pallets/swap/src/pallet/tests.rs +++ b/pallets/swap/src/pallet/tests.rs @@ -1910,3 +1910,369 @@ fn test_less_price_movement() { }); }); } + +/// V3 path: protocol + user positions exist, fees accrued, everything must be removed. 
+#[test] +fn test_liquidate_v3_removes_positions_ticks_and_state() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + + // Initialize V3 (creates protocol position, ticks, price, liquidity) + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + assert!(SwapV3Initialized::::get(netuid)); + + // Enable user LP (mock usually enables for 0..=100, but be explicit and consistent) + assert_ok!(Swap::toggle_user_liquidity( + RuntimeOrigin::root(), + netuid.into(), + true + )); + + // Add a user position across the full range to ensure ticks/bitmap are populated. + let min_price = tick_to_price(TickIndex::MIN); + let max_price = tick_to_price(TickIndex::MAX); + let tick_low = price_to_tick(min_price); + let tick_high = price_to_tick(max_price); + let liquidity = 2_000_000_000_u64; + + let (_pos_id, _tao, _alpha) = Pallet::::do_add_liquidity( + netuid, + &OK_COLDKEY_ACCOUNT_ID, + &OK_HOTKEY_ACCOUNT_ID, + tick_low, + tick_high, + liquidity, + ) + .expect("add liquidity"); + + // Accrue some global fees so we can verify fee storage is cleared later. + let sqrt_limit_price = SqrtPrice::from_num(1_000_000.0); + assert_ok!(Pallet::::do_swap( + netuid, + OrderType::Buy, + 1_000_000, + sqrt_limit_price, + false, + false + )); + + // Sanity: protocol & user positions exist, ticks exist, liquidity > 0 + let protocol_id = Pallet::::protocol_account_id(); + let prot_positions = + Positions::::iter_prefix_values((netuid, protocol_id)).collect::>(); + assert!(!prot_positions.is_empty()); + + let user_positions = Positions::::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID)) + .collect::>(); + assert_eq!(user_positions.len(), 1); + + assert!(Ticks::::get(netuid, TickIndex::MIN).is_some()); + assert!(Ticks::::get(netuid, TickIndex::MAX).is_some()); + assert!(CurrentLiquidity::::get(netuid) > 0); + + // There should be some bitmap words (active ticks) after adding a position. 
+ let had_bitmap_words = TickIndexBitmapWords::::iter_prefix((netuid,)) + .next() + .is_some(); + assert!(had_bitmap_words); + + // ACT: Liquidate & reset swap state + assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + + // ASSERT: positions cleared (both user and protocol) + assert_eq!( + Pallet::::count_positions(netuid, &OK_COLDKEY_ACCOUNT_ID), + 0 + ); + let prot_positions_after = + Positions::::iter_prefix_values((netuid, protocol_id)).collect::>(); + assert!(prot_positions_after.is_empty()); + let user_positions_after = + Positions::::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID)) + .collect::>(); + assert!(user_positions_after.is_empty()); + + // ASSERT: ticks cleared + assert!(Ticks::::iter_prefix(netuid).next().is_none()); + assert!(Ticks::::get(netuid, TickIndex::MIN).is_none()); + assert!(Ticks::::get(netuid, TickIndex::MAX).is_none()); + + // ASSERT: fee globals cleared + assert!(!FeeGlobalTao::::contains_key(netuid)); + assert!(!FeeGlobalAlpha::::contains_key(netuid)); + + // ASSERT: price/tick/liquidity flags cleared + assert!(!AlphaSqrtPrice::::contains_key(netuid)); + assert!(!CurrentTick::::contains_key(netuid)); + assert!(!CurrentLiquidity::::contains_key(netuid)); + assert!(!SwapV3Initialized::::contains_key(netuid)); + + // ASSERT: active tick bitmap cleared + assert!( + TickIndexBitmapWords::::iter_prefix((netuid,)) + .next() + .is_none() + ); + + // ASSERT: knobs removed on dereg + assert!(!FeeRate::::contains_key(netuid)); + assert!(!EnabledUserLiquidity::::contains_key(netuid)); + }); +} + +/// V3 path with user liquidity disabled at teardown: must still remove all positions and clear state. +#[test] +fn test_liquidate_v3_with_user_liquidity_disabled() { + new_test_ext().execute_with(|| { + // Pick a netuid the mock treats as "disabled" by default (per your comment >100), + // then explicitly walk through enable -> add -> disable -> liquidate. 
+ let netuid = NetUid::from(101); + + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + assert!(SwapV3Initialized::::get(netuid)); + + // Enable temporarily to add a user position + assert_ok!(Swap::toggle_user_liquidity( + RuntimeOrigin::root(), + netuid.into(), + true + )); + + let min_price = tick_to_price(TickIndex::MIN); + let max_price = tick_to_price(TickIndex::MAX); + let tick_low = price_to_tick(min_price); + let tick_high = price_to_tick(max_price); + let liquidity = 1_000_000_000_u64; + + let (_pos_id, _tao, _alpha) = Pallet::::do_add_liquidity( + netuid, + &OK_COLDKEY_ACCOUNT_ID, + &OK_HOTKEY_ACCOUNT_ID, + tick_low, + tick_high, + liquidity, + ) + .expect("add liquidity"); + + // Disable user LP *before* liquidation to validate that removal ignores this flag. + assert_ok!(Swap::toggle_user_liquidity( + RuntimeOrigin::root(), + netuid.into(), + false + )); + + // ACT + assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + + // ASSERT: positions & ticks gone, state reset + assert_eq!( + Pallet::::count_positions(netuid, &OK_COLDKEY_ACCOUNT_ID), + 0 + ); + assert!( + Positions::::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID)) + .next() + .is_none() + ); + assert!(Ticks::::iter_prefix(netuid).next().is_none()); + assert!( + TickIndexBitmapWords::::iter_prefix((netuid,)) + .next() + .is_none() + ); + assert!(!SwapV3Initialized::::contains_key(netuid)); + assert!(!AlphaSqrtPrice::::contains_key(netuid)); + assert!(!CurrentTick::::contains_key(netuid)); + assert!(!CurrentLiquidity::::contains_key(netuid)); + assert!(!FeeGlobalTao::::contains_key(netuid)); + assert!(!FeeGlobalAlpha::::contains_key(netuid)); + + // `EnabledUserLiquidity` is removed by liquidation. + assert!(!EnabledUserLiquidity::::contains_key(netuid)); + }); +} + +/// Non‑V3 path: V3 not initialized (no positions); function must still clear any residual storages and succeed. 
+#[test] +fn test_liquidate_non_v3_uninitialized_ok_and_clears() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(202); + + // Sanity: V3 is not initialized + assert!(!SwapV3Initialized::::get(netuid)); + assert!( + Positions::::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID)) + .next() + .is_none() + ); + + // ACT + assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + + // ASSERT: Defensive clears leave no residues and do not panic + assert!( + Positions::::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID)) + .next() + .is_none() + ); + assert!(Ticks::::iter_prefix(netuid).next().is_none()); + assert!( + TickIndexBitmapWords::::iter_prefix((netuid,)) + .next() + .is_none() + ); + + // All single-key maps should not have the key after liquidation + assert!(!FeeGlobalTao::::contains_key(netuid)); + assert!(!FeeGlobalAlpha::::contains_key(netuid)); + assert!(!CurrentLiquidity::::contains_key(netuid)); + assert!(!CurrentTick::::contains_key(netuid)); + assert!(!AlphaSqrtPrice::::contains_key(netuid)); + assert!(!SwapV3Initialized::::contains_key(netuid)); + assert!(!FeeRate::::contains_key(netuid)); + assert!(!EnabledUserLiquidity::::contains_key(netuid)); + }); +} + +/// Idempotency: calling liquidation twice is safe (both V3 and non‑V3 flavors). 
+#[test] +fn test_liquidate_idempotent() { + // V3 flavor + new_test_ext().execute_with(|| { + let netuid = NetUid::from(7); + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + + // Add a small user position + assert_ok!(Swap::toggle_user_liquidity( + RuntimeOrigin::root(), + netuid.into(), + true + )); + let tick_low = price_to_tick(0.2); + let tick_high = price_to_tick(0.3); + assert_ok!(Pallet::::do_add_liquidity( + netuid, + &OK_COLDKEY_ACCOUNT_ID, + &OK_HOTKEY_ACCOUNT_ID, + tick_low, + tick_high, + 123_456_789 + )); + + // 1st liquidation + assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + // 2nd liquidation (no state left) — must still succeed + assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + + // State remains empty + assert!( + Positions::::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID)) + .next() + .is_none() + ); + assert!(Ticks::::iter_prefix(netuid).next().is_none()); + assert!( + TickIndexBitmapWords::::iter_prefix((netuid,)) + .next() + .is_none() + ); + assert!(!SwapV3Initialized::::contains_key(netuid)); + }); + + // Non‑V3 flavor + new_test_ext().execute_with(|| { + let netuid = NetUid::from(8); + + // Never initialize V3 + assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + + assert!( + Positions::::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID)) + .next() + .is_none() + ); + assert!(Ticks::::iter_prefix(netuid).next().is_none()); + assert!( + TickIndexBitmapWords::::iter_prefix((netuid,)) + .next() + .is_none() + ); + assert!(!SwapV3Initialized::::contains_key(netuid)); + }); +} + +#[test] +fn liquidate_v3_refunds_user_funds_and_clears_state() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + + // Enable V3 path & initialize price/ticks (also creates a protocol position). 
+ assert_ok!(Pallet::::toggle_user_liquidity( + RuntimeOrigin::root(), + netuid, + true + )); + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + + // Use distinct cold/hot to demonstrate alpha refund goes to (owner, owner). + let cold = OK_COLDKEY_ACCOUNT_ID; + let hot = OK_HOTKEY_ACCOUNT_ID; + + // Tight in‑range band around current tick. + let ct = CurrentTick::::get(netuid); + let tick_low = ct.saturating_sub(10); + let tick_high = ct.saturating_add(10); + let liquidity: u64 = 1_000_000; + + // Snapshot balances BEFORE. + let tao_before = ::BalanceOps::tao_balance(&cold); + let alpha_before_hot = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot); + let alpha_before_owner = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &cold); + let alpha_before_total = alpha_before_hot + alpha_before_owner; + + // Create the user position (storage & v3 state only; no balances moved yet). + let (_pos_id, need_tao, need_alpha) = + Pallet::::do_add_liquidity(netuid, &cold, &hot, tick_low, tick_high, liquidity) + .expect("add liquidity"); + + // Mirror extrinsic bookkeeping: withdraw funds & bump provided‑reserve counters. + let tao_taken = ::BalanceOps::decrease_balance(&cold, need_tao.into()) + .expect("decrease TAO"); + let alpha_taken = ::BalanceOps::decrease_stake( + &cold, + &hot, + netuid.into(), + need_alpha.into(), + ) + .expect("decrease ALPHA"); + ::BalanceOps::increase_provided_tao_reserve(netuid.into(), tao_taken); + ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken); + + // Liquidate everything on the subnet. + assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + + // Expect balances restored to BEFORE snapshots (no swaps ran -> zero fees). + // TAO: we withdrew 'need_tao' above and liquidation refunded it, so we should be back to 'tao_before'. 
+ let tao_after = ::BalanceOps::tao_balance(&cold); + assert_eq!(tao_after, tao_before, "TAO principal must be refunded"); + + // ALPHA: refund is credited to (coldkey=cold, hotkey=cold). Compare totals across both ledgers. + let alpha_after_hot = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot); + let alpha_after_owner = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &cold); + let alpha_after_total = alpha_after_hot + alpha_after_owner; + assert_eq!( + alpha_after_total, alpha_before_total, + "ALPHA principal must be refunded to the account (may be credited to (owner, owner))" + ); + + // User position(s) are gone and all V3 state cleared. + assert_eq!(Pallet::::count_positions(netuid, &cold), 0); + assert!(Ticks::::iter_prefix(netuid).next().is_none()); + assert!(!SwapV3Initialized::::contains_key(netuid)); + }); +} From 2ae2eb65185bb09ea0c46c5990560b96d3f5f8b4 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 19 Aug 2025 13:36:46 -0700 Subject: [PATCH 061/379] add stake to balance --- pallets/subtensor/src/staking/remove_stake.rs | 28 ++++++------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index 5249d6782e..0df38eb71c 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -452,7 +452,6 @@ impl Pallet { let lock_cost: TaoCurrency = Self::get_subnet_locked_balance(netuid); // 3) Compute owner's received emission in TAO at current price. - // // Emission:: is Vec. 
We: // - sum emitted α, // - apply owner fraction to get owner α, @@ -480,7 +479,7 @@ impl Pallet { .saturating_mul(cur_price) .floor() .saturating_to_num::(); - let owner_emission_tau: TaoCurrency = owner_emission_tao_u64.into(); + let owner_emission_tao: TaoCurrency = owner_emission_tao_u64.into(); // 4) Enumerate all α entries on this subnet to build distribution weights and cleanup lists. // - collect keys to remove, @@ -524,17 +523,17 @@ impl Pallet { let pot_u64: u64 = pot_tao.into(); if pot_u64 > 0 { - // Remove TAO from dissolving subnet BEFORE restaking to ROOT to keep TotalStake consistent. SubnetTAO::::remove(netuid); TotalStake::::mutate(|total| *total = total.saturating_sub(pot_tao)); } - // 6) Pro‑rata distribution of the pot by α value (largest‑remainder). + // 6) Pro‑rata distribution of the pot by α value (largest‑remainder), + // **credited directly to each staker's COLDKEY free balance**. if pot_u64 > 0 && total_alpha_value_u128 > 0 && !stakers.is_empty() { struct Portion { - hot: A, + _hot: A, cold: C, - share: u64, // TAO to restake on ROOT + share: u64, // TAO to credit to coldkey balance rem: u128, // remainder for largest‑remainder method } @@ -550,7 +549,7 @@ impl Pallet { let rem: u128 = prod.checked_rem(total_alpha_value_u128).unwrap_or_default(); portions.push(Portion { - hot: hot.clone(), + _hot: hot.clone(), cold: cold.clone(), share: share_u64, rem, @@ -566,18 +565,10 @@ impl Pallet { } } - // Restake each portion into ROOT (stable 1:1), no price limit required. - let root_netuid = NetUid::ROOT; + // Credit each share directly to coldkey free balance. for p in portions { if p.share > 0 { - Self::stake_into_subnet( - &p.hot, - &p.cold, - root_netuid, - TaoCurrency::from(p.share), - TaoCurrency::from(0), - false, - )?; + Self::add_balance_to_coldkey_account(&p.cold, p.share); } } } @@ -599,13 +590,12 @@ impl Pallet { // 8) Refund remaining lock to subnet owner: // refund = max(0, lock_cost(τ) − owner_received_emission_in_τ). 
- let refund: TaoCurrency = lock_cost.saturating_sub(owner_emission_tau); + let refund: TaoCurrency = lock_cost.saturating_sub(owner_emission_tao); // Clear the locked balance on the subnet. Self::set_subnet_locked_balance(netuid, TaoCurrency::ZERO); if !refund.is_zero() { - // Add back to owner’s coldkey free balance (expects runtime Balance u64). Self::add_balance_to_coldkey_account(&owner_coldkey, refund.to_u64()); } From 63bad12cb1e2197ca772add5c663994b25f667f2 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 19 Aug 2025 15:39:06 -0700 Subject: [PATCH 062/379] update tests --- pallets/subtensor/src/tests/networks.rs | 190 +++++++++++++----------- 1 file changed, 106 insertions(+), 84 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 1b216fdfca..baa3910fa6 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -95,23 +95,24 @@ fn dissolve_single_alpha_out_staker_gets_all_tao() { let (s_hot, s_cold) = (U256::from(100), U256::from(200)); Alpha::::insert((s_hot, s_cold, net), U64F64::from_num(5_000u128)); - SubnetTAO::::insert(net, TaoCurrency::from(99_999)); + // Entire TAO pot should be paid to staker's cold-key + let pot: u64 = 99_999; + SubnetTAO::::insert(net, TaoCurrency::from(pot)); SubtensorModule::set_subnet_locked_balance(net, 0.into()); - // α on ROOT before - let root = NetUid::ROOT; - let alpha_before_root = - Alpha::::get((s_hot, s_cold, root)).saturating_to_num::(); + // Cold-key balance before + let before = SubtensorModule::get_coldkey_balance(&s_cold); - // 3. Dissolve + // Dissolve assert_ok!(SubtensorModule::do_dissolve_network(net)); - // 4. 
Entire TAO pot should now be α on root - let alpha_after_root = Alpha::::get((s_hot, s_cold, root)).saturating_to_num::(); - assert_eq!(alpha_after_root, alpha_before_root + 99_999); + // Cold-key received full pot + let after = SubtensorModule::get_coldkey_balance(&s_cold); + assert_eq!(after, before + pot); // No α entries left for dissolved subnet assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != net)); + assert!(!SubnetTAO::::contains_key(net)); }); } @@ -129,33 +130,63 @@ fn dissolve_two_stakers_pro_rata_distribution() { Alpha::::insert((s1_hot, s1_cold, net), U64F64::from_num(a1)); Alpha::::insert((s2_hot, s2_cold, net), U64F64::from_num(a2)); - SubnetTAO::::insert(net, TaoCurrency::from(10_000)); - SubtensorModule::set_subnet_locked_balance(net, 5_000.into()); + let pot: u64 = 10_000; + SubnetTAO::::insert(net, TaoCurrency::from(pot)); + SubtensorModule::set_subnet_locked_balance(net, 5_000.into()); // owner refund path present but emission = 0 - // α on ROOT before - let root = NetUid::ROOT; - let a1_root_before = Alpha::::get((s1_hot, s1_cold, root)).saturating_to_num::(); - let a2_root_before = Alpha::::get((s2_hot, s2_cold, root)).saturating_to_num::(); - - // Run dissolve - assert_ok!(SubtensorModule::do_dissolve_network(net)); + // Cold-key balances before + let s1_before = SubtensorModule::get_coldkey_balance(&s1_cold); + let s2_before = SubtensorModule::get_coldkey_balance(&s2_cold); + let owner_before = SubtensorModule::get_coldkey_balance(&oc); - // Expected TAO shares + // Expected τ shares with largest remainder let total = a1 + a2; - let share1_tao: u64 = (10_000u128 * a1 / total) as u64; - let share2_tao: u64 = (10_000u128 * a2 / total) as u64; + let prod1 = (a1 as u128) * (pot as u128); + let prod2 = (a2 as u128) * (pot as u128); + let share1 = (prod1 / total) as u64; + let share2 = (prod2 / total) as u64; + let mut distributed = share1 + share2; + let mut rem = [(s1_cold, prod1 % total), (s2_cold, prod2 % total)]; + if distributed < pot { + 
rem.sort_by_key(|&(_c, r)| core::cmp::Reverse(r)); + let leftover = pot - distributed; + for i in 0..(leftover as usize) { + if rem[i].0 == s1_cold { + distributed += 1; + } else { + distributed += 1; + } + } + } + // Recompute exact expected shares using the same logic + let mut expected1 = share1; + let mut expected2 = share2; + if share1 + share2 < pot { + rem.sort_by_key(|&(_c, r)| core::cmp::Reverse(r)); + if rem[0].0 == s1_cold { + expected1 += 1; + } else { + expected2 += 1; + } + } - // α on root should have increased by those shares - let a1_root_after = Alpha::::get((s1_hot, s1_cold, root)).saturating_to_num::(); - let a2_root_after = Alpha::::get((s2_hot, s2_cold, root)).saturating_to_num::(); + // Dissolve + assert_ok!(SubtensorModule::do_dissolve_network(net)); - assert_eq!(a1_root_after, a1_root_before + share1_tao); - assert_eq!(a2_root_after, a2_root_before + share2_tao); + // Cold-keys received their τ shares + assert_eq!( + SubtensorModule::get_coldkey_balance(&s1_cold), + s1_before + expected1 + ); + assert_eq!( + SubtensorModule::get_coldkey_balance(&s2_cold), + s2_before + expected2 + ); - // owner refund (5 000 τ) still to cold-key + // Owner refunded lock (no emission) assert_eq!( SubtensorModule::get_coldkey_balance(&oc), - SubtensorModule::get_coldkey_balance(&oc) // unchanged; refund already applied internally + owner_before + 5_000 ); // α entries for dissolved subnet gone @@ -432,28 +463,25 @@ fn dissolve_rounding_remainder_distribution() { SubnetTAO::::insert(net, TaoCurrency::from(1)); // TAO pot = 1 SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(0)); - // 2. α on ROOT before - let root = NetUid::ROOT; - let a1_before = Alpha::::get((s1h, s1c, root)).saturating_to_num::(); - let a2_before = Alpha::::get((s2h, s2c, root)).saturating_to_num::(); + // Cold-key balances before + let c1_before = SubtensorModule::get_coldkey_balance(&s1c); + let c2_before = SubtensorModule::get_coldkey_balance(&s2c); // 3. 
Run full dissolve flow assert_ok!(SubtensorModule::do_dissolve_network(net)); - // 4. s1 (larger remainder) should now have +1 α on ROOT - let a1_after = Alpha::::get((s1h, s1c, root)).saturating_to_num::(); - let a2_after = Alpha::::get((s2h, s2c, root)).saturating_to_num::(); + // 4. s1 (larger remainder) should get +1 τ on cold-key + let c1_after = SubtensorModule::get_coldkey_balance(&s1c); + let c2_after = SubtensorModule::get_coldkey_balance(&s2c); - assert_eq!(a1_after, a1_before + 1); - assert_eq!(a2_after, a2_before); + assert_eq!(c1_after, c1_before + 1); + assert_eq!(c2_after, c2_before); - // α records for subnet gone + // α records for subnet gone; TAO key gone assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != net)); - // TAO storage key gone assert!(!SubnetTAO::::contains_key(net)); }); } - #[test] fn destroy_alpha_out_multiple_stakers_pro_rata() { new_test_ext(0).execute_with(|| { @@ -500,40 +528,48 @@ fn destroy_alpha_out_multiple_stakers_pro_rata() { SubnetTAO::::insert(netuid, TaoCurrency::from(tao_pot)); SubtensorModule::set_subnet_locked_balance(netuid, TaoCurrency::from(5_000)); - // 6. Balances & α on the *root* network *before* - let root = NetUid::ROOT; - let bal1_before = SubtensorModule::get_coldkey_balance(&c1); - let bal2_before = SubtensorModule::get_coldkey_balance(&c2); + // 6. Balances before + let c1_before = SubtensorModule::get_coldkey_balance(&c1); + let c2_before = SubtensorModule::get_coldkey_balance(&c2); let owner_before = SubtensorModule::get_coldkey_balance(&owner_cold); - let alpha1_before_root: u64 = Alpha::::get((h1, c1, root)).saturating_to_num(); - let alpha2_before_root: u64 = Alpha::::get((h2, c2, root)).saturating_to_num(); - - // 7. Run the burn-and-restake logic + // 7. Run the (now credit-to-coldkey) logic assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(netuid)); - // 8. 
Expected TAO shares - let share1_tao: u64 = (tao_pot as u128 * a1 / atotal) as u64; - let share2_tao: u64 = tao_pot - share1_tao; + // 8. Expected τ shares via largest remainder + let prod1 = (tao_pot as u128) * a1; + let prod2 = (tao_pot as u128) * a2; + let mut s1_share = (prod1 / atotal) as u64; + let mut s2_share = (prod2 / atotal) as u64; + let distributed = s1_share + s2_share; + if distributed < tao_pot { + // Assign leftover to larger remainder + let r1 = prod1 % atotal; + let r2 = prod2 % atotal; + if r1 >= r2 { + s1_share += 1; + } else { + s2_share += 1; + } + } - // 9. Assert cold-key balances unchanged (stakers) - assert_eq!(SubtensorModule::get_coldkey_balance(&c1), bal1_before); - assert_eq!(SubtensorModule::get_coldkey_balance(&c2), bal2_before); + // 9. Cold-key balances must have increased accordingly + assert_eq!( + SubtensorModule::get_coldkey_balance(&c1), + c1_before + s1_share + ); + assert_eq!( + SubtensorModule::get_coldkey_balance(&c2), + c2_before + s2_share + ); - // 10. Assert owner refund (5 000 τ) still hits cold-key + // 10. Owner refund (5 000 τ) to cold-key (no emission) assert_eq!( SubtensorModule::get_coldkey_balance(&owner_cold), owner_before + 5_000 ); - // 11. Assert α on ROOT increased by exactly the TAO restaked - let alpha1_after_root: u64 = Alpha::::get((h1, c1, root)).saturating_to_num(); - let alpha2_after_root: u64 = Alpha::::get((h2, c2, root)).saturating_to_num(); - - assert_eq!(alpha1_after_root, alpha1_before_root + share1_tao); - assert_eq!(alpha2_after_root, alpha2_before_root + share2_tao); - - // 12. No α entries left for the dissolved subnet + // 11. 
α entries cleared for the subnet assert!(!Alpha::::contains_key((h1, c1, netuid))); assert!(!Alpha::::contains_key((h2, c2, netuid))); }); @@ -607,17 +643,14 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { ); SubnetOwnerCut::::put(32_768u16); // ~ 0.5 in fixed-point - // ── 4) balances & α on ROOT before ────────────────────────────────── - let root = NetUid::ROOT; + // ── 4) balances before ────────────────────────────────────────────── let mut bal_before = [0u64; N]; - let mut alpha_before_root = [0u64; N]; for i in 0..N { bal_before[i] = SubtensorModule::get_coldkey_balance(&cold[i]); - alpha_before_root[i] = Alpha::::get((hot[i], cold[i], root)).saturating_to_num(); } let owner_before = SubtensorModule::get_coldkey_balance(&owner_cold); - // ── 5) expected TAO share per pallet algorithm (incl. remainder) ──── + // ── 5) expected τ share per pallet algorithm (incl. remainder) ───── let mut share = [0u64; N]; let mut rem = [0u128; N]; let mut paid: u128 = 0; @@ -651,27 +684,16 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { .saturating_to_num::(); let expected_refund: u64 = lock.saturating_sub(owner_emission_tao_u64); - // ── 6) run burn-and-restake ──────────────────────────────────────── + // ── 6) run distribution (credits τ to coldkeys, wipes α state) ───── assert_ok!(SubtensorModule::destroy_alpha_in_out_stakes(netuid)); // ── 7) post checks ────────────────────────────────────────────────── for i in 0..N { - // cold-key balances unchanged + // cold-key balances increased by expected τ share assert_eq!( SubtensorModule::get_coldkey_balance(&cold[i]), - bal_before[i], - "staker {} cold-key balance changed", - i - ); - - // α added on ROOT == TAO share - let alpha_after_root: u64 = - Alpha::::get((hot[i], cold[i], root)).saturating_to_num(); - - assert_eq!( - alpha_after_root, - alpha_before_root[i] + share[i], - "staker {} incorrect α restaked", + bal_before[i] + share[i], + "staker {} cold-key balance changed unexpectedly", i ); } 
@@ -682,7 +704,7 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { owner_before + expected_refund ); - // α cleared for dissolved subnet + // α cleared for dissolved subnet & related counters reset assert!(Alpha::::iter().all(|((_h, _c, n), _)| n != netuid)); assert_eq!(SubnetAlphaIn::::get(netuid), 0.into()); assert_eq!(SubnetAlphaOut::::get(netuid), 0.into()); From 92a3a73786df34db3ec17d407b911e31837bc347 Mon Sep 17 00:00:00 2001 From: open-junius Date: Wed, 20 Aug 2025 21:07:16 +0800 Subject: [PATCH 063/379] reduce new subnet creation --- evm-tests/test/uid.precompile.lookup.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/evm-tests/test/uid.precompile.lookup.test.ts b/evm-tests/test/uid.precompile.lookup.test.ts index f6e22ce032..648262cb02 100644 --- a/evm-tests/test/uid.precompile.lookup.test.ts +++ b/evm-tests/test/uid.precompile.lookup.test.ts @@ -11,7 +11,7 @@ import { PolkadotSigner, TypedApi } from "polkadot-api"; import { toViemAddress, convertPublicKeyToSs58 } from "../src/address-utils" import { IUIDLookupABI, IUID_LOOKUP_ADDRESS } from "../src/contracts/uidLookup" import { keccak256 } from 'ethers'; -import { addNewSubnetwork, forceSetBalanceToSs58Address, startCall } from "../src/subtensor"; +import { forceSetBalanceToSs58Address, startCall } from "../src/subtensor"; describe("Test the UID Lookup precompile", () => { const hotkey = getRandomSubstrateKeypair(); @@ -37,7 +37,7 @@ describe("Test the UID Lookup precompile", () => { await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) - netuid = await addNewSubnetwork(api, hotkey, coldkey) + netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1; await startCall(api, netuid, coldkey) const maybeUid = await api.query.SubtensorModule.Uids.getValue(netuid, convertPublicKeyToSs58(hotkey.publicKey)) From 
9a7487b6a669454ea190f66fad22235c2329f5a8 Mon Sep 17 00:00:00 2001 From: open-junius Date: Wed, 20 Aug 2025 21:13:14 +0800 Subject: [PATCH 064/379] revert the test case --- evm-tests/test/uid.precompile.lookup.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/evm-tests/test/uid.precompile.lookup.test.ts b/evm-tests/test/uid.precompile.lookup.test.ts index 648262cb02..f6e22ce032 100644 --- a/evm-tests/test/uid.precompile.lookup.test.ts +++ b/evm-tests/test/uid.precompile.lookup.test.ts @@ -11,7 +11,7 @@ import { PolkadotSigner, TypedApi } from "polkadot-api"; import { toViemAddress, convertPublicKeyToSs58 } from "../src/address-utils" import { IUIDLookupABI, IUID_LOOKUP_ADDRESS } from "../src/contracts/uidLookup" import { keccak256 } from 'ethers'; -import { forceSetBalanceToSs58Address, startCall } from "../src/subtensor"; +import { addNewSubnetwork, forceSetBalanceToSs58Address, startCall } from "../src/subtensor"; describe("Test the UID Lookup precompile", () => { const hotkey = getRandomSubstrateKeypair(); @@ -37,7 +37,7 @@ describe("Test the UID Lookup precompile", () => { await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) - netuid = (await api.query.SubtensorModule.TotalNetworks.getValue()) - 1; + netuid = await addNewSubnetwork(api, hotkey, coldkey) await startCall(api, netuid, coldkey) const maybeUid = await api.query.SubtensorModule.Uids.getValue(netuid, convertPublicKeyToSs58(hotkey.publicKey)) From 3c53b3002d1bceefc9bee0d91504f724b322cbe9 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 20 Aug 2025 12:39:43 -0700 Subject: [PATCH 065/379] use the actual hotkey --- common/src/lib.rs | 3 +- pallets/subtensor/src/lib.rs | 3 + pallets/swap/src/mock.rs | 6 +- pallets/swap/src/pallet/impls.rs | 206 ++++++++++++++++++++++++++----- 4 files changed, 187 insertions(+), 31 
deletions(-) diff --git a/common/src/lib.rs b/common/src/lib.rs index a3882a88fc..ac17361879 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -9,7 +9,7 @@ use runtime_common::prod_or_fast; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_runtime::{ - MultiSignature, + MultiSignature, Vec, traits::{IdentifyAccount, Verify}, }; use subtensor_macros::freeze_struct; @@ -174,6 +174,7 @@ pub trait SubnetInfo { fn exists(netuid: NetUid) -> bool; fn mechanism(netuid: NetUid) -> u16; fn is_owner(account_id: &AccountId, netuid: NetUid) -> bool; + fn get_owned_hotkeys(coldkey: &AccountId) -> Vec; } pub trait BalanceOps { diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index b45fcaa2b1..173ccd3aff 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -2593,6 +2593,9 @@ impl> fn is_owner(account_id: &T::AccountId, netuid: NetUid) -> bool { SubnetOwner::::get(netuid) == *account_id } + fn get_owned_hotkeys(coldkey: &T::AccountId) -> Vec { + OwnedHotkeys::::get(coldkey) + } } impl> diff --git a/pallets/swap/src/mock.rs b/pallets/swap/src/mock.rs index 78a8f925c8..ec22c12472 100644 --- a/pallets/swap/src/mock.rs +++ b/pallets/swap/src/mock.rs @@ -11,7 +11,7 @@ use frame_support::{ use frame_system::{self as system}; use sp_core::H256; use sp_runtime::{ - BuildStorage, + BuildStorage, Vec, traits::{BlakeTwo256, IdentityLookup}, }; use subtensor_runtime_common::{AlphaCurrency, BalanceOps, NetUid, SubnetInfo, TaoCurrency}; @@ -115,6 +115,10 @@ impl SubnetInfo for MockLiquidityProvider { fn is_owner(account_id: &AccountId, _netuid: NetUid) -> bool { *account_id != NOT_SUBNET_OWNER } + + fn get_owned_hotkeys(_coldkey: &AccountId) -> Vec { + Vec::::new() + } } pub struct MockBalanceOps; diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index ff715bede8..c40579b345 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1212,73 +1212,221 @@ impl 
Pallet { pub fn protocol_account_id() -> T::AccountId { T::ProtocolId::get().into_account_truncating() } - /// Liquidate (force-close) all LPs for `netuid`, **refund** providers, and reset all swap state. + + /// Distribute `alpha_total` back to the coldkey's hotkeys for `netuid`. + /// - If the coldkey owns multiple hotkeys, split pro‑rata by current α stake on this subnet. + /// If all stakes are zero, split evenly. + /// - If no hotkeys exist, fall back to (coldkey, coldkey). + /// + pub fn refund_alpha_to_hotkeys( + netuid: NetUid, + coldkey: &T::AccountId, + alpha_total: AlphaCurrency, + ) { + if alpha_total.is_zero() { + return; + } + + // 1) Fetch owned hotkeys via SubnetInfo; no direct dependency on pallet_subtensor. + let mut hotkeys: sp_std::vec::Vec = T::SubnetInfo::get_owned_hotkeys(coldkey); + + // Fallback: if no hotkeys are currently owned, use coldkey as its own hotkey. + if hotkeys.is_empty() { + hotkeys.push(coldkey.clone()); + } + + // 2) Build weights based on current α stake on this subnet. + // If sum_weights == 0, we'll split evenly. + let weights: sp_std::vec::Vec = hotkeys + .iter() + .map(|hk| { + let bal = T::BalanceOps::alpha_balance(netuid, coldkey, hk); + bal.to_u64() as u128 + }) + .collect(); + + let sum_weights: u128 = weights.iter().copied().sum(); + let n: u128 = hotkeys.len() as u128; + + let total_alpha_u128: u128 = alpha_total.to_u64() as u128; + + // 3) Compute integer shares with remainder handling. + let mut shares: sp_std::vec::Vec<(T::AccountId, u64)> = + sp_std::vec::Vec::with_capacity(hotkeys.len()); + if sum_weights > 0 { + // Pro‑rata by weights. 
+ let mut assigned: u128 = 0; + for (hk, w) in hotkeys.iter().cloned().zip(weights.iter().copied()) { + let part: u128 = (total_alpha_u128.saturating_mul(w)) / sum_weights; + shares.push((hk, part as u64)); + assigned = assigned.saturating_add(part); + } + let mut remainder: u64 = total_alpha_u128 + .saturating_sub(assigned) + .try_into() + .unwrap_or(0); + let len = shares.len(); + let mut i = 0usize; + while remainder > 0 && i < len { + shares[i].1 = shares[i].1.saturating_add(1); + remainder = remainder.saturating_sub(1); + i += 1; + } + } else { + // Even split. + let base: u64 = (total_alpha_u128 / n) as u64; + let mut remainder: u64 = (total_alpha_u128 % n) as u64; + for hk in hotkeys.into_iter() { + let add_one = if remainder > 0 { + remainder = remainder.saturating_sub(1); + 1 + } else { + 0 + }; + shares.push((hk, base.saturating_add(add_one))); + } + } + + // 4) Deposit to (coldkey, hotkey). On failure, collect leftover and retry on successes. + let mut leftover: u64 = 0; + let mut successes: sp_std::vec::Vec = sp_std::vec::Vec::new(); + + for (hk, amt_u64) in shares.iter() { + if *amt_u64 == 0 { + continue; + } + let amt: AlphaCurrency = (*amt_u64).into(); + match T::BalanceOps::increase_stake(coldkey, hk, netuid, amt) { + Ok(_) => successes.push(hk.clone()), + Err(e) => { + log::warn!( + "refund_alpha_to_hotkeys: increase_stake failed (cold={:?}, hot={:?}, netuid={:?}, amt={:?}): {:?}", + coldkey, + hk, + netuid, + amt_u64, + e + ); + leftover = leftover.saturating_add(*amt_u64); + } + } + } + + // 5) Retry: spread any leftover across the hotkeys that succeeded in step 4. 
+ if leftover > 0 && !successes.is_empty() { + let count = successes.len() as u64; + let base = leftover / count; + let mut rem = leftover % count; + + for hk in successes.iter() { + let add: u64 = base.saturating_add(if rem > 0 { + rem -= 1; + 1 + } else { + 0 + }); + if add == 0 { + continue; + } + let _ = T::BalanceOps::increase_stake(coldkey, hk, netuid, add.into()); + } + leftover = 0; + } + + // 6) Final fallback: if for some reason every deposit failed, deposit to (coldkey, coldkey). + if leftover > 0 { + let _ = T::BalanceOps::increase_stake(coldkey, coldkey, netuid, leftover.into()); + } + } + + /// Liquidate (force-close) all LPs for `netuid`, refund providers, and reset all swap state. /// - /// - **V3 path** (mechanism==1 && SwapV3Initialized): + /// - **V3 path** (mechanism == 1 && SwapV3Initialized): /// * Remove **all** positions (user + protocol) via `do_remove_liquidity`. - /// * **Refund** each owner: TAO = `rm.tao + rm.fee_tao`, ALPHA = `rm.alpha + rm.fee_alpha`, - /// using `T::BalanceOps::{deposit_tao, deposit_alpha}`. + /// * **Refund** each owner: + /// - TAO = Σ(position.tao + position.fee_tao) → credited to the owner's **coldkey** free balance. + /// - ALPHA = Σ(position.alpha + position.fee_alpha) → credited back across **all owned hotkeys** + /// using `refund_alpha_to_hotkeys` (pro‑rata by current α stake; even split if all zero; never skipped). + /// * Decrease "provided reserves" (principal only) for non‑protocol owners. /// * Clear ActiveTickIndexManager entries, ticks, fee globals, price, tick, liquidity, /// init flag, bitmap words, fee rate knob, and user LP flag. /// /// - **V2 / non‑V3 path**: - /// * No per‑position records exist; still defensively clear the same V3 storages - /// (safe no‑ops) so the subnet leaves **no swap residue**. + /// * No per‑position records exist; still defensively clear the same V3 storages (safe no‑ops). 
pub fn do_liquidate_all_liquidity_providers(netuid: NetUid) -> DispatchResult { let mechid = T::SubnetInfo::mechanism(netuid.into()); let v3_initialized = SwapV3Initialized::::get(netuid); let user_lp_enabled = - >::is_user_liquidity_enabled(netuid); + >::is_user_liquidity_enabled(netuid); let is_v3_mode = mechid == 1 && v3_initialized; if is_v3_mode { - // -------- V3: close every position, REFUND owners, then clear all V3 state -------- + // -------- V3: close every position, aggregate refunds, clear state -------- // 1) Snapshot all (owner, position_id) under this netuid to avoid iterator aliasing. - let mut to_close: sp_std::vec::Vec<(T::AccountId, PositionId)> = - sp_std::vec::Vec::new(); + struct CloseItem { + owner: A, + pos_id: PositionId, + } + let mut to_close: sp_std::vec::Vec> = sp_std::vec::Vec::new(); for ((n, owner, pos_id), _pos) in Positions::::iter() { if n == netuid { - to_close.push((owner, pos_id)); + to_close.push(CloseItem { owner, pos_id }); } } let protocol_account = Self::protocol_account_id(); - // 2) Remove all positions (user + protocol) and REFUND both legs to the owner. - for (owner, pos_id) in to_close.into_iter() { + // 2) Aggregate refunds per owner while removing positions. + use sp_std::collections::btree_map::BTreeMap; + let mut refunds: BTreeMap = BTreeMap::new(); + + for CloseItem { owner, pos_id } in to_close.into_iter() { + // Remove position first; this returns principal + accrued fees amounts. let rm = Self::do_remove_liquidity(netuid, &owner, pos_id)?; - // Refund TAO: principal + accrued TAO fees. - let tao_refund = rm.tao.saturating_add(rm.fee_tao); - if tao_refund > TaoCurrency::ZERO { - T::BalanceOps::increase_balance(&owner, tao_refund); + // Accumulate (TAO, α) refund: principal + fees. 
+ let tao_add = rm.tao.saturating_add(rm.fee_tao); + let alpha_add = rm.alpha.saturating_add(rm.fee_alpha); + + refunds + .entry(owner.clone()) + .and_modify(|(t, a)| { + *t = t.saturating_add(tao_add); + *a = a.saturating_add(alpha_add); + }) + .or_insert((tao_add, alpha_add)); + + // Mirror "user-provided" reserves by principal only (fees have been paid out). + if owner != protocol_account { + T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao); + T::BalanceOps::decrease_provided_alpha_reserve(netuid, rm.alpha); } + } - // Refund ALPHA: principal + accrued ALPHA fees. - let alpha_refund = rm.alpha.saturating_add(rm.fee_alpha); - if !alpha_refund.is_zero() { - // Credit ALPHA back to the provider on (coldkey=owner, hotkey=owner). - T::BalanceOps::increase_stake(&owner, &owner, netuid.into(), alpha_refund)?; + // 3) Process refunds per owner (no skipping). + for (owner, (tao_sum, alpha_sum)) in refunds.into_iter() { + // TAO → coldkey free balance + if tao_sum > TaoCurrency::ZERO { + T::BalanceOps::increase_balance(&owner, tao_sum); } - // Mirror `remove_liquidity`: update **user-provided** reserves by principal only. - // Skip for protocol-owned liquidity which never contributed to provided reserves. - if owner != protocol_account { - T::BalanceOps::decrease_provided_tao_reserve(netuid.into(), rm.tao); - T::BalanceOps::decrease_provided_alpha_reserve(netuid.into(), rm.alpha); + // α → split across all owned hotkeys (never skip). Skip α for protocol account + // because protocol liquidity does not map to user stake. + if !alpha_sum.is_zero() && owner != protocol_account { + Self::refund_alpha_to_hotkeys(netuid, &owner, alpha_sum); } } - // 3) Clear active tick index set by walking ticks we are about to clear. + // 4) Clear active tick index set by walking ticks we are about to clear. 
let active_ticks: sp_std::vec::Vec = Ticks::::iter_prefix(netuid).map(|(ti, _)| ti).collect(); for ti in active_ticks { ActiveTickIndexManager::::remove(netuid, ti); } - // 4) Clear storage: + // 5) Clear storage: // Positions (StorageNMap) – prefix is **(netuid,)** not just netuid. let _ = Positions::::clear_prefix((netuid,), u32::MAX, None); From c4668d4ce05ed8f10fa1c98809747fb0e0259d0d Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 20 Aug 2025 13:24:03 -0700 Subject: [PATCH 066/379] add test massive_dissolve --- pallets/subtensor/src/tests/networks.rs | 469 ++++++++++++++++++++++++ 1 file changed, 469 insertions(+) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index baa3910fa6..b378cf8899 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -4,6 +4,7 @@ use crate::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; use sp_core::U256; +use sp_std::collections::btree_map::BTreeMap; use substrate_fixed::types::{U64F64, U96F32}; use subtensor_runtime_common::TaoCurrency; use subtensor_swap_interface::SwapHandler; @@ -1233,3 +1234,471 @@ fn test_tempo_greater_than_weight_set_rate_limit() { assert!(tempo as u64 >= weights_set_rate_limit); }) } + +#[test] +fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state() { + new_test_ext(0).execute_with(|| { + // ──────────────────────────────────────────────────────────────────── + // 0) Constants and helpers (distinct hotkeys & coldkeys) + // ──────────────────────────────────────────────────────────────────── + const NUM_NETS: usize = 4; + + // Six LP coldkeys + let cold_lps: [U256; 6] = [ + U256::from(3001), + U256::from(3002), + U256::from(3003), + U256::from(3004), + U256::from(3005), + U256::from(3006), + ]; + + // For each coldkey, define two DISTINCT hotkeys it owns. 
+ let mut cold_to_hots: BTreeMap = BTreeMap::new(); + for &c in cold_lps.iter() { + let h1 = U256::from(c.low_u64().saturating_add(100_000)); + let h2 = U256::from(c.low_u64().saturating_add(200_000)); + cold_to_hots.insert(c, [h1, h2]); + } + + // Distinct τ pot sizes per net. + let pots: [u64; NUM_NETS] = [12_345, 23_456, 34_567, 45_678]; + + let lp_sets_per_net: [&[U256]; NUM_NETS] = [ + &cold_lps[0..4], // net0: A,B,C,D + &cold_lps[2..6], // net1: C,D,E,F + &cold_lps[0..6], // net2: A..F + &cold_lps[1..5], // net3: B,C,D,E + ]; + + // Multiple bands/sizes → many positions per cold across nets, using mixed hotkeys. + let bands: [i32; 3] = [5, 13, 30]; + let liqs: [u64; 3] = [400_000, 700_000, 1_100_000]; + + // Helper: add a V3 position via a (hot, cold) pair. + let add_pos = |net: NetUid, hot: U256, cold: U256, band: i32, liq: u64| { + let ct = pallet_subtensor_swap::CurrentTick::::get(net); + let lo = ct.saturating_sub(band); + let hi = ct.saturating_add(band); + assert_ok!(pallet_subtensor_swap::Pallet::::add_liquidity( + RuntimeOrigin::signed(cold), + hot, + net, + lo, + hi, + liq + )); + }; + + // ──────────────────────────────────────────────────────────────────── + // 1) Create many subnets, enable V3, fix price at tick=0 (sqrt≈1) + // ──────────────────────────────────────────────────────────────────── + let mut nets: Vec = Vec::new(); + for i in 0..NUM_NETS { + let owner_hot = U256::from(10_000 + (i as u64)); + let owner_cold = U256::from(20_000 + (i as u64)); + let net = add_dynamic_network(&owner_hot, &owner_cold); + SubtensorModule::set_max_registrations_per_block(net, 1_000u16); + SubtensorModule::set_target_registrations_per_interval(net, 1_000u16); + Emission::::insert(net, Vec::::new()); + SubtensorModule::set_subnet_locked_balance(net, TaoCurrency::from(0)); + + assert_ok!( + pallet_subtensor_swap::Pallet::::toggle_user_liquidity( + RuntimeOrigin::root(), + net, + true + ) + ); + + // Price/tick pinned so LP math stays stable (sqrt(1)). 
+ let ct0 = pallet_subtensor_swap::tick::TickIndex::new_unchecked(0); + let sqrt1 = ct0.try_to_sqrt_price().expect("sqrt(1) price"); + pallet_subtensor_swap::CurrentTick::::set(net, ct0); + pallet_subtensor_swap::AlphaSqrtPrice::::set(net, sqrt1); + + nets.push(net); + } + + // Map net → index for quick lookups. + let mut net_index: BTreeMap = BTreeMap::new(); + for (i, &n) in nets.iter().enumerate() { + net_index.insert(n, i); + } + + // ──────────────────────────────────────────────────────────────────── + // 2) Pre-create a handful of small (hot, cold) pairs so accounts exist + // ──────────────────────────────────────────────────────────────────── + for id in 0u64..10 { + let cold_acc = U256::from(1_000_000 + id); + let hot_acc = U256::from(2_000_000 + id); + for &net in nets.iter() { + register_ok_neuron(net, hot_acc, cold_acc, 100_000 + id); + } + } + + // ──────────────────────────────────────────────────────────────────── + // 3) LPs per net: register each (hot, cold), massive τ prefund, and stake + // ──────────────────────────────────────────────────────────────────── + for &cold in cold_lps.iter() { + SubtensorModule::add_balance_to_coldkey_account(&cold, u64::MAX); + } + + // τ balances before LP adds (after staking): + let mut tao_before: BTreeMap = BTreeMap::new(); + + // Ordered α snapshot per net at **pair granularity** (pre‑LP): + let mut alpha_pairs_per_net: BTreeMap> = BTreeMap::new(); + + // Register both hotkeys for each participating cold on each net and stake τ→α. + for (ni, &net) in nets.iter().enumerate() { + let participants = lp_sets_per_net[ni]; + for &cold in participants.iter() { + let [hot1, hot2] = cold_to_hots[&cold]; + + // Ensure (hot, cold) neurons exist on this net. + register_ok_neuron( + net, + hot1, + cold, + (ni as u64) * 10_000 + (hot1.low_u64() % 10_000), + ); + register_ok_neuron( + net, + hot2, + cold, + (ni as u64) * 10_000 + (hot2.low_u64() % 10_000) + 1, + ); + + // Stake τ (split across the two hotkeys). 
+ let base: u64 = + 5_000_000 + ((ni as u64) * 1_000_000) + ((cold.low_u64() % 10) * 250_000); + let stake1: u64 = base.saturating_mul(3) / 5; // 60% + let stake2: u64 = base.saturating_sub(stake1); // 40% + + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(cold), + hot1, + net, + stake1.into() + )); + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(cold), + hot2, + net, + stake2.into() + )); + } + } + + // Record τ balances now (post‑stake, pre‑LP). + for &cold in cold_lps.iter() { + tao_before.insert(cold, SubtensorModule::get_coldkey_balance(&cold)); + } + + // Capture **pair‑level** α snapshot per net (pre‑LP). + for ((hot, cold, net), amt) in Alpha::::iter() { + if let Some(&ni) = net_index.get(&net) { + if lp_sets_per_net[ni].iter().any(|&c| c == cold) { + let a: u128 = amt.saturating_to_num(); + if a > 0 { + alpha_pairs_per_net + .entry(net) + .or_default() + .push(((hot, cold), a)); + } + } + } + } + + // ──────────────────────────────────────────────────────────────────── + // 4) Add many V3 positions per cold across nets, alternating hotkeys + // ──────────────────────────────────────────────────────────────────── + for (ni, &net) in nets.iter().enumerate() { + let participants = lp_sets_per_net[ni]; + for (pi, &cold) in participants.iter().enumerate() { + let [hot1, hot2] = cold_to_hots[&cold]; + let hots = [hot1, hot2]; + for k in 0..3 { + let band = bands[(pi + k) % bands.len()]; + let liq = liqs[(ni + k) % liqs.len()]; + let hot = hots[k % hots.len()]; + add_pos(net, hot, cold, band, liq); + } + } + } + + // Snapshot τ balances AFTER LP adds (to measure actual principal debit). 
+ let mut tao_after_adds: BTreeMap = BTreeMap::new(); + for &cold in cold_lps.iter() { + tao_after_adds.insert(cold, SubtensorModule::get_coldkey_balance(&cold)); + } + + // ──────────────────────────────────────────────────────────────────── + // 5) Compute Hamilton-apportionment BASE shares per cold and total leftover + // from the **pair-level** pre‑LP α snapshot; also count pairs per cold. + // ──────────────────────────────────────────────────────────────────── + let mut base_share_cold: BTreeMap = + cold_lps.iter().copied().map(|c| (c, 0_u64)).collect(); + let mut pair_count_cold: BTreeMap = + cold_lps.iter().copied().map(|c| (c, 0_u32)).collect(); + + let mut leftover_total: u64 = 0; + + for (ni, &net) in nets.iter().enumerate() { + let pot = pots[ni]; + let pairs = alpha_pairs_per_net.get(&net).cloned().unwrap_or_default(); + if pot == 0 || pairs.is_empty() { + continue; + } + let total_alpha: u128 = pairs.iter().map(|(_, a)| *a).sum(); + if total_alpha == 0 { + continue; + } + + let mut base_sum_net: u64 = 0; + for ((_, cold), a) in pairs.iter().copied() { + // quota = a * pot / total_alpha + let prod: u128 = (a as u128).saturating_mul(pot as u128); + let base: u64 = (prod / total_alpha) as u64; + base_sum_net = base_sum_net.saturating_add(base); + *base_share_cold.entry(cold).or_default() = + base_share_cold[&cold].saturating_add(base); + *pair_count_cold.entry(cold).or_default() += 1; + } + let leftover_net = pot.saturating_sub(base_sum_net); + leftover_total = leftover_total.saturating_add(leftover_net); + } + + // ──────────────────────────────────────────────────────────────────── + // 6) Seed τ pots and dissolve *all* networks (liquidates LPs + refunds) + // ──────────────────────────────────────────────────────────────────── + for (ni, &net) in nets.iter().enumerate() { + SubnetTAO::::insert(net, TaoCurrency::from(pots[ni])); + } + for &net in nets.iter() { + assert_ok!(SubtensorModule::do_dissolve_network(net)); + } + + // 
──────────────────────────────────────────────────────────────────── + // 7) Assertions: τ balances, α gone, nets removed, swap state clean + // (Hamilton invariants enforced at cold-level without relying on tie-break) + // ──────────────────────────────────────────────────────────────────── + // Collect actual pot credits per cold (principal cancels out against adds when comparing before→after). + let mut actual_pot_cold: BTreeMap = + cold_lps.iter().copied().map(|c| (c, 0_u64)).collect(); + for &cold in cold_lps.iter() { + let before = tao_before[&cold]; + let after = SubtensorModule::get_coldkey_balance(&cold); + actual_pot_cold.insert(cold, after.saturating_sub(before)); + } + + // (a) Sum of actual pot credits equals total pots. + let total_actual: u64 = actual_pot_cold.values().copied().sum(); + let total_pots: u64 = pots.iter().copied().sum(); + assert_eq!( + total_actual, total_pots, + "total τ pot credited across colds must equal sum of pots" + ); + + // (b) Each cold’s pot is within Hamilton bounds: base ≤ actual ≤ base + #pairs. + let mut extra_accum: u64 = 0; + for &cold in cold_lps.iter() { + let base = *base_share_cold.get(&cold).unwrap_or(&0); + let pairs = *pair_count_cold.get(&cold).unwrap_or(&0) as u64; + let actual = *actual_pot_cold.get(&cold).unwrap_or(&0); + + assert!( + actual >= base, + "cold {:?} actual pot {} is below base {}", + cold, + actual, + base + ); + assert!( + actual <= base.saturating_add(pairs), + "cold {:?} actual pot {} exceeds base + pairs ({} + {})", + cold, + actual, + base, + pairs + ); + + extra_accum = extra_accum.saturating_add(actual.saturating_sub(base)); + } + + // (c) The total “extra beyond base” equals the computed leftover_total across nets. + assert_eq!( + extra_accum, leftover_total, + "sum of extras beyond base must equal total leftover" + ); + + // (d) τ principal was fully refunded (compare after_adds → after). 
+ for &cold in cold_lps.iter() { + let before = tao_before[&cold]; + let mid = tao_after_adds[&cold]; + let after = SubtensorModule::get_coldkey_balance(&cold); + let principal_actual = before.saturating_sub(mid); + let actual_pot = after.saturating_sub(before); + assert_eq!( + after.saturating_sub(mid), + principal_actual.saturating_add(actual_pot), + "cold {:?} τ balance incorrect vs 'after_adds'", + cold + ); + } + + // For each dissolved net, check α ledgers gone, network removed, and swap state clean. + for &net in nets.iter() { + assert!( + Alpha::::iter().all(|((_h, _c, n), _)| n != net), + "alpha ledger not fully cleared for net {:?}", + net + ); + assert!( + !SubtensorModule::if_subnet_exist(net), + "subnet {:?} still exists", + net + ); + assert!( + pallet_subtensor_swap::Ticks::::iter_prefix(net) + .next() + .is_none(), + "ticks not cleared for net {:?}", + net + ); + assert!( + !pallet_subtensor_swap::Positions::::iter() + .any(|((n, _owner, _pid), _)| n == net), + "swap positions not fully cleared for net {:?}", + net + ); + assert_eq!( + pallet_subtensor_swap::FeeGlobalTao::::get(net).saturating_to_num::(), + 0, + "FeeGlobalTao nonzero for net {:?}", + net + ); + assert_eq!( + pallet_subtensor_swap::FeeGlobalAlpha::::get(net).saturating_to_num::(), + 0, + "FeeGlobalAlpha nonzero for net {:?}", + net + ); + assert_eq!( + pallet_subtensor_swap::CurrentLiquidity::::get(net), + 0, + "CurrentLiquidity not zero for net {:?}", + net + ); + assert!( + !pallet_subtensor_swap::SwapV3Initialized::::get(net), + "SwapV3Initialized still set" + ); + assert!( + !pallet_subtensor_swap::EnabledUserLiquidity::::get(net), + "EnabledUserLiquidity still set" + ); + assert!( + pallet_subtensor_swap::TickIndexBitmapWords::::iter_prefix((net,)) + .next() + .is_none(), + "TickIndexBitmapWords not cleared for net {:?}", + net + ); + } + + // ──────────────────────────────────────────────────────────────────── + // 8) Re-register a fresh subnet and re‑stake using the pallet’s 
min rule + // Assert αΔ equals the sim-swap result for the exact τ staked. + // ──────────────────────────────────────────────────────────────────── + let new_owner_hot = U256::from(99_000); + let new_owner_cold = U256::from(99_001); + let net_new = add_dynamic_network(&new_owner_hot, &new_owner_cold); + SubtensorModule::set_max_registrations_per_block(net_new, 1_000u16); + SubtensorModule::set_target_registrations_per_interval(net_new, 1_000u16); + Emission::::insert(net_new, Vec::::new()); + SubtensorModule::set_subnet_locked_balance(net_new, TaoCurrency::from(0)); + + assert_ok!( + pallet_subtensor_swap::Pallet::::toggle_user_liquidity( + RuntimeOrigin::root(), + net_new, + true + ) + ); + let ct0 = pallet_subtensor_swap::tick::TickIndex::new_unchecked(0); + let sqrt1 = ct0.try_to_sqrt_price().expect("sqrt(1)"); + pallet_subtensor_swap::CurrentTick::::set(net_new, ct0); + pallet_subtensor_swap::AlphaSqrtPrice::::set(net_new, sqrt1); + + // Compute the exact min stake per the pallet rule: DefaultMinStake + fee(DefaultMinStake). + let min_stake_u64: u64 = DefaultMinStake::::get().into(); + let fee_for_min: u64 = pallet_subtensor_swap::Pallet::::sim_swap( + net_new, + subtensor_swap_interface::OrderType::Buy, + min_stake_u64, + ) + .map(|r| r.fee_paid) + .unwrap_or_else(|_e| { + as subtensor_swap_interface::SwapHandler< + ::AccountId, + >>::approx_fee_amount(net_new, min_stake_u64) + }); + let min_amount_required: u64 = min_stake_u64.saturating_add(fee_for_min); + + // Re‑stake from three coldkeys; choose a specific DISTINCT hotkey per cold. + for &cold in &cold_lps[0..3] { + let [hot1, _hot2] = cold_to_hots[&cold]; + register_ok_neuron(net_new, hot1, cold, 7777); + + let before_tau = SubtensorModule::get_coldkey_balance(&cold); + let a_prev: u64 = Alpha::::get((hot1, cold, net_new)).saturating_to_num(); + + // Expected α for this exact τ, using the same sim path as the pallet. 
+ let expected_alpha_out: u64 = pallet_subtensor_swap::Pallet::::sim_swap( + net_new, + subtensor_swap_interface::OrderType::Buy, + min_amount_required, + ) + .map(|r| r.amount_paid_out) + .expect("sim_swap must succeed for fresh net and min amount"); + + assert_ok!(SubtensorModule::do_add_stake( + RuntimeOrigin::signed(cold), + hot1, + net_new, + min_amount_required.into() + )); + + let after_tau = SubtensorModule::get_coldkey_balance(&cold); + let a_new: u64 = Alpha::::get((hot1, cold, net_new)).saturating_to_num(); + let a_delta = a_new.saturating_sub(a_prev); + + // τ decreased by exactly the amount we sent. + assert_eq!( + after_tau, + before_tau.saturating_sub(min_amount_required), + "τ did not decrease by the min required restake amount for cold {:?}", + cold + ); + + // α minted equals the simulated swap’s net out for that same τ. + assert_eq!( + a_delta, expected_alpha_out, + "α minted mismatch for cold {:?} (hot {:?}) on new net (αΔ {}, expected {})", + cold, hot1, a_delta, expected_alpha_out + ); + } + + // Ensure V3 still functional on new net: add a small position for the first cold using its hot1 + let who_cold = cold_lps[0]; + let [who_hot, _] = cold_to_hots[&who_cold]; + add_pos(net_new, who_hot, who_cold, 8, 123_456); + assert!( + pallet_subtensor_swap::Positions::::iter() + .any(|((n, owner, _pid), _)| n == net_new && owner == who_cold), + "new position not recorded on the re-registered net" + ); + }); +} From 00a7f50e668a7eb1cb37776d6258404da609c0c1 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 20 Aug 2025 13:25:44 -0700 Subject: [PATCH 067/379] decrease subnet limit to 128 --- pallets/subtensor/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 173ccd3aff..5ae36117b5 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -872,7 +872,7 @@ pub mod pallet { #[pallet::type_value] /// 
Default value for subnet limit. pub fn DefaultSubnetLimit() -> u16 { - 256 + 128 } #[pallet::storage] From 43bc7885af1196e129c2e34498ab3235dfb0f2dc Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 20 Aug 2025 13:58:54 -0700 Subject: [PATCH 068/379] dissolve_all_liquidity_providers --- pallets/subtensor/src/coinbase/root.rs | 2 +- pallets/swap-interface/src/lib.rs | 2 +- pallets/swap/src/pallet/impls.rs | 29 +++++++++----------------- pallets/swap/src/pallet/tests.rs | 16 +++++++------- 4 files changed, 20 insertions(+), 29 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 477a42f3b4..1d4aa91191 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -374,7 +374,7 @@ impl Pallet { ); // 2. --- Perform the cleanup before removing the network. - T::SwapInterface::liquidate_all_liquidity_providers(netuid)?; + T::SwapInterface::dissolve_all_liquidity_providers(netuid)?; Self::destroy_alpha_in_out_stakes(netuid)?; // 3. --- Remove the network diff --git a/pallets/swap-interface/src/lib.rs b/pallets/swap-interface/src/lib.rs index f29357c741..268893f6a1 100644 --- a/pallets/swap-interface/src/lib.rs +++ b/pallets/swap-interface/src/lib.rs @@ -34,7 +34,7 @@ pub trait SwapHandler { alpha_delta: AlphaCurrency, ); fn is_user_liquidity_enabled(netuid: NetUid) -> bool; - fn liquidate_all_liquidity_providers(netuid: NetUid) -> DispatchResult; + fn dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult; } #[derive(Debug, PartialEq)] diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index c40579b345..3b07aaa768 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1218,11 +1218,7 @@ impl Pallet { /// If all stakes are zero, split evenly. /// - If no hotkeys exist, fall back to (coldkey, coldkey). 
/// - pub fn refund_alpha_to_hotkeys( - netuid: NetUid, - coldkey: &T::AccountId, - alpha_total: AlphaCurrency, - ) { + pub fn refund_alpha(netuid: NetUid, coldkey: &T::AccountId, alpha_total: AlphaCurrency) { if alpha_total.is_zero() { return; } @@ -1339,21 +1335,20 @@ impl Pallet { } } - /// Liquidate (force-close) all LPs for `netuid`, refund providers, and reset all swap state. + /// Dissolve all LPs for `netuid`, refund providers, and reset all swap state. /// /// - **V3 path** (mechanism == 1 && SwapV3Initialized): - /// * Remove **all** positions (user + protocol) via `do_remove_liquidity`. + /// * Remove **all** positions via `do_remove_liquidity`. /// * **Refund** each owner: /// - TAO = Σ(position.tao + position.fee_tao) → credited to the owner's **coldkey** free balance. - /// - ALPHA = Σ(position.alpha + position.fee_alpha) → credited back across **all owned hotkeys** - /// using `refund_alpha_to_hotkeys` (pro‑rata by current α stake; even split if all zero; never skipped). + /// - ALPHA = Σ(position.alpha + position.fee_alpha) → credited back /// * Decrease "provided reserves" (principal only) for non‑protocol owners. /// * Clear ActiveTickIndexManager entries, ticks, fee globals, price, tick, liquidity, /// init flag, bitmap words, fee rate knob, and user LP flag. /// /// - **V2 / non‑V3 path**: /// * No per‑position records exist; still defensively clear the same V3 storages (safe no‑ops). - pub fn do_liquidate_all_liquidity_providers(netuid: NetUid) -> DispatchResult { + pub fn do_dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult { let mechid = T::SubnetInfo::mechanism(netuid.into()); let v3_initialized = SwapV3Initialized::::get(netuid); let user_lp_enabled = @@ -1383,7 +1378,6 @@ impl Pallet { let mut refunds: BTreeMap = BTreeMap::new(); for CloseItem { owner, pos_id } in to_close.into_iter() { - // Remove position first; this returns principal + accrued fees amounts. 
let rm = Self::do_remove_liquidity(netuid, &owner, pos_id)?; // Accumulate (TAO, α) refund: principal + fees. @@ -1398,7 +1392,6 @@ impl Pallet { }) .or_insert((tao_add, alpha_add)); - // Mirror "user-provided" reserves by principal only (fees have been paid out). if owner != protocol_account { T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao); T::BalanceOps::decrease_provided_alpha_reserve(netuid, rm.alpha); @@ -1415,7 +1408,7 @@ impl Pallet { // α → split across all owned hotkeys (never skip). Skip α for protocol account // because protocol liquidity does not map to user stake. if !alpha_sum.is_zero() && owner != protocol_account { - Self::refund_alpha_to_hotkeys(netuid, &owner, alpha_sum); + Self::refund_alpha(netuid, &owner, alpha_sum); } } @@ -1443,13 +1436,11 @@ impl Pallet { // Active tick bitmap words (StorageNMap) – prefix is **(netuid,)**. let _ = TickIndexBitmapWords::::clear_prefix((netuid,), u32::MAX, None); - - // Remove knobs (safe on deregistration). FeeRate::::remove(netuid); EnabledUserLiquidity::::remove(netuid); log::debug!( - "liquidate_all_liquidity_providers: netuid={:?}, mode=V3, user_lp_enabled={}, v3_state_cleared + refunds", + "dissolve_all_liquidity_providers: netuid={:?}, mode=V3, user_lp_enabled={}, v3_state_cleared + refunds", netuid, user_lp_enabled ); @@ -1484,7 +1475,7 @@ impl Pallet { EnabledUserLiquidity::::remove(netuid); log::debug!( - "liquidate_all_liquidity_providers: netuid={:?}, mode=V2-or-nonV3, user_lp_enabled={}, state_cleared", + "dissolve_all_liquidity_providers: netuid={:?}, mode=V2-or-nonV3, user_lp_enabled={}, state_cleared", netuid, user_lp_enabled ); @@ -1576,8 +1567,8 @@ impl SwapHandler for Pallet { fn is_user_liquidity_enabled(netuid: NetUid) -> bool { EnabledUserLiquidity::::get(netuid) } - fn liquidate_all_liquidity_providers(netuid: NetUid) -> DispatchResult { - Self::do_liquidate_all_liquidity_providers(netuid) + fn dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult { + 
Self::do_dissolve_all_liquidity_providers(netuid) } } diff --git a/pallets/swap/src/pallet/tests.rs b/pallets/swap/src/pallet/tests.rs index 6b5204f536..e6148ed94e 100644 --- a/pallets/swap/src/pallet/tests.rs +++ b/pallets/swap/src/pallet/tests.rs @@ -1977,7 +1977,7 @@ fn test_liquidate_v3_removes_positions_ticks_and_state() { assert!(had_bitmap_words); // ACT: Liquidate & reset swap state - assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); // ASSERT: positions cleared (both user and protocol) assert_eq!( @@ -2062,7 +2062,7 @@ fn test_liquidate_v3_with_user_liquidity_disabled() { )); // ACT - assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); // ASSERT: positions & ticks gone, state reset assert_eq!( @@ -2107,7 +2107,7 @@ fn test_liquidate_non_v3_uninitialized_ok_and_clears() { ); // ACT - assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); // ASSERT: Defensive clears leave no residues and do not panic assert!( @@ -2160,9 +2160,9 @@ fn test_liquidate_idempotent() { )); // 1st liquidation - assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); // 2nd liquidation (no state left) — must still succeed - assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); // State remains empty assert!( @@ -2184,8 +2184,8 @@ fn test_liquidate_idempotent() { let netuid = NetUid::from(8); // Never initialize V3 - assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); - assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); + 
assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); assert!( Positions::::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID)) @@ -2252,7 +2252,7 @@ fn liquidate_v3_refunds_user_funds_and_clears_state() { ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken); // Liquidate everything on the subnet. - assert_ok!(Pallet::::do_liquidate_all_liquidity_providers(netuid)); + assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); // Expect balances restored to BEFORE snapshots (no swaps ran -> zero fees). // TAO: we withdrew 'need_tao' above and liquidation refunded it, so we should be back to 'tao_before'. From f22ef77bb5398ba5500fbc362007d862619fb13e Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 20 Aug 2025 14:39:35 -0700 Subject: [PATCH 069/379] clippy --- pallets/admin-utils/src/lib.rs | 2 +- pallets/subtensor/src/coinbase/root.rs | 4 +- pallets/subtensor/src/tests/networks.rs | 65 +++++++------------- pallets/swap/src/pallet/impls.rs | 82 ++++++++++++------------- 4 files changed, 63 insertions(+), 90 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 22e1c4e6e3..9b60377b03 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -972,7 +972,7 @@ pub mod pallet { pub fn sudo_set_subnet_limit(origin: OriginFor, max_subnets: u16) -> DispatchResult { ensure_root(origin)?; pallet_subtensor::Pallet::::set_max_subnets(max_subnets); - log::debug!("MaxSubnets ( max_subnets: {:?} ) ", max_subnets); + log::debug!("MaxSubnets ( max_subnets: {max_subnets:?} ) "); Ok(()) } diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 1d4aa91191..8a95c9e8df 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -477,9 +477,7 @@ impl Pallet { // --- Log final removal. 
log::debug!( - "remove_network: netuid={}, owner={:?} removed successfully", - netuid, - owner_coldkey + "remove_network: netuid={netuid}, owner={owner_coldkey:?} removed successfully" ); } diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index b378cf8899..4659ee4bbe 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -117,6 +117,7 @@ fn dissolve_single_alpha_out_staker_gets_all_tao() { }); } +#[allow(clippy::indexing_slicing)] #[test] fn dissolve_two_stakers_pro_rata_distribution() { new_test_ext(0).execute_with(|| { @@ -142,8 +143,8 @@ fn dissolve_two_stakers_pro_rata_distribution() { // Expected τ shares with largest remainder let total = a1 + a2; - let prod1 = (a1 as u128) * (pot as u128); - let prod2 = (a2 as u128) * (pot as u128); + let prod1 = a1 * (pot as u128); + let prod2 = a2 * (pot as u128); let share1 = (prod1 / total) as u64; let share2 = (prod2 / total) as u64; let mut distributed = share1 + share2; @@ -151,12 +152,8 @@ fn dissolve_two_stakers_pro_rata_distribution() { if distributed < pot { rem.sort_by_key(|&(_c, r)| core::cmp::Reverse(r)); let leftover = pot - distributed; - for i in 0..(leftover as usize) { - if rem[i].0 == s1_cold { - distributed += 1; - } else { - distributed += 1; - } + for _ in 0..leftover as usize { + distributed += 1; } } // Recompute exact expected shares using the same logic @@ -694,8 +691,7 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { assert_eq!( SubtensorModule::get_coldkey_balance(&cold[i]), bal_before[i] + share[i], - "staker {} cold-key balance changed unexpectedly", - i + "staker {i} cold-key balance changed unexpectedly" ); } @@ -1235,6 +1231,7 @@ fn test_tempo_greater_than_weight_set_rate_limit() { }) } +#[allow(clippy::indexing_slicing)] #[test] fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state() { new_test_ext(0).execute_with(|| { @@ -1399,7 +1396,7 @@ fn 
massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( // Capture **pair‑level** α snapshot per net (pre‑LP). for ((hot, cold, net), amt) in Alpha::::iter() { if let Some(&ni) = net_index.get(&net) { - if lp_sets_per_net[ni].iter().any(|&c| c == cold) { + if lp_sets_per_net[ni].contains(&cold) { let a: u128 = amt.saturating_to_num(); if a > 0 { alpha_pairs_per_net @@ -1459,7 +1456,7 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( let mut base_sum_net: u64 = 0; for ((_, cold), a) in pairs.iter().copied() { // quota = a * pot / total_alpha - let prod: u128 = (a as u128).saturating_mul(pot as u128); + let prod: u128 = a.saturating_mul(pot as u128); let base: u64 = (prod / total_alpha) as u64; base_sum_net = base_sum_net.saturating_add(base); *base_share_cold.entry(cold).or_default() = @@ -1510,18 +1507,11 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( assert!( actual >= base, - "cold {:?} actual pot {} is below base {}", - cold, - actual, - base + "cold {cold:?} actual pot {actual} is below base {base}" ); assert!( actual <= base.saturating_add(pairs), - "cold {:?} actual pot {} exceeds base + pairs ({} + {})", - cold, - actual, - base, - pairs + "cold {cold:?} actual pot {actual} exceeds base + pairs ({base} + {pairs})" ); extra_accum = extra_accum.saturating_add(actual.saturating_sub(base)); @@ -1543,8 +1533,7 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( assert_eq!( after.saturating_sub(mid), principal_actual.saturating_add(actual_pot), - "cold {:?} τ balance incorrect vs 'after_adds'", - cold + "cold {cold:?} τ balance incorrect vs 'after_adds'" ); } @@ -1552,44 +1541,37 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( for &net in nets.iter() { assert!( Alpha::::iter().all(|((_h, _c, n), _)| n != net), - "alpha ledger not fully cleared for net {:?}", - net + "alpha ledger not fully cleared for net 
{net:?}" ); assert!( !SubtensorModule::if_subnet_exist(net), - "subnet {:?} still exists", - net + "subnet {net:?} still exists" ); assert!( pallet_subtensor_swap::Ticks::::iter_prefix(net) .next() .is_none(), - "ticks not cleared for net {:?}", - net + "ticks not cleared for net {net:?}" ); assert!( !pallet_subtensor_swap::Positions::::iter() .any(|((n, _owner, _pid), _)| n == net), - "swap positions not fully cleared for net {:?}", - net + "swap positions not fully cleared for net {net:?}" ); assert_eq!( pallet_subtensor_swap::FeeGlobalTao::::get(net).saturating_to_num::(), 0, - "FeeGlobalTao nonzero for net {:?}", - net + "FeeGlobalTao nonzero for net {net:?}" ); assert_eq!( pallet_subtensor_swap::FeeGlobalAlpha::::get(net).saturating_to_num::(), 0, - "FeeGlobalAlpha nonzero for net {:?}", - net + "FeeGlobalAlpha nonzero for net {net:?}" ); assert_eq!( pallet_subtensor_swap::CurrentLiquidity::::get(net), 0, - "CurrentLiquidity not zero for net {:?}", - net + "CurrentLiquidity not zero for net {net:?}" ); assert!( !pallet_subtensor_swap::SwapV3Initialized::::get(net), @@ -1603,8 +1585,7 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( pallet_subtensor_swap::TickIndexBitmapWords::::iter_prefix((net,)) .next() .is_none(), - "TickIndexBitmapWords not cleared for net {:?}", - net + "TickIndexBitmapWords not cleared for net {net:?}" ); } @@ -1679,15 +1660,13 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( assert_eq!( after_tau, before_tau.saturating_sub(min_amount_required), - "τ did not decrease by the min required restake amount for cold {:?}", - cold + "τ did not decrease by the min required restake amount for cold {cold:?}" ); // α minted equals the simulated swap’s net out for that same τ. 
assert_eq!( a_delta, expected_alpha_out, - "α minted mismatch for cold {:?} (hot {:?}) on new net (αΔ {}, expected {})", - cold, hot1, a_delta, expected_alpha_out + "α minted mismatch for cold {cold:?} (hot {hot1:?}) on new net (αΔ {a_delta}, expected {expected_alpha_out})" ); } diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index 3b07aaa768..9befe6b828 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1217,7 +1217,6 @@ impl Pallet { /// - If the coldkey owns multiple hotkeys, split pro‑rata by current α stake on this subnet. /// If all stakes are zero, split evenly. /// - If no hotkeys exist, fall back to (coldkey, coldkey). - /// pub fn refund_alpha(netuid: NetUid, coldkey: &T::AccountId, alpha_total: AlphaCurrency) { if alpha_total.is_zero() { return; @@ -1237,44 +1236,53 @@ impl Pallet { .iter() .map(|hk| { let bal = T::BalanceOps::alpha_balance(netuid, coldkey, hk); - bal.to_u64() as u128 + u128::from(bal.to_u64()) }) .collect(); - let sum_weights: u128 = weights.iter().copied().sum(); - let n: u128 = hotkeys.len() as u128; + let sum_weights: u128 = weights + .iter() + .copied() + .fold(0u128, |acc, w| acc.saturating_add(w)); + let n: u128 = u128::from(hotkeys.len() as u64); - let total_alpha_u128: u128 = alpha_total.to_u64() as u128; + let total_alpha_u128: u128 = u128::from(alpha_total.to_u64()); // 3) Compute integer shares with remainder handling. let mut shares: sp_std::vec::Vec<(T::AccountId, u64)> = sp_std::vec::Vec::with_capacity(hotkeys.len()); + if sum_weights > 0 { // Pro‑rata by weights. 
let mut assigned: u128 = 0; for (hk, w) in hotkeys.iter().cloned().zip(weights.iter().copied()) { - let part: u128 = (total_alpha_u128.saturating_mul(w)) / sum_weights; - shares.push((hk, part as u64)); + let numerator = total_alpha_u128.saturating_mul(w); + let part: u128 = numerator.checked_div(sum_weights).unwrap_or(0); + shares.push((hk, u64::try_from(part).unwrap_or(u64::MAX))); assigned = assigned.saturating_add(part); } - let mut remainder: u64 = total_alpha_u128 - .saturating_sub(assigned) - .try_into() - .unwrap_or(0); - let len = shares.len(); - let mut i = 0usize; - while remainder > 0 && i < len { - shares[i].1 = shares[i].1.saturating_add(1); - remainder = remainder.saturating_sub(1); - i += 1; + + // Distribute remainder one‑by‑one. + let mut remainder: u128 = total_alpha_u128.saturating_sub(assigned); + let mut i: usize = 0; + while remainder > 0 && i < shares.len() { + if let Some(pair) = shares.get_mut(i) { + pair.1 = pair.1.saturating_add(1); + remainder = remainder.saturating_sub(1); + i = i.saturating_add(1); + } else { + break; + } } } else { // Even split. 
- let base: u64 = (total_alpha_u128 / n) as u64; - let mut remainder: u64 = (total_alpha_u128 % n) as u64; + let base_u128 = total_alpha_u128.checked_div(n).unwrap_or(0); + let mut remainder_u128 = total_alpha_u128.checked_rem(n).unwrap_or(0); + + let base: u64 = u64::try_from(base_u128).unwrap_or(u64::MAX); for hk in hotkeys.into_iter() { - let add_one = if remainder > 0 { - remainder = remainder.saturating_sub(1); + let add_one: u64 = if remainder_u128 > 0 { + remainder_u128 = remainder_u128.saturating_sub(1); 1 } else { 0 @@ -1296,12 +1304,7 @@ impl Pallet { Ok(_) => successes.push(hk.clone()), Err(e) => { log::warn!( - "refund_alpha_to_hotkeys: increase_stake failed (cold={:?}, hot={:?}, netuid={:?}, amt={:?}): {:?}", - coldkey, - hk, - netuid, - amt_u64, - e + "refund_alpha_to_hotkeys: increase_stake failed (cold={coldkey:?}, hot={hk:?}, netuid={netuid:?}, amt={amt_u64:?}): {e:?}" ); leftover = leftover.saturating_add(*amt_u64); } @@ -1310,13 +1313,13 @@ impl Pallet { // 5) Retry: spread any leftover across the hotkeys that succeeded in step 4. if leftover > 0 && !successes.is_empty() { - let count = successes.len() as u64; - let base = leftover / count; - let mut rem = leftover % count; + let count_u64 = successes.len() as u64; + let base = leftover.checked_div(count_u64).unwrap_or(0); + let mut rem = leftover.checked_rem(count_u64).unwrap_or(0); for hk in successes.iter() { let add: u64 = base.saturating_add(if rem > 0 { - rem -= 1; + rem = rem.saturating_sub(1); 1 } else { 0 @@ -1341,7 +1344,7 @@ impl Pallet { /// * Remove **all** positions via `do_remove_liquidity`. /// * **Refund** each owner: /// - TAO = Σ(position.tao + position.fee_tao) → credited to the owner's **coldkey** free balance. - /// - ALPHA = Σ(position.alpha + position.fee_alpha) → credited back + /// - ALPHA = Σ(position.alpha + position.fee_alpha) → credited back via `refund_alpha`. /// * Decrease "provided reserves" (principal only) for non‑protocol owners. 
/// * Clear ActiveTickIndexManager entries, ticks, fee globals, price, tick, liquidity, /// init flag, bitmap words, fee rate knob, and user LP flag. @@ -1398,15 +1401,14 @@ impl Pallet { } } - // 3) Process refunds per owner (no skipping). + // 3) Process refunds per owner. for (owner, (tao_sum, alpha_sum)) in refunds.into_iter() { // TAO → coldkey free balance if tao_sum > TaoCurrency::ZERO { T::BalanceOps::increase_balance(&owner, tao_sum); } - // α → split across all owned hotkeys (never skip). Skip α for protocol account - // because protocol liquidity does not map to user stake. + // α → split across all hotkeys owned by `owner`. if !alpha_sum.is_zero() && owner != protocol_account { Self::refund_alpha(netuid, &owner, alpha_sum); } @@ -1440,9 +1442,7 @@ impl Pallet { EnabledUserLiquidity::::remove(netuid); log::debug!( - "dissolve_all_liquidity_providers: netuid={:?}, mode=V3, user_lp_enabled={}, v3_state_cleared + refunds", - netuid, - user_lp_enabled + "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V3, user_lp_enabled={user_lp_enabled}, v3_state_cleared + refunds" ); return Ok(()); @@ -1450,10 +1450,8 @@ impl Pallet { // -------- V2 / non‑V3: no positions to close; still nuke any V3 residues -------- - // Positions (StorageNMap) – prefix is (netuid,) let _ = Positions::::clear_prefix((netuid,), u32::MAX, None); - // Active ticks set via ticks present (if any) let active_ticks: sp_std::vec::Vec = Ticks::::iter_prefix(netuid).map(|(ti, _)| ti).collect(); for ti in active_ticks { @@ -1475,9 +1473,7 @@ impl Pallet { EnabledUserLiquidity::::remove(netuid); log::debug!( - "dissolve_all_liquidity_providers: netuid={:?}, mode=V2-or-nonV3, user_lp_enabled={}, state_cleared", - netuid, - user_lp_enabled + "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V2-or-nonV3, user_lp_enabled={user_lp_enabled}, state_cleared" ); Ok(()) From d4d8619ba81594cf689404e84daa69f6c0be698e Mon Sep 17 00:00:00 2001 From: John Reed 
<87283488+JohnReedV@users.noreply.github.com> Date: Wed, 20 Aug 2025 14:40:07 -0700 Subject: [PATCH 070/379] bump spec --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 8962b762f4..5bd265f189 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -213,7 +213,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 302, + spec_version: 303, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From b8c2e3064c61aa11af8a1c0db5f507060557f411 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 21 Aug 2025 22:17:11 +0800 Subject: [PATCH 071/379] bump version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 1d8e658f1c..7185922a9e 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -215,7 +215,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 303, + spec_version: 304, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From b75c6e1f7c7f8a2aa7f18211a547d28ab52a2ce1 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 21 Aug 2025 09:04:41 -0700 Subject: [PATCH 072/379] DefaultSubnetLimit 148 --- pallets/subtensor/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 5ae36117b5..916996c5b4 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -872,7 +872,7 @@ pub mod pallet { #[pallet::type_value] /// Default value for subnet limit. pub fn DefaultSubnetLimit() -> u16 { - 128 + 148 } #[pallet::storage] From adf9d251c0addf9d3944f11f82ef6c9746ecd335 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 21 Aug 2025 11:16:03 -0700 Subject: [PATCH 073/379] use price instead of emission --- pallets/subtensor/src/coinbase/root.rs | 28 ++++++++++++++------------ pallets/subtensor/src/macros/errors.rs | 2 +- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 8a95c9e8df..f5e02371ef 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -22,6 +22,7 @@ use frame_support::weights::Weight; use safe_math::*; use sp_core::Get; use substrate_fixed::types::I64F64; +use substrate_fixed::types::U96F32; use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; use subtensor_swap_interface::SwapHandler; @@ -583,14 +584,19 @@ impl Pallet { pub fn get_network_to_prune() -> Option { let current_block: u64 = Self::get_current_block_as_u64(); - let total_networks: u16 = TotalNetworks::::get(); let mut candidate_netuid: Option = None; - let mut candidate_emission: u64 = u64::MAX; + let mut candidate_price: U96F32 = 
U96F32::saturating_from_num(u128::MAX); let mut candidate_timestamp: u64 = u64::MAX; - for net in 1..=total_networks { - let netuid: NetUid = net.into(); + for (netuid, added) in NetworksAdded::::iter() { + if !added || netuid == NetUid::ROOT { + continue; + } + if !Self::if_subnet_exist(netuid) { + continue; + } + let registered_at = NetworkRegisteredAt::::get(netuid); // Skip immune networks. @@ -598,18 +604,14 @@ impl Pallet { continue; } - // Sum AlphaCurrency as u64 for comparison. - let total_emission: u64 = Emission::::get(netuid) - .into_iter() - .map(Into::::into) - .sum(); + let price: U96F32 = T::SwapInterface::current_alpha_price(netuid.into()); - // If tie on total_emission, earliest registration wins. - if total_emission < candidate_emission - || (total_emission == candidate_emission && registered_at < candidate_timestamp) + // If tie on price, earliest registration wins. + if price < candidate_price + || (price == candidate_price && registered_at < candidate_timestamp) { candidate_netuid = Some(netuid); - candidate_emission = total_emission; + candidate_price = price; candidate_timestamp = registered_at; } } diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index 3708c5c9cd..a5fcce3da4 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -244,7 +244,7 @@ mod errors { SymbolAlreadyInUse, /// Incorrect commit-reveal version. 
IncorrectCommitRevealVersion, - /// Subnet limit reached & no eligible subnet to prune + /// Subnet limit reached & there is no eligible subnet to prune SubnetLimitReached, } } From 203ebf21dd13b591565a8a2f29458e71ae0cf6e2 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 21 Aug 2025 11:34:17 -0700 Subject: [PATCH 074/379] update prune tests for price --- pallets/subtensor/src/tests/networks.rs | 27 ++++++++++++------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 4659ee4bbe..7dad4f8543 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -5,7 +5,7 @@ use frame_support::{assert_err, assert_ok}; use frame_system::Config; use sp_core::U256; use sp_std::collections::btree_map::BTreeMap; -use substrate_fixed::types::{U64F64, U96F32}; +use substrate_fixed::types::{U64F64, U96F32, I96F32}; use subtensor_runtime_common::TaoCurrency; use subtensor_swap_interface::SwapHandler; @@ -731,7 +731,7 @@ fn prune_none_when_all_networks_immune() { } #[test] -fn prune_selects_network_with_lowest_emission() { +fn prune_selects_network_with_lowest_price() { new_test_ext(0).execute_with(|| { let n1 = add_dynamic_network(&U256::from(20), &U256::from(10)); let n2 = add_dynamic_network(&U256::from(40), &U256::from(30)); @@ -740,16 +740,16 @@ fn prune_selects_network_with_lowest_emission() { let imm = SubtensorModule::get_network_immunity_period(); System::set_block_number(imm + 10); - // n1 has lower total emission - Emission::::insert(n1, vec![AlphaCurrency::from(5)]); - Emission::::insert(n2, vec![AlphaCurrency::from(100)]); + // n1 has lower price → should be pruned + SubnetMovingPrice::::insert(n1, I96F32::from_num(1)); + SubnetMovingPrice::::insert(n2, I96F32::from_num(10)); assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); }); } #[test] -fn 
prune_ignores_immune_network_even_if_lower_emission() { +fn prune_ignores_immune_network_even_if_lower_price() { new_test_ext(0).execute_with(|| { // create mature network n1 first let n1 = add_dynamic_network(&U256::from(22), &U256::from(11)); @@ -760,9 +760,9 @@ fn prune_ignores_immune_network_even_if_lower_emission() { // create second network n2 *inside* immunity let n2 = add_dynamic_network(&U256::from(44), &U256::from(33)); - // emissions: n1 bigger, n2 smaller but immune - Emission::::insert(n1, vec![AlphaCurrency::from(50)]); - Emission::::insert(n2, vec![AlphaCurrency::from(1)]); + // prices: n2 lower but immune; n1 must be selected + SubnetMovingPrice::::insert(n1, I96F32::from_num(5)); + SubnetMovingPrice::::insert(n2, I96F32::from_num(1)); System::set_block_number(imm + 10); // still immune for n2 assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); @@ -770,7 +770,7 @@ fn prune_ignores_immune_network_even_if_lower_emission() { } #[test] -fn prune_tie_on_emission_earlier_registration_wins() { +fn prune_tie_on_price_earlier_registration_wins() { new_test_ext(0).execute_with(|| { // n1 registered first let n1 = add_dynamic_network(&U256::from(66), &U256::from(55)); @@ -783,11 +783,10 @@ fn prune_tie_on_emission_earlier_registration_wins() { let imm = SubtensorModule::get_network_immunity_period(); System::set_block_number(imm + 20); - // identical emissions → tie - Emission::::insert(n1, vec![AlphaCurrency::from(123)]); - Emission::::insert(n2, vec![AlphaCurrency::from(123)]); + // identical prices → tie; earlier (n1) must be chosen + SubnetMovingPrice::::insert(n1, I96F32::from_num(7)); + SubnetMovingPrice::::insert(n2, I96F32::from_num(7)); - // earlier (n1) must be chosen assert_eq!(SubtensorModule::get_network_to_prune(), Some(n1)); }); } From a0c93a86c961f94ba4e23ea5d785d9c86f009a68 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 21 Aug 2025 12:18:03 -0700 Subject: [PATCH 075/379] add test 
prune_selection_complex_state_exhaustive --- pallets/subtensor/src/tests/networks.rs | 213 ++++++++++++++++++++++-- 1 file changed, 196 insertions(+), 17 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 7dad4f8543..55eba699fd 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -3,9 +3,10 @@ use crate::migrations::migrate_network_immunity_period; use crate::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; +use pallet_subtensor_swap::{AlphaSqrtPrice, SwapV3Initialized}; use sp_core::U256; use sp_std::collections::btree_map::BTreeMap; -use substrate_fixed::types::{U64F64, U96F32, I96F32}; +use substrate_fixed::types::{I96F32, U64F64, U96F32}; use subtensor_runtime_common::TaoCurrency; use subtensor_swap_interface::SwapHandler; @@ -792,29 +793,207 @@ fn prune_tie_on_price_earlier_registration_wins() { } #[test] -fn register_network_under_limit_success() { +fn prune_selection_complex_state_exhaustive() { new_test_ext(0).execute_with(|| { - SubnetLimit::::put(32u16); + let imm = SubtensorModule::get_network_immunity_period(); - let total_before = TotalNetworks::::get(); + // --------------------------------------------------------------------- + // Build a rich topology of networks with controlled registration times. + // --------------------------------------------------------------------- + // n1 + n2 in the same block (equal timestamp) to test "tie + same time". 
+ System::set_block_number(0); + let n1 = add_dynamic_network(&U256::from(101), &U256::from(201)); + let n2 = add_dynamic_network(&U256::from(102), &U256::from(202)); // same registered_at as n1 - let cold = U256::from(10); - let hot = U256::from(11); + // Later registrations (strictly greater timestamp than n1/n2) + System::set_block_number(1); + let n3 = add_dynamic_network(&U256::from(103), &U256::from(203)); - let lock_now: u64 = SubtensorModule::get_network_lock_cost().into(); - SubtensorModule::add_balance_to_coldkey_account(&cold, lock_now.saturating_mul(10)); + System::set_block_number(2); + let n4 = add_dynamic_network(&U256::from(104), &U256::from(204)); - assert_ok!(SubtensorModule::do_register_network( - RuntimeOrigin::signed(cold), - &hot, - 1, + // Create *immune* networks that will remain ineligible initially, + // even if their price is the lowest. + System::set_block_number(imm + 5); + let n5 = add_dynamic_network(&U256::from(105), &U256::from(205)); // immune at first + + System::set_block_number(imm + 6); + let n6 = add_dynamic_network(&U256::from(106), &U256::from(206)); // immune at first + + // (Root is ignored by the selector; we may still set it for completeness.) + let root = NetUid::ROOT; + + // --------------------------------------------------------------------- + // Drive price via V3 sqrt-price path: price = (sqrt_price)^2 + // Ensure V3 is initialized so current_alpha_price() uses sqrt-price. 
+ // --------------------------------------------------------------------- + for net in [n1, n2, n3, n4, n5, n6] { + assert_ok!( + pallet_subtensor_swap::Pallet::::toggle_user_liquidity( + RuntimeOrigin::root(), + net, + true + ) + ); + SwapV3Initialized::::insert(net, true); + } + + // sqrt prices → prices: + // n1: sqrt=5 → price 25 + // n2: sqrt=5 → price 25 + // n3: sqrt=10 → price 100 + // n4: sqrt=1 → price 1 (lowest among matured initially) + // n5: sqrt=0 → price 0 (lowest overall but immune initially) + // n6: sqrt=0 → price 0 (lowest overall but immune initially) + AlphaSqrtPrice::::insert(n1, U64F64::from_num(5)); + AlphaSqrtPrice::::insert(n2, U64F64::from_num(5)); + AlphaSqrtPrice::::insert(n3, U64F64::from_num(10)); + AlphaSqrtPrice::::insert(n4, U64F64::from_num(1)); + AlphaSqrtPrice::::insert(n5, U64F64::from_num(0)); + AlphaSqrtPrice::::insert(n6, U64F64::from_num(0)); + AlphaSqrtPrice::::insert(root, U64F64::from_num(0)); + + // --------------------------------------------------------------------- + // Phase A: Only n1..n4 are mature → lowest price (n4=1) should win. + // --------------------------------------------------------------------- + System::set_block_number(imm + 10); + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n4), + "Among mature nets (n1..n4), n4 has price=1 (lowest) and should be chosen." + ); + + // --------------------------------------------------------------------- + // Phase B: Tie on price with *same registration time* (n1 vs n2). + // Raise n4's price to 25 (sqrt=5) so {n1=25, n2=25, n3=100, n4=25}. + // n1 and n2 share the *same registered_at*. The tie should keep the + // first encountered (stable iteration by key order) → n1. 
+ // --------------------------------------------------------------------- + AlphaSqrtPrice::::insert(n4, U64F64::from_num(5)); // price now 25 + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n1), + "Tie on price with equal timestamps (n1,n2) → first encountered (n1) should persist." + ); + + // --------------------------------------------------------------------- + // Phase C: Tie on price with *different registration times*. + // Make n3 price=25 as well (sqrt=5). Now n1,n2,n3,n4 all have price=25. + // Earliest registration time among them is n1 (block 0). + // --------------------------------------------------------------------- + AlphaSqrtPrice::::insert(n3, U64F64::from_num(5)); // price now 25 + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n1), + "Tie on price across multiple nets → earliest registration (n1) wins." + ); + + // --------------------------------------------------------------------- + // Phase D: Immune networks ignored even if strictly cheaper (0). + // n5 and n6 price=0 but still immune at (imm + 10). Ensure they are + // ignored and selection remains n1. + // --------------------------------------------------------------------- + let now = System::block_number(); + assert!( + now < NetworkRegisteredAt::::get(n5) + imm, + "n5 is immune at current block" + ); + assert!( + now < NetworkRegisteredAt::::get(n6) + imm, + "n6 is immune at current block" + ); + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n1), + "Immune nets (n5,n6) must be ignored despite lower price." + ); + + // --------------------------------------------------------------------- + // Phase E: If *all* networks are immune → return None. + // Move clock back before any network's immunity expires. + // --------------------------------------------------------------------- + System::set_block_number(0); + assert_eq!( + SubtensorModule::get_network_to_prune(), None, - )); + "With all networks immune, there is no prunable candidate." 
+ ); - assert_eq!(TotalNetworks::::get(), total_before + 1); - let new_id: NetUid = TotalNetworks::::get().into(); - assert_eq!(SubnetOwner::::get(new_id), cold); - assert_eq!(SubnetOwnerHotkey::::get(new_id), hot); + // --------------------------------------------------------------------- + // Phase F: Advance beyond immunity for n5 & n6. + // Both n5 and n6 now eligible with price=0 (lowest). + // Tie on price; earlier registration between n5 and n6 is n5. + // --------------------------------------------------------------------- + System::set_block_number(2 * imm + 10); + assert!( + System::block_number() >= NetworkRegisteredAt::::get(n5) + imm, + "n5 has matured" + ); + assert!( + System::block_number() >= NetworkRegisteredAt::::get(n6) + imm, + "n6 has matured" + ); + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n5), + "After immunity, n5 (price=0) should win; tie with n6 broken by earlier registration." + ); + + // --------------------------------------------------------------------- + // Phase G: Create *sparse* netuids and ensure selection is stable. + // Remove n5; now n6 (price=0) should be selected. + // This validates robustness to holes / non-contiguous netuids. + // --------------------------------------------------------------------- + SubtensorModule::remove_network(n5); + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n6), + "After removing n5, next-lowest (n6=0) should be chosen even with sparse netuids." + ); + + // --------------------------------------------------------------------- + // Phase H: Dynamic price changes. + // Make n6 expensive (sqrt=10 → price=100); make n3 cheapest (sqrt=1 → price=1). 
+ // --------------------------------------------------------------------- + AlphaSqrtPrice::::insert(n6, U64F64::from_num(10)); // price 100 + AlphaSqrtPrice::::insert(n3, U64F64::from_num(1)); // price 1 + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n3), + "Dynamic changes: n3 set to price=1 (lowest among eligibles) → should be pruned." + ); + + // --------------------------------------------------------------------- + // Phase I: Tie again (n2 vs n3) but earlier registration must win. + // Give n2 the same price as n3; n2 registered at block 0, n3 at block 1. + // n2 should be chosen. + // --------------------------------------------------------------------- + AlphaSqrtPrice::::insert(n2, U64F64::from_num(1)); // price 1 + assert_eq!( + SubtensorModule::get_network_to_prune(), + Some(n2), + "Tie on price across n2 (earlier reg) and n3 → n2 wins by timestamp." + ); + + // --------------------------------------------------------------------- + // (Extra) Mark n2 as 'not added' to assert we honor the `added` flag, + // then restore it to avoid side-effects on subsequent tests. + // --------------------------------------------------------------------- + NetworksAdded::::insert(n2, false); + assert_ne!( + SubtensorModule::get_network_to_prune(), + Some(n2), + "`added=false` must exclude n2 from consideration." + ); + NetworksAdded::::insert(n2, true); + + // Root is always ignored even if cheapest. + AlphaSqrtPrice::::insert(root, U64F64::from_num(0)); + assert_ne!( + SubtensorModule::get_network_to_prune(), + Some(root), + "ROOT must never be selected for pruning." 
+ ); }); } From 8ab9fba9452b602674d2b77b69b604e110283208 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 21 Aug 2025 13:32:13 -0700 Subject: [PATCH 076/379] add a few missing maps --- pallets/subtensor/src/coinbase/root.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index f5e02371ef..995071922b 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -466,11 +466,12 @@ impl Pallet { SubnetTaoInEmission::::remove(netuid); SubnetVolume::::remove(netuid); SubnetMovingPrice::::remove(netuid); - - // --- 12. Add the balance back to the owner. + TokenSymbol::::remove(netuid); + SubnetMechanism::::remove(netuid); + SubnetOwnerHotkey::::remove(netuid); SubnetOwner::::remove(netuid); - // --- 13. Remove subnet identity if it exists. + // --- 11. Remove subnet identity if it exists. if SubnetIdentitiesV3::::contains_key(netuid) { SubnetIdentitiesV3::::remove(netuid); Self::deposit_event(Event::SubnetIdentityRemoved(netuid)); From 448a78d955a721fcf49ec5b1a251c08b201c0cf0 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 21 Aug 2025 14:50:45 -0700 Subject: [PATCH 077/379] clear additional maps --- pallets/subtensor/src/coinbase/root.rs | 220 ++++++++++++++++++++++-- pallets/subtensor/src/tests/networks.rs | 208 ++++++++++++++++++++-- 2 files changed, 405 insertions(+), 23 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 995071922b..b37d655d6f 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -16,13 +16,10 @@ // DEALINGS IN THE SOFTWARE. 
use super::*; -use frame_support::dispatch::Pays; -use frame_support::storage::IterableStorageDoubleMap; -use frame_support::weights::Weight; +use frame_support::{dispatch::Pays, weights::Weight}; use safe_math::*; use sp_core::Get; -use substrate_fixed::types::I64F64; -use substrate_fixed::types::U96F32; +use substrate_fixed::types::{I64F64, U96F32}; use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; use subtensor_swap_interface::SwapHandler; @@ -418,10 +415,11 @@ impl Pallet { let _ = Weights::::clear_prefix(netuid, u32::MAX, None); // --- 9. Also zero out any weights *in the root network* that point to this netuid. - for (uid_i, weights_i) in - as IterableStorageDoubleMap>>::iter_prefix( - NetUid::ROOT, - ) + for (uid_i, weights_i) in as frame_support::storage::IterableStorageDoubleMap< + NetUid, + u16, + sp_std::vec::Vec<(u16, u16)>, + >>::iter_prefix(NetUid::ROOT) { let mut modified_weights = weights_i.clone(); for (subnet_id, weight) in modified_weights.iter_mut() { @@ -449,6 +447,8 @@ impl Pallet { for (_uid, key) in keys { IsNetworkMember::::remove(key, netuid); } + + // --- 11. Core per-net parameters (already present + a few that were missing). Tempo::::remove(netuid); Kappa::::remove(netuid); Difficulty::::remove(netuid); @@ -460,27 +460,221 @@ impl Pallet { RegistrationsThisInterval::::remove(netuid); POWRegistrationsThisInterval::::remove(netuid); BurnRegistrationsThisInterval::::remove(netuid); + + // --- 12. AMM / price / accounting (expanded). SubnetTAO::::remove(netuid); SubnetAlphaInEmission::::remove(netuid); SubnetAlphaOutEmission::::remove(netuid); SubnetTaoInEmission::::remove(netuid); SubnetVolume::::remove(netuid); SubnetMovingPrice::::remove(netuid); + + // Additional AMM & pool surfaces that can exist independently of dissolve paths: + SubnetAlphaIn::::remove(netuid); + SubnetAlphaInProvided::::remove(netuid); + SubnetAlphaOut::::remove(netuid); + SubnetTaoProvided::::remove(netuid); + + // --- 13. 
Token / mechanism / registration toggles that were previously left behind. TokenSymbol::::remove(netuid); SubnetMechanism::::remove(netuid); SubnetOwnerHotkey::::remove(netuid); - SubnetOwner::::remove(netuid); - - // --- 11. Remove subnet identity if it exists. + NetworkRegistrationAllowed::::remove(netuid); + NetworkPowRegistrationAllowed::::remove(netuid); + + // --- 14. Locks & toggles. + TransferToggle::::remove(netuid); + SubnetLocked::::remove(netuid); + LargestLocked::::remove(netuid); + + // --- 15. Mechanism step / emissions bookkeeping. + FirstEmissionBlockNumber::::remove(netuid); + PendingEmission::::remove(netuid); + PendingRootDivs::::remove(netuid); + PendingAlphaSwapped::::remove(netuid); + PendingOwnerCut::::remove(netuid); + BlocksSinceLastStep::::remove(netuid); + LastMechansimStepBlock::::remove(netuid); + + // --- 16. Serving / rho / curves, and other per-net controls. + ServingRateLimit::::remove(netuid); + Rho::::remove(netuid); + AlphaSigmoidSteepness::::remove(netuid); + + MaxAllowedValidators::::remove(netuid); + AdjustmentInterval::::remove(netuid); + BondsMovingAverage::::remove(netuid); + BondsPenalty::::remove(netuid); + BondsResetOn::::remove(netuid); + WeightsSetRateLimit::::remove(netuid); + ValidatorPruneLen::::remove(netuid); + ScalingLawPower::::remove(netuid); + TargetRegistrationsPerInterval::::remove(netuid); + AdjustmentAlpha::::remove(netuid); + CommitRevealWeightsEnabled::::remove(netuid); + + Burn::::remove(netuid); + MinBurn::::remove(netuid); + MaxBurn::::remove(netuid); + MinDifficulty::::remove(netuid); + MaxDifficulty::::remove(netuid); + LastAdjustmentBlock::::remove(netuid); + RegistrationsThisBlock::::remove(netuid); + EMAPriceHalvingBlocks::::remove(netuid); + RAORecycledForRegistration::::remove(netuid); + MaxRegistrationsPerBlock::::remove(netuid); + WeightsVersionKey::::remove(netuid); + + // --- 17. Subtoken / feature flags. 
+ LiquidAlphaOn::::remove(netuid); + Yuma3On::::remove(netuid); + AlphaValues::::remove(netuid); + SubtokenEnabled::::remove(netuid); + ImmuneOwnerUidsLimit::::remove(netuid); + + // --- 18. Consensus aux vectors. + StakeWeight::::remove(netuid); + LoadedEmission::::remove(netuid); + + // --- 19. DMAPs where netuid is the FIRST key: can clear by prefix. + let _ = BlockAtRegistration::::clear_prefix(netuid, u32::MAX, None); + let _ = Axons::::clear_prefix(netuid, u32::MAX, None); + let _ = NeuronCertificates::::clear_prefix(netuid, u32::MAX, None); + let _ = Prometheus::::clear_prefix(netuid, u32::MAX, None); + let _ = AlphaDividendsPerSubnet::::clear_prefix(netuid, u32::MAX, None); + let _ = TaoDividendsPerSubnet::::clear_prefix(netuid, u32::MAX, None); + let _ = PendingChildKeys::::clear_prefix(netuid, u32::MAX, None); + let _ = AssociatedEvmAddress::::clear_prefix(netuid, u32::MAX, None); + + // Commit-reveal / weights commits (all per-net prefixes): + let _ = WeightCommits::::clear_prefix(netuid, u32::MAX, None); + let _ = TimelockedWeightCommits::::clear_prefix(netuid, u32::MAX, None); + let _ = CRV3WeightCommits::::clear_prefix(netuid, u32::MAX, None); + let _ = CRV3WeightCommitsV2::::clear_prefix(netuid, u32::MAX, None); + RevealPeriodEpochs::::remove(netuid); + + // Last hotkey swap (DMAP where netuid is FIRST key → easy) + let _ = LastHotkeySwapOnNetuid::::clear_prefix(netuid, u32::MAX, None); + + // --- 20. Identity maps across versions (netuid-scoped). + SubnetIdentities::::remove(netuid); + SubnetIdentitiesV2::::remove(netuid); if SubnetIdentitiesV3::::contains_key(netuid) { SubnetIdentitiesV3::::remove(netuid); Self::deposit_event(Event::SubnetIdentityRemoved(netuid)); } - // --- Log final removal. + // --- 21. DMAP / NMAP where netuid is NOT the first key → iterate & remove. 
+ + // ChildkeyTake: (hot, netuid) → u16 + { + let to_rm: sp_std::vec::Vec = ChildkeyTake::::iter() + .filter_map(|(hot, n, _)| if n == netuid { Some(hot) } else { None }) + .collect(); + for hot in to_rm { + ChildkeyTake::::remove(&hot, netuid); + } + } + // ChildKeys: (parent, netuid) → Vec<...> + { + let to_rm: sp_std::vec::Vec = ChildKeys::::iter() + .filter_map(|(parent, n, _)| if n == netuid { Some(parent) } else { None }) + .collect(); + for parent in to_rm { + ChildKeys::::remove(&parent, netuid); + } + } + // ParentKeys: (child, netuid) → Vec<...> + { + let to_rm: sp_std::vec::Vec = ParentKeys::::iter() + .filter_map(|(child, n, _)| if n == netuid { Some(child) } else { None }) + .collect(); + for child in to_rm { + ParentKeys::::remove(&child, netuid); + } + } + // LastHotkeyEmissionOnNetuid: (hot, netuid) → α + { + let to_rm: sp_std::vec::Vec = LastHotkeyEmissionOnNetuid::::iter() + .filter_map(|(hot, n, _)| if n == netuid { Some(hot) } else { None }) + .collect(); + for hot in to_rm { + LastHotkeyEmissionOnNetuid::::remove(&hot, netuid); + } + } + // TotalHotkeyAlpha / TotalHotkeyAlphaLastEpoch / TotalHotkeyShares: (hot, netuid) → ... 
+ { + let to_rm_alpha: sp_std::vec::Vec = TotalHotkeyAlpha::::iter() + .filter_map(|(hot, n, _)| if n == netuid { Some(hot) } else { None }) + .collect(); + for hot in to_rm_alpha { + TotalHotkeyAlpha::::remove(&hot, netuid); + } + + let to_rm_alpha_last: sp_std::vec::Vec = + TotalHotkeyAlphaLastEpoch::::iter() + .filter_map(|(hot, n, _)| if n == netuid { Some(hot) } else { None }) + .collect(); + for hot in to_rm_alpha_last { + TotalHotkeyAlphaLastEpoch::::remove(&hot, netuid); + } + + let to_rm_shares: sp_std::vec::Vec = TotalHotkeyShares::::iter() + .filter_map(|(hot, n, _)| if n == netuid { Some(hot) } else { None }) + .collect(); + for hot in to_rm_shares { + TotalHotkeyShares::::remove(&hot, netuid); + } + } + // Alpha shares NMAP: (hot, cold, netuid) → U64F64 + { + let to_rm: sp_std::vec::Vec<(T::AccountId, T::AccountId)> = Alpha::::iter() + .filter_map( + |((hot, cold, n), _)| if n == netuid { Some((hot, cold)) } else { None }, + ) + .collect(); + for (hot, cold) in to_rm { + Alpha::::remove((hot, cold, netuid)); + } + } + // TransactionKeyLastBlock NMAP: (hot, netuid, name) → u64 + { + let to_rm: sp_std::vec::Vec<(T::AccountId, u16)> = TransactionKeyLastBlock::::iter() + .filter_map( + |((hot, n, name), _)| if n == netuid { Some((hot, name)) } else { None }, + ) + .collect(); + for (hot, name) in to_rm { + TransactionKeyLastBlock::::remove((hot, netuid, name)); + } + } + // StakingOperationRateLimiter NMAP: (hot, cold, netuid) → bool + { + let to_rm: sp_std::vec::Vec<(T::AccountId, T::AccountId)> = + StakingOperationRateLimiter::::iter() + .filter_map( + |((hot, cold, n), _)| { + if n == netuid { Some((hot, cold)) } else { None } + }, + ) + .collect(); + for (hot, cold) in to_rm { + StakingOperationRateLimiter::::remove((hot, cold, netuid)); + } + } + + // --- 22. Subnet leasing: remove mapping and any lease-scoped state linked to this netuid. 
+ if let Some(lease_id) = SubnetUidToLeaseId::::take(netuid) { + SubnetLeases::::remove(lease_id); + let _ = SubnetLeaseShares::::clear_prefix(lease_id, u32::MAX, None); + AccumulatedLeaseDividends::::remove(lease_id); + } + + // --- Final removal logging. log::debug!( "remove_network: netuid={netuid}, owner={owner_coldkey:?} removed successfully" ); + Self::deposit_event(Event::NetworkRemoved(netuid)); } #[allow(clippy::arithmetic_side_effects)] diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 55eba699fd..d476e6eaa0 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -286,12 +286,15 @@ fn dissolve_clears_all_per_subnet_storages() { // ------------------------------------------------------------------ // Populate each storage item with a minimal value of the CORRECT type // ------------------------------------------------------------------ + // Core ownership / bookkeeping SubnetOwner::::insert(net, owner_cold); + SubnetOwnerHotkey::::insert(net, owner_hot); SubnetworkN::::insert(net, 0u16); NetworkModality::::insert(net, 0u16); NetworksAdded::::insert(net, true); NetworkRegisteredAt::::insert(net, 0u64); + // Consensus vectors Rank::::insert(net, vec![1u16]); Trust::::insert(net, vec![1u16]); Active::::insert(net, vec![true]); @@ -301,10 +304,10 @@ fn dissolve_clears_all_per_subnet_storages() { Dividends::::insert(net, vec![1u16]); PruningScores::::insert(net, vec![1u16]); LastUpdate::::insert(net, vec![0u64]); - ValidatorPermit::::insert(net, vec![true]); ValidatorTrust::::insert(net, vec![1u16]); + // Per‑net params Tempo::::insert(net, 1u16); Kappa::::insert(net, 1u16); Difficulty::::insert(net, 1u64); @@ -319,13 +322,14 @@ fn dissolve_clears_all_per_subnet_storages() { POWRegistrationsThisInterval::::insert(net, 1u16); BurnRegistrationsThisInterval::::insert(net, 1u16); + // Pool / AMM counters SubnetTAO::::insert(net, TaoCurrency::from(1)); 
SubnetAlphaInEmission::::insert(net, AlphaCurrency::from(1)); SubnetAlphaOutEmission::::insert(net, AlphaCurrency::from(1)); SubnetTaoInEmission::::insert(net, TaoCurrency::from(1)); SubnetVolume::::insert(net, 1u128); - // Fields that will be ZEROED (not removed) + // Items now REMOVED (not zeroed) by dissolution SubnetAlphaIn::::insert(net, AlphaCurrency::from(2)); SubnetAlphaOut::::insert(net, AlphaCurrency::from(3)); @@ -333,7 +337,97 @@ fn dissolve_clears_all_per_subnet_storages() { Keys::::insert(net, 0u16, owner_hot); Bonds::::insert(net, 0u16, vec![(0u16, 1u16)]); Weights::::insert(net, 0u16, vec![(1u16, 1u16)]); - IsNetworkMember::::insert(owner_cold, net, true); + + // Membership entry for the SAME hotkey as Keys + IsNetworkMember::::insert(owner_hot, net, true); + + + // Token / price / provided reserves + TokenSymbol::::insert(net, b"XX".to_vec()); + SubnetMovingPrice::::insert(net, substrate_fixed::types::I96F32::from_num(1)); + SubnetTaoProvided::::insert(net, TaoCurrency::from(1)); + SubnetAlphaInProvided::::insert(net, AlphaCurrency::from(1)); + + // Subnet locks + TransferToggle::::insert(net, true); + SubnetLocked::::insert(net, TaoCurrency::from(1)); + LargestLocked::::insert(net, 1u64); + + // Subnet parameters & pending counters + FirstEmissionBlockNumber::::insert(net, 1u64); + SubnetMechanism::::insert(net, 1u16); + NetworkRegistrationAllowed::::insert(net, true); + NetworkPowRegistrationAllowed::::insert(net, true); + PendingEmission::::insert(net, AlphaCurrency::from(1)); + PendingRootDivs::::insert(net, TaoCurrency::from(1)); + PendingAlphaSwapped::::insert(net, AlphaCurrency::from(1)); + PendingOwnerCut::::insert(net, AlphaCurrency::from(1)); + BlocksSinceLastStep::::insert(net, 1u64); + LastMechansimStepBlock::::insert(net, 1u64); + ServingRateLimit::::insert(net, 1u64); + Rho::::insert(net, 1u16); + AlphaSigmoidSteepness::::insert(net, 1i16); + + // Weights/versioning/targets/limits + WeightsVersionKey::::insert(net, 1u64); + 
MaxAllowedValidators::::insert(net, 1u16); + AdjustmentInterval::::insert(net, 2u16); + BondsMovingAverage::::insert(net, 1u64); + BondsPenalty::::insert(net, 1u16); + BondsResetOn::::insert(net, true); + WeightsSetRateLimit::::insert(net, 1u64); + ValidatorPruneLen::::insert(net, 1u64); + ScalingLawPower::::insert(net, 1u16); + TargetRegistrationsPerInterval::::insert(net, 1u16); + AdjustmentAlpha::::insert(net, 1u64); + CommitRevealWeightsEnabled::::insert(net, true); + + // Burn/difficulty/adjustment + Burn::::insert(net, TaoCurrency::from(1)); + MinBurn::::insert(net, TaoCurrency::from(1)); + MaxBurn::::insert(net, TaoCurrency::from(2)); + MinDifficulty::::insert(net, 1u64); + MaxDifficulty::::insert(net, 2u64); + LastAdjustmentBlock::::insert(net, 1u64); + RegistrationsThisBlock::::insert(net, 1u16); + EMAPriceHalvingBlocks::::insert(net, 1u64); + RAORecycledForRegistration::::insert(net, TaoCurrency::from(1)); + + // Feature toggles + LiquidAlphaOn::::insert(net, true); + Yuma3On::::insert(net, true); + AlphaValues::::insert(net, (1u16, 2u16)); + SubtokenEnabled::::insert(net, true); + ImmuneOwnerUidsLimit::::insert(net, 1u16); + + // Per‑subnet vectors / indexes + StakeWeight::::insert(net, vec![1u16]); + + // Uid/registration + Uids::::insert(net, owner_hot, 0u16); + BlockAtRegistration::::insert(net, 0u16, 1u64); + + // Per‑subnet dividends + AlphaDividendsPerSubnet::::insert(net, owner_hot, AlphaCurrency::from(1)); + TaoDividendsPerSubnet::::insert(net, owner_hot, TaoCurrency::from(1)); + + // Parent/child topology + takes + ChildkeyTake::::insert(owner_hot, net, 1u16); + PendingChildKeys::::insert(net, owner_cold, (vec![(1u64, owner_hot)], 1u64)); + ChildKeys::::insert(owner_cold, net, vec![(1u64, owner_hot)]); + ParentKeys::::insert(owner_hot, net, vec![(1u64, owner_cold)]); + + // Hotkey swap timestamp for subnet + LastHotkeySwapOnNetuid::::insert(net, owner_cold, 1u64); + + // Axon/prometheus tx key timing (NMap) — ***correct key-tuple insertion*** + 
TransactionKeyLastBlock::::insert((owner_hot, net, 1u16), 1u64); + + // EVM association indexed by (netuid, uid) + AssociatedEvmAddress::::insert(net, 0u16, (sp_core::H160::zero(), 1u64)); + + // (Optional) subnet -> lease link + SubnetUidToLeaseId::::insert(net, 42u32); // ------------------------------------------------------------------ // Dissolve @@ -344,11 +438,13 @@ fn dissolve_clears_all_per_subnet_storages() { // Items that must be COMPLETELY REMOVED // ------------------------------------------------------------------ assert!(!SubnetOwner::::contains_key(net)); + assert!(!SubnetOwnerHotkey::::contains_key(net)); assert!(!SubnetworkN::::contains_key(net)); assert!(!NetworkModality::::contains_key(net)); assert!(!NetworksAdded::::contains_key(net)); assert!(!NetworkRegisteredAt::::contains_key(net)); + // Consensus vectors removed assert!(!Rank::::contains_key(net)); assert!(!Trust::::contains_key(net)); assert!(!Active::::contains_key(net)); @@ -362,6 +458,7 @@ fn dissolve_clears_all_per_subnet_storages() { assert!(!ValidatorPermit::::contains_key(net)); assert!(!ValidatorTrust::::contains_key(net)); + // Per‑net params removed assert!(!Tempo::::contains_key(net)); assert!(!Kappa::::contains_key(net)); assert!(!Difficulty::::contains_key(net)); @@ -376,26 +473,117 @@ fn dissolve_clears_all_per_subnet_storages() { assert!(!POWRegistrationsThisInterval::::contains_key(net)); assert!(!BurnRegistrationsThisInterval::::contains_key(net)); + // Pool / AMM counters removed assert!(!SubnetTAO::::contains_key(net)); assert!(!SubnetAlphaInEmission::::contains_key(net)); assert!(!SubnetAlphaOutEmission::::contains_key(net)); assert!(!SubnetTaoInEmission::::contains_key(net)); assert!(!SubnetVolume::::contains_key(net)); - // ------------------------------------------------------------------ - // Items expected to be PRESENT but ZERO - // ------------------------------------------------------------------ - assert_eq!(SubnetAlphaIn::::get(net), 0.into()); - 
assert_eq!(SubnetAlphaOut::::get(net), 0.into()); + // These are now REMOVED + assert!(!SubnetAlphaIn::::contains_key(net)); + assert!(!SubnetAlphaOut::::contains_key(net)); - // ------------------------------------------------------------------ // Collections fully cleared - // ------------------------------------------------------------------ assert!(Keys::::iter_prefix(net).next().is_none()); assert!(Bonds::::iter_prefix(net).next().is_none()); assert!(Weights::::iter_prefix(net).next().is_none()); assert!(!IsNetworkMember::::contains_key(owner_hot, net)); + // Token / price / provided reserves + assert!(!TokenSymbol::::contains_key(net)); + assert!(!SubnetMovingPrice::::contains_key(net)); + assert!(!SubnetTaoProvided::::contains_key(net)); + assert!(!SubnetAlphaInProvided::::contains_key(net)); + + // Subnet locks + assert!(!TransferToggle::::contains_key(net)); + assert!(!SubnetLocked::::contains_key(net)); + assert!(!LargestLocked::::contains_key(net)); + + // Subnet parameters & pending counters + assert!(!FirstEmissionBlockNumber::::contains_key(net)); + assert!(!SubnetMechanism::::contains_key(net)); + assert!(!NetworkRegistrationAllowed::::contains_key(net)); + assert!(!NetworkPowRegistrationAllowed::::contains_key(net)); + assert!(!PendingEmission::::contains_key(net)); + assert!(!PendingRootDivs::::contains_key(net)); + assert!(!PendingAlphaSwapped::::contains_key(net)); + assert!(!PendingOwnerCut::::contains_key(net)); + assert!(!BlocksSinceLastStep::::contains_key(net)); + assert!(!LastMechansimStepBlock::::contains_key(net)); + assert!(!ServingRateLimit::::contains_key(net)); + assert!(!Rho::::contains_key(net)); + assert!(!AlphaSigmoidSteepness::::contains_key(net)); + + // Weights/versioning/targets/limits + assert!(!WeightsVersionKey::::contains_key(net)); + assert!(!MaxAllowedValidators::::contains_key(net)); + assert!(!AdjustmentInterval::::contains_key(net)); + assert!(!BondsMovingAverage::::contains_key(net)); + 
assert!(!BondsPenalty::::contains_key(net)); + assert!(!BondsResetOn::::contains_key(net)); + assert!(!WeightsSetRateLimit::::contains_key(net)); + assert!(!ValidatorPruneLen::::contains_key(net)); + assert!(!ScalingLawPower::::contains_key(net)); + assert!(!TargetRegistrationsPerInterval::::contains_key(net)); + assert!(!AdjustmentAlpha::::contains_key(net)); + assert!(!CommitRevealWeightsEnabled::::contains_key(net)); + + // Burn/difficulty/adjustment + assert!(!Burn::::contains_key(net)); + assert!(!MinBurn::::contains_key(net)); + assert!(!MaxBurn::::contains_key(net)); + assert!(!MinDifficulty::::contains_key(net)); + assert!(!MaxDifficulty::::contains_key(net)); + assert!(!LastAdjustmentBlock::::contains_key(net)); + assert!(!RegistrationsThisBlock::::contains_key(net)); + assert!(!EMAPriceHalvingBlocks::::contains_key(net)); + assert!(!RAORecycledForRegistration::::contains_key(net)); + + // Feature toggles + assert!(!LiquidAlphaOn::::contains_key(net)); + assert!(!Yuma3On::::contains_key(net)); + assert!(!AlphaValues::::contains_key(net)); + assert!(!SubtokenEnabled::::contains_key(net)); + assert!(!ImmuneOwnerUidsLimit::::contains_key(net)); + + // Per‑subnet vectors / indexes + assert!(!StakeWeight::::contains_key(net)); + + // Uid/registration + assert!(Uids::::get(net, owner_hot).is_none()); + assert!(!BlockAtRegistration::::contains_key(net, 0u16)); + + // Per‑subnet dividends + assert!(!AlphaDividendsPerSubnet::::contains_key( + net, owner_hot + )); + assert!(!TaoDividendsPerSubnet::::contains_key(net, owner_hot)); + + // Parent/child topology + takes + assert!(!ChildkeyTake::::contains_key(owner_hot, net)); + assert!(!PendingChildKeys::::contains_key(net, owner_cold)); + assert!(!ChildKeys::::contains_key(owner_cold, net)); + assert!(!ParentKeys::::contains_key(owner_hot, net)); + + // Hotkey swap timestamp for subnet + assert!(!LastHotkeySwapOnNetuid::::contains_key( + net, owner_cold + )); + + // Axon/prometheus tx key timing (NMap) — ValueQuery 
(defaults to 0) + assert_eq!( + TransactionKeyLastBlock::::get((owner_hot, net, 1u16)), + 0u64 + ); + + // EVM association + assert!(AssociatedEvmAddress::::get(net, 0u16).is_none()); + + // Subnet -> lease link + assert!(!SubnetUidToLeaseId::::contains_key(net)); + // ------------------------------------------------------------------ // Final subnet removal confirmation // ------------------------------------------------------------------ From bbc035fce05a5b7495cca330139e14a066263f67 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Fri, 22 Aug 2025 17:30:14 -0400 Subject: [PATCH 078/379] Basic framing for sub-subnets --- common/src/lib.rs | 34 ++++ pallets/subtensor/src/coinbase/block_step.rs | 2 + .../subtensor/src/coinbase/run_coinbase.rs | 2 +- pallets/subtensor/src/lib.rs | 32 ++++ pallets/subtensor/src/subnets/mod.rs | 1 + pallets/subtensor/src/subnets/subsubnet.rs | 161 ++++++++++++++++++ pallets/subtensor/src/tests/mod.rs | 1 + pallets/subtensor/src/tests/subsubnet.rs | 13 ++ runtime/src/lib.rs | 4 +- 9 files changed, 247 insertions(+), 3 deletions(-) create mode 100644 pallets/subtensor/src/subnets/subsubnet.rs create mode 100644 pallets/subtensor/src/tests/subsubnet.rs diff --git a/common/src/lib.rs b/common/src/lib.rs index a3882a88fc..ed7cc0fe88 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -223,6 +223,40 @@ pub mod time { pub const DAYS: BlockNumber = HOURS * 24; } +#[freeze_struct("8e576b32bb1bb664")] +#[repr(transparent)] +#[derive( + Deserialize, + Serialize, + Clone, + Copy, + Decode, + DecodeWithMemTracking, + Default, + Encode, + Eq, + Hash, + MaxEncodedLen, + Ord, + PartialEq, + PartialOrd, + RuntimeDebug, +)] +#[serde(transparent)] +pub struct SubId(u8); + +impl From for SubId { + fn from(value: u8) -> Self { + Self(value) + } +} + +impl From for u16 { + fn from(val: SubId) -> Self { + u16::from(val.0) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/pallets/subtensor/src/coinbase/block_step.rs 
b/pallets/subtensor/src/coinbase/block_step.rs index 6a96090b05..6385a7f756 100644 --- a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -21,6 +21,8 @@ impl Pallet { Self::run_coinbase(block_emission); // --- 4. Set pending children on the epoch; but only after the coinbase has been run. Self::try_set_pending_children(block_number); + // --- 5. Update sub-subnet counts + Self::update_subsubnet_counts_if_needed(block_number); // Return ok. Ok(()) } diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index dcdab8072e..a71ba0d964 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -641,7 +641,7 @@ impl Pallet { // Run the epoch. let hotkey_emission: Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> = - Self::epoch(netuid, pending_alpha.saturating_add(pending_swapped)); + Self::epoch_with_subsubnets(netuid, pending_alpha.saturating_add(pending_swapped)); log::debug!("hotkey_emission: {hotkey_emission:?}"); // Compute the pending validator alpha. 
diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 173d9bd0f4..7fa6c8a919 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1797,6 +1797,38 @@ pub mod pallet { pub type CommitRevealWeightsVersion = StorageValue<_, u16, ValueQuery, DefaultCommitRevealWeightsVersion>; + /// ====================== + /// ==== Sub-subnets ===== + /// ====================== + #[pallet::type_value] + /// -- ITEM (Default number of sub-subnets) + pub fn DefaultSubsubnetCount() -> u8 { + 1 + } + #[pallet::type_value] + /// -- ITEM (Maximum number of sub-subnets) + pub fn MaxSubsubnetCount() -> u8 { + 8 + } + #[pallet::type_value] + /// -- ITEM (Number of tempos in subnet super-block) + pub fn SuperBlockTempos() -> u16 { + 20 + } + #[pallet::type_value] + /// -- ITEM (Maximum allowed sub-subnet count decrease per super-block) + pub fn GlobalSubsubnetDecreasePerSuperblock() -> u8 { + 1 + } + #[pallet::storage] + /// --- MAP ( netuid ) --> Number of sub-subnets desired by root or subnet owner. 
+ pub type SubsubnetCountDesired = + StorageMap<_, Twox64Concat, NetUid, u8, ValueQuery, DefaultSubsubnetCount>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Current number of sub-subnets + pub type SubsubnetCountCurrent = + StorageMap<_, Twox64Concat, NetUid, u8, ValueQuery, DefaultSubsubnetCount>; + /// ================== /// ==== Genesis ===== /// ================== diff --git a/pallets/subtensor/src/subnets/mod.rs b/pallets/subtensor/src/subnets/mod.rs index a823773395..a3705af084 100644 --- a/pallets/subtensor/src/subnets/mod.rs +++ b/pallets/subtensor/src/subnets/mod.rs @@ -3,6 +3,7 @@ pub mod leasing; pub mod registration; pub mod serving; pub mod subnet; +pub mod subsubnet; pub mod symbols; pub mod uids; pub mod weights; diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs new file mode 100644 index 0000000000..816f4818bd --- /dev/null +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -0,0 +1,161 @@ +//! This file contains all tooling to work with sub-subnets +//! + +use super::*; +use alloc::collections::BTreeMap; +use safe_math::*; +use sp_runtime::SaturatedConversion; +use subtensor_runtime_common::{AlphaCurrency, NetUid, SubId}; + +pub type LeaseId = u32; + +pub type CurrencyOf = ::Currency; + +pub type BalanceOf = + as fungible::Inspect<::AccountId>>::Balance; + +/// Theoretical maximum of subnets on bittensor. This value is used in indexed +/// storage of epoch values for sub-subnets as +/// +/// `storage_index = netuid + sub_id * GLOBAL_MAX_SUBNET_COUNT` +/// +/// For sub_id = 0 this index results in netuid and provides backward compatibility +/// for subnets with default sub-subnet count of 1. +/// +/// Changing this value will require a migration of all epoch maps. 
+/// +pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 1024; + +impl Pallet { + pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUid { + u16::from(sub_id) + .saturating_mul(GLOBAL_MAX_SUBNET_COUNT) + .saturating_add(u16::from(netuid)) + .into() + } + + /// Set the desired valus of sub-subnet count for a subnet identified + /// by netuid + pub fn do_set_desired_subsubnet_count(netuid: NetUid, subsubnet_count: u8) -> DispatchResult { + // Make sure the subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + // Count cannot be zero + ensure!(subsubnet_count > 0, Error::::InvalidValue); + + // Make sure we are not exceeding the max sub-subnet count + ensure!( + subsubnet_count <= MaxSubsubnetCount::::get(), + Error::::InvalidValue + ); + + SubsubnetCountDesired::::insert(netuid, subsubnet_count); + Ok(()) + } + + /// Update current count for a subnet identified by netuid + /// + /// - This function should be called in every block in run_counbase + /// - Cleans up all sub-subnet maps if count is reduced + /// - Decreases current subsubnet count by no more than `GlobalSubsubnetDecreasePerSuperblock` + /// + pub fn update_subsubnet_counts_if_needed(current_block: u64) { + // Run once per super-block + let super_block_tempos = u64::from(SuperBlockTempos::::get()); + Self::get_all_subnet_netuids().iter().for_each(|netuid| { + let super_block = super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); + if let Some(rem) = current_block.checked_rem(super_block) { + if rem == 0 { + let old_count = SubsubnetCountCurrent::::get(netuid); + let desired_count = SubsubnetCountDesired::::get(netuid); + let min_possible_count = old_count + .saturating_sub(GlobalSubsubnetDecreasePerSuperblock::::get()) + .max(1); + let new_count = desired_count.max(min_possible_count); + + if old_count > new_count { + + todo!(); + // Cleanup weights + // Cleanup StakeWeight + // Cleanup Active + // Cleanup Emission + // Cleanup Rank 
+ // Cleanup Trust + // Cleanup Consensus + // Cleanup Incentive + // Cleanup Dividends + // Cleanup PruningScores + // Cleanup ValidatorTrust + // Cleanup ValidatorPermit + } + + SubsubnetCountCurrent::::insert(netuid, new_count); + } + } + }); + } + + /// Split alpha emission in sub-subnet proportions + /// Currently splits evenly between sub-subnets, but the implementation + /// may change in the future + /// + pub fn split_emissions(netuid: NetUid, alpha: AlphaCurrency) -> Vec { + let subsubnet_count = u64::from(SubsubnetCountCurrent::::get(netuid)); + + // If there's any rounding error, credit it to subsubnet 0 + let per_subsubnet = u64::from(alpha).safe_div(subsubnet_count); + let rounding_err = + u64::from(alpha).saturating_sub(per_subsubnet.saturating_mul(subsubnet_count)); + + let mut result = vec![AlphaCurrency::from(per_subsubnet); subsubnet_count as usize]; + result[0] = result[0].saturating_add(AlphaCurrency::from(rounding_err)); + result + } + + /// Splits rao_emission between different sub-subnets using `split_emissions` function. + /// + /// Runs the epoch function for each sub-subnet and consolidates hotkey_emission + /// into a single vector. + /// + pub fn epoch_with_subsubnets( + netuid: NetUid, + rao_emission: AlphaCurrency, + ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + let aggregated: BTreeMap = + Self::split_emissions(netuid, rao_emission) + .into_iter() + .enumerate() + // Run epoch function for each subsubnet to distribute its portion of emissions + .flat_map(|(sub_id, emission)| { + // This is subsubnet ID, e.g. 
a 0-7 number + let sub_id_u8: u8 = sub_id.saturated_into(); + // This is netuid index for storing subsubnet data in storage maps and for using in + // epoch function + let subsub_netuid = + Self::get_subsubnet_storage_index(netuid, SubId::from(sub_id_u8)); + // epoch returns: Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> + Self::epoch(subsub_netuid, emission).into_iter() + }) + // Consolidate the hotkey emissions into a single BTreeMap + .fold(BTreeMap::new(), |mut acc, (hotkey, divs, incs)| { + acc.entry(hotkey) + .and_modify(|tot| { + tot.0 = tot.0.saturating_add(divs); + tot.1 = tot.1.saturating_add(incs); + }) + .or_insert((divs, incs)); + acc + }); + + // Remap BTreeMap back to Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> format + // for processing in run_coinbase + aggregated + .into_iter() + .map(|(hotkey, (divs, incs))| (hotkey, divs, incs)) + .collect() + } +} diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index b743d7c1ff..1eb922f711 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -21,6 +21,7 @@ mod serving; mod staking; mod staking2; mod subnet; +mod subsubnet; mod swap_coldkey; mod swap_hotkey; mod swap_hotkey_with_subnet; diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs new file mode 100644 index 0000000000..34c7ac1043 --- /dev/null +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -0,0 +1,13 @@ +#![allow( + clippy::arithmetic_side_effects, + clippy::indexing_slicing, + clippy::unwrap_used +)] + +use super::mock::*; + +#[test] +fn test_subsubnet_emission_proportions() { + new_test_ext(1).execute_with(|| { + }); +} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index d7097b605c..120987a00a 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. 
// This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 304, + spec_version: 303, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -1144,7 +1144,7 @@ parameter_types! { pub const SubtensorInitialSenateRequiredStakePercentage: u64 = 1; // 1 percent of total stake pub const SubtensorInitialNetworkImmunity: u64 = 7 * 7200; pub const SubtensorInitialMinAllowedUids: u16 = 128; - pub const SubtensorInitialMinLockCost: u64 = prod_or_fast!(1_000_000_000_000, 100_000_000_000); // 1000 TAO for prod, 100 TAO for fast + pub const SubtensorInitialMinLockCost: u64 = 1_000_000_000_000; // 1000 TAO pub const SubtensorInitialSubnetOwnerCut: u16 = 11_796; // 18 percent // pub const SubtensorInitialSubnetLimit: u16 = 12; // (DEPRECATED) pub const SubtensorInitialNetworkLockReductionInterval: u64 = 14 * 7200; From 25ada7ca4e434bd67cb789e3f9a1c4df882502af Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sun, 24 Aug 2025 10:11:42 -0700 Subject: [PATCH 079/379] use NetworksAdded --- pallets/subtensor/src/subnets/subnet.rs | 5 ++++- pallets/subtensor/src/tests/networks.rs | 13 ++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 21df71e8d6..6749342230 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -149,7 +149,10 @@ impl Pallet { // --- 5. Check if we need to prune a subnet (if at SubnetLimit). // But do not prune yet; we only do it after all checks pass. 
let subnet_limit = Self::get_max_subnets(); - let current_count = TotalNetworks::::get(); + let current_count: u16 = NetworksAdded::::iter() + .filter(|(netuid, added)| *added && *netuid != NetUid::ROOT) + .count() as u16; + let mut recycle_netuid: Option = None; if current_count >= subnet_limit { if let Some(netuid) = Self::get_network_to_prune() { diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index d476e6eaa0..283cc4498f 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -228,13 +228,13 @@ fn dissolve_owner_cut_refund_logic() { // Current α→τ price for this subnet. let price: U96F32 = ::SwapInterface::current_alpha_price(net.into()); - let owner_emission_tau_u64: u64 = U96F32::from_num(owner_alpha_u64) + let owner_emission_tao_u64: u64 = U96F32::from_num(owner_alpha_u64) .saturating_mul(price) .floor() .saturating_to_num::(); let expected_refund: TaoCurrency = - lock.saturating_sub(TaoCurrency::from(owner_emission_tau_u64)); + lock.saturating_sub(TaoCurrency::from(owner_emission_tao_u64)); let before = SubtensorModule::get_coldkey_balance(&oc); assert_ok!(SubtensorModule::do_dissolve_network(net)); @@ -341,7 +341,6 @@ fn dissolve_clears_all_per_subnet_storages() { // Membership entry for the SAME hotkey as Keys IsNetworkMember::::insert(owner_hot, net, true); - // Token / price / provided reserves TokenSymbol::::insert(net, b"XX".to_vec()); SubnetMovingPrice::::insert(net, substrate_fixed::types::I96F32::from_num(1)); @@ -1999,7 +1998,7 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( let [hot1, _hot2] = cold_to_hots[&cold]; register_ok_neuron(net_new, hot1, cold, 7777); - let before_tau = SubtensorModule::get_coldkey_balance(&cold); + let before_tao = SubtensorModule::get_coldkey_balance(&cold); let a_prev: u64 = Alpha::::get((hot1, cold, net_new)).saturating_to_num(); // Expected α for this exact τ, using the same sim path as 
the pallet. @@ -2018,14 +2017,14 @@ fn massive_dissolve_refund_and_reregistration_flow_is_lossless_and_cleans_state( min_amount_required.into() )); - let after_tau = SubtensorModule::get_coldkey_balance(&cold); + let after_tao = SubtensorModule::get_coldkey_balance(&cold); let a_new: u64 = Alpha::::get((hot1, cold, net_new)).saturating_to_num(); let a_delta = a_new.saturating_sub(a_prev); // τ decreased by exactly the amount we sent. assert_eq!( - after_tau, - before_tau.saturating_sub(min_amount_required), + after_tao, + before_tao.saturating_sub(min_amount_required), "τ did not decrease by the min required restake amount for cold {cold:?}" ); From b8913217ab124b453595413b4599199e0334c239 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sun, 24 Aug 2025 10:27:38 -0700 Subject: [PATCH 080/379] CannotAffordLockCost --- pallets/subtensor/src/macros/errors.rs | 2 ++ pallets/subtensor/src/subnets/subnet.rs | 4 ++-- pallets/subtensor/src/tests/networks.rs | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index 6378a525e4..8358ba9a46 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -248,5 +248,7 @@ mod errors { InvalidValue, /// Subnet limit reached & there is no eligible subnet to prune SubnetLimitReached, + /// Insufficient funds to meet the subnet lock cost + CannotAffordLockCost, } } diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 6749342230..e6be8b8cd5 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -116,7 +116,7 @@ impl Pallet { /// * `MechanismDoesNotExist` – unsupported `mechid`. /// * `NetworkTxRateLimitExceeded` – caller hit the register-network rate limit. /// * `SubnetLimitReached` – limit hit **and** no eligible subnet to prune. 
- /// * `NotEnoughBalanceToStake` – caller lacks the lock cost. + /// * `CannotAffordLockCost` – caller lacks the lock cost. /// * `BalanceWithdrawalError` – failed to lock balance. /// * `InvalidIdentity` – supplied `identity` failed validation. /// @@ -167,7 +167,7 @@ impl Pallet { log::debug!("network lock_amount: {lock_amount:?}"); ensure!( Self::can_remove_balance_from_coldkey_account(&coldkey, lock_amount.into()), - Error::::NotEnoughBalanceToStake + Error::::CannotAffordLockCost ); // --- 7. Perform the lock operation. diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 283cc4498f..914a19e230 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -1245,7 +1245,7 @@ fn register_network_fails_before_prune_keeps_existing() { 1, None, ), - Error::::NotEnoughBalanceToStake + Error::::CannotAffordLockCost ); assert!(SubtensorModule::if_subnet_exist(net)); From 1ffe46ae75247ca7e4fd4761fc8197da0bd98fe6 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sun, 24 Aug 2025 10:51:02 -0700 Subject: [PATCH 081/379] add more maps --- pallets/subtensor/src/subnets/subnet.rs | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index e6be8b8cd5..1a76c92a40 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -104,7 +104,7 @@ impl Pallet { /// * **`origin`** – `T::RuntimeOrigin`  Must be **signed** by the coldkey. /// * **`hotkey`** – `&T::AccountId`  First neuron of the new subnet. /// * **`mechid`** – `u16`  Only the dynamic mechanism (`1`) is currently supported. - /// * **`identity`** – `Option`  Optional metadata for the subnet. + /// * **`identity`** – `Option`  Optional metadata for the subnet. /// /// ### Events /// * `NetworkAdded(netuid, mechid)` – always. 
@@ -218,11 +218,33 @@ impl Pallet { let pool_initial_alpha = AlphaCurrency::from(Self::get_network_min_lock().to_u64()); let actual_tao_lock_amount_less_pool_tao = actual_tao_lock_amount.saturating_sub(pool_initial_tao); + + // Core pool + ownership SubnetTAO::::insert(netuid_to_register, pool_initial_tao); SubnetAlphaIn::::insert(netuid_to_register, pool_initial_alpha); SubnetOwner::::insert(netuid_to_register, coldkey.clone()); SubnetOwnerHotkey::::insert(netuid_to_register, hotkey.clone()); + // ----- NEW: Make registration defaults explicit to mirror de‑registration cleanup ----- + // Transfer gating and lock accounting + TransferToggle::::insert(netuid_to_register, true); + SubnetLocked::::insert(netuid_to_register, pool_initial_tao); + LargestLocked::::insert(netuid_to_register, pool_initial_tao.to_u64()); + + // User‑provided reserves (liquidity) and out‑supply baselines + SubnetTaoProvided::::insert(netuid_to_register, TaoCurrency::ZERO); + SubnetAlphaInProvided::::insert(netuid_to_register, AlphaCurrency::from(0)); + SubnetAlphaOut::::insert(netuid_to_register, AlphaCurrency::from(0)); + + // Market telemetry baselines + SubnetVolume::::insert(netuid_to_register, 0u128); + + // Track burned/recycled amount for this registration + RAORecycledForRegistration::::insert( + netuid_to_register, + actual_tao_lock_amount_less_pool_tao, + ); + if actual_tao_lock_amount_less_pool_tao > TaoCurrency::ZERO { Self::burn_tokens(actual_tao_lock_amount_less_pool_tao); } From c6b3af60ace8856231584c4ef1de954774def632 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sun, 24 Aug 2025 11:18:03 -0700 Subject: [PATCH 082/379] cleanup --- pallets/subtensor/src/subnets/subnet.rs | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 1a76c92a40..4581d5cfd6 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ 
b/pallets/subtensor/src/subnets/subnet.rs @@ -211,11 +211,10 @@ impl Pallet { let symbol = Self::get_next_available_symbol(netuid_to_register); TokenSymbol::::insert(netuid_to_register, symbol); - // Put initial TAO from lock into subnet TAO and produce numerically equal amount of Alpha - // The initial TAO is the locked amount, with a minimum of 1 RAO and a cap of 100 TAO. - let pool_initial_tao = Self::get_network_min_lock(); - // FIXME: the result from function is used as a mixed type alpha/tao - let pool_initial_alpha = AlphaCurrency::from(Self::get_network_min_lock().to_u64()); + // The initial TAO is the locked amount + // Put initial TAO from lock into subnet TAO and produce numerically equal amount of Alpha. + let pool_initial_tao: TaoCurrency = Self::get_network_min_lock(); + let pool_initial_alpha: AlphaCurrency = pool_initial_tao.to_u64().into(); let actual_tao_lock_amount_less_pool_tao = actual_tao_lock_amount.saturating_sub(pool_initial_tao); @@ -224,22 +223,13 @@ impl Pallet { SubnetAlphaIn::::insert(netuid_to_register, pool_initial_alpha); SubnetOwner::::insert(netuid_to_register, coldkey.clone()); SubnetOwnerHotkey::::insert(netuid_to_register, hotkey.clone()); - - // ----- NEW: Make registration defaults explicit to mirror de‑registration cleanup ----- - // Transfer gating and lock accounting TransferToggle::::insert(netuid_to_register, true); SubnetLocked::::insert(netuid_to_register, pool_initial_tao); LargestLocked::::insert(netuid_to_register, pool_initial_tao.to_u64()); - - // User‑provided reserves (liquidity) and out‑supply baselines SubnetTaoProvided::::insert(netuid_to_register, TaoCurrency::ZERO); SubnetAlphaInProvided::::insert(netuid_to_register, AlphaCurrency::from(0)); SubnetAlphaOut::::insert(netuid_to_register, AlphaCurrency::from(0)); - - // Market telemetry baselines SubnetVolume::::insert(netuid_to_register, 0u128); - - // Track burned/recycled amount for this registration RAORecycledForRegistration::::insert( netuid_to_register, 
actual_tao_lock_amount_less_pool_tao, @@ -302,6 +292,8 @@ impl Pallet { Self::set_immunity_period(netuid, 5000); Self::set_min_difficulty(netuid, u64::MAX); Self::set_max_difficulty(netuid, u64::MAX); + Self::set_commit_reveal_weights_enabled(netuid, true); + Self::set_yuma3_enabled(netuid, true); // Make network parameters explicit. if !Tempo::::contains_key(netuid) { From e3ac2e45f9d1dffd5c7ff342af7b657c5a6d20b0 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sun, 24 Aug 2025 12:27:52 -0700 Subject: [PATCH 083/379] revert breaks too many tests --- pallets/subtensor/src/subnets/subnet.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 4581d5cfd6..522e267f3c 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -292,8 +292,6 @@ impl Pallet { Self::set_immunity_period(netuid, 5000); Self::set_min_difficulty(netuid, u64::MAX); Self::set_max_difficulty(netuid, u64::MAX); - Self::set_commit_reveal_weights_enabled(netuid, true); - Self::set_yuma3_enabled(netuid, true); // Make network parameters explicit. if !Tempo::::contains_key(netuid) { From 42a5815e8d89d67796f2de8fea97add78d45793f Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sun, 24 Aug 2025 15:26:01 -0700 Subject: [PATCH 084/379] improve refund_alpha --- pallets/swap/src/pallet/impls.rs | 119 ++++++++++++++++--------------- 1 file changed, 63 insertions(+), 56 deletions(-) diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index ab4d1e8af8..fb86c69e2b 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1214,84 +1214,85 @@ impl Pallet { } /// Distribute `alpha_total` back to the coldkey's hotkeys for `netuid`. - /// - If the coldkey owns multiple hotkeys, split pro‑rata by current α stake on this subnet. 
- /// If all stakes are zero, split evenly. - /// - If no hotkeys exist, fall back to (coldkey, coldkey). + /// - Pro‑rata by current α stake on this subnet; if all zero, split evenly. + /// - Deterministic "largest remainders" rounding to ensure exact conservation. + /// - Robust to partial deposit failures: retries across successes, final fallback to (cold, cold). pub fn refund_alpha(netuid: NetUid, coldkey: &T::AccountId, alpha_total: AlphaCurrency) { if alpha_total.is_zero() { return; } - // 1) Fetch owned hotkeys via SubnetInfo; no direct dependency on pallet_subtensor. + // 1) Recipient set let mut hotkeys: sp_std::vec::Vec = T::SubnetInfo::get_owned_hotkeys(coldkey); - - // Fallback: if no hotkeys are currently owned, use coldkey as its own hotkey. if hotkeys.is_empty() { hotkeys.push(coldkey.clone()); } - // 2) Build weights based on current α stake on this subnet. - // If sum_weights == 0, we'll split evenly. + // 2) Weights = current α stake per hotkey; if all zero -> even split let weights: sp_std::vec::Vec = hotkeys .iter() - .map(|hk| { - let bal = T::BalanceOps::alpha_balance(netuid, coldkey, hk); - u128::from(bal.to_u64()) - }) + .map(|hk| u128::from(T::BalanceOps::alpha_balance(netuid, coldkey, hk).to_u64())) .collect(); let sum_weights: u128 = weights .iter() .copied() .fold(0u128, |acc, w| acc.saturating_add(w)); - let n: u128 = u128::from(hotkeys.len() as u64); - - let total_alpha_u128: u128 = u128::from(alpha_total.to_u64()); + let total_u128: u128 = u128::from(alpha_total.to_u64()); + let n = hotkeys.len(); - // 3) Compute integer shares with remainder handling. - let mut shares: sp_std::vec::Vec<(T::AccountId, u64)> = - sp_std::vec::Vec::with_capacity(hotkeys.len()); + // (account, planned_amount_u64) + let mut shares: sp_std::vec::Vec<(T::AccountId, u64)> = sp_std::vec::Vec::with_capacity(n); if sum_weights > 0 { - // Pro‑rata by weights. 
- let mut assigned: u128 = 0; - for (hk, w) in hotkeys.iter().cloned().zip(weights.iter().copied()) { - let numerator = total_alpha_u128.saturating_mul(w); - let part: u128 = numerator.checked_div(sum_weights).unwrap_or(0); - shares.push((hk, u64::try_from(part).unwrap_or(u64::MAX))); - assigned = assigned.saturating_add(part); + // 3a) Pro‑rata base + largest remainders (deterministic) + let mut bases: sp_std::vec::Vec = sp_std::vec::Vec::with_capacity(n); + let mut remainders: sp_std::vec::Vec<(usize, u128)> = + sp_std::vec::Vec::with_capacity(n); + + let mut base_sum: u128 = 0; + for (i, (&w, hk)) in weights.iter().zip(hotkeys.iter()).enumerate() { + let numer = total_u128.saturating_mul(w); + let base = numer.checked_div(sum_weights).unwrap_or(0); + let rem = numer.checked_rem(sum_weights).unwrap_or(0); + bases.push(base); + remainders.push((i, rem)); + base_sum = base_sum.saturating_add(base); + shares.push((hk.clone(), u64::try_from(base).unwrap_or(u64::MAX))); } - // Distribute remainder one‑by‑one. - let mut remainder: u128 = total_alpha_u128.saturating_sub(assigned); - let mut i: usize = 0; - while remainder > 0 && i < shares.len() { - if let Some(pair) = shares.get_mut(i) { - pair.1 = pair.1.saturating_add(1); - remainder = remainder.saturating_sub(1); - i = i.saturating_add(1); - } else { - break; + // Distribute leftover ones to the largest remainders; tie‑break by index for determinism + let mut leftover = total_u128.saturating_sub(base_sum); + if leftover > 0 { + remainders.sort_by(|a, b| { + // Descending by remainder, then ascending by index + b.1.cmp(&a.1).then_with(|| a.0.cmp(&b.0)) + }); + let mut k = 0usize; + while leftover > 0 && k < remainders.len() { + let idx = remainders[k].0; + if let Some((_, amt)) = shares.get_mut(idx) { + *amt = amt.saturating_add(1); + } + leftover = leftover.saturating_sub(1); + k = k.saturating_add(1); } } } else { - // Even split. 
- let base_u128 = total_alpha_u128.checked_div(n).unwrap_or(0); - let mut remainder_u128 = total_alpha_u128.checked_rem(n).unwrap_or(0); - - let base: u64 = u64::try_from(base_u128).unwrap_or(u64::MAX); - for hk in hotkeys.into_iter() { - let add_one: u64 = if remainder_u128 > 0 { - remainder_u128 = remainder_u128.saturating_sub(1); - 1 - } else { - 0 - }; - shares.push((hk, base.saturating_add(add_one))); + // 3b) Even split with deterministic round‑robin remainder + let base = total_u128.checked_div(n as u128).unwrap_or(0); + let mut rem = total_u128.checked_rem(n as u128).unwrap_or(0); + for hk in hotkeys.iter() { + let mut amt = u64::try_from(base).unwrap_or(u64::MAX); + if rem > 0 { + amt = amt.saturating_add(1); + rem = rem.saturating_sub(1); + } + shares.push((hk.clone(), amt)); } } - // 4) Deposit to (coldkey, hotkey). On failure, collect leftover and retry on successes. + // 4) Deposit to (coldkey, each hotkey). Track leftover if any deposit fails. let mut leftover: u64 = 0; let mut successes: sp_std::vec::Vec = sp_std::vec::Vec::new(); @@ -1304,7 +1305,7 @@ impl Pallet { Ok(_) => successes.push(hk.clone()), Err(e) => { log::warn!( - "refund_alpha_to_hotkeys: increase_stake failed (cold={coldkey:?}, hot={hk:?}, netuid={netuid:?}, amt={amt_u64:?}): {e:?}" + "refund_alpha: increase_stake failed (cold={coldkey:?}, hot={hk:?}, netuid={netuid:?}, amt={amt_u64:?}): {e:?}" ); leftover = leftover.saturating_add(*amt_u64); } @@ -1313,10 +1314,11 @@ impl Pallet { // 5) Retry: spread any leftover across the hotkeys that succeeded in step 4. 
if leftover > 0 && !successes.is_empty() { - let count_u64 = successes.len() as u64; - let base = leftover.checked_div(count_u64).unwrap_or(0); - let mut rem = leftover.checked_rem(count_u64).unwrap_or(0); + let count = successes.len() as u64; + let base = leftover.checked_div(count).unwrap_or(0); + let mut rem = leftover.checked_rem(count).unwrap_or(0); + let mut leftover_retry: u64 = 0; for hk in successes.iter() { let add: u64 = base.saturating_add(if rem > 0 { rem = rem.saturating_sub(1); @@ -1327,12 +1329,17 @@ impl Pallet { if add == 0 { continue; } - let _ = T::BalanceOps::increase_stake(coldkey, hk, netuid, add.into()); + if let Err(e) = T::BalanceOps::increase_stake(coldkey, hk, netuid, add.into()) { + log::warn!( + "refund_alpha(retry): increase_stake failed (cold={coldkey:?}, hot={hk:?}, netuid={netuid:?}, amt={add:?}): {e:?}" + ); + leftover_retry = leftover_retry.saturating_add(add); + } } - leftover = 0; + leftover = leftover_retry; } - // 6) Final fallback: if for some reason every deposit failed, deposit to (coldkey, coldkey). + // 6) Final fallback: deposit any remainder to (coldkey, coldkey). 
if leftover > 0 { let _ = T::BalanceOps::increase_stake(coldkey, coldkey, netuid, leftover.into()); } From 70ec2ee9ae7fd60a204ea61da03d6852127ccc86 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sun, 24 Aug 2025 15:48:13 -0700 Subject: [PATCH 085/379] add more dissolve LP tests --- pallets/swap/src/pallet/tests.rs | 189 +++++++++++++++++++++++++++++++ 1 file changed, 189 insertions(+) diff --git a/pallets/swap/src/pallet/tests.rs b/pallets/swap/src/pallet/tests.rs index e6148ed94e..863438c627 100644 --- a/pallets/swap/src/pallet/tests.rs +++ b/pallets/swap/src/pallet/tests.rs @@ -2276,3 +2276,192 @@ fn liquidate_v3_refunds_user_funds_and_clears_state() { assert!(!SwapV3Initialized::::contains_key(netuid)); }); } + +#[test] +fn refund_alpha_single_provider_exact() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(11); + let cold = OK_COLDKEY_ACCOUNT_ID; + let hot = OK_HOTKEY_ACCOUNT_ID; + + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + + // --- Create an alpha‑only position (range entirely above current tick → TAO = 0, ALPHA > 0). + let ct = CurrentTick::::get(netuid); + let tick_low = ct.next().expect("current tick should not be MAX in tests"); + let tick_high = TickIndex::MAX; + + let liquidity = 1_000_000_u64; + let (_pos_id, tao_needed, alpha_needed) = + Pallet::::do_add_liquidity(netuid, &cold, &hot, tick_low, tick_high, liquidity) + .expect("add alpha-only liquidity"); + assert_eq!(tao_needed, 0, "alpha‑only position must not require TAO"); + assert!(alpha_needed > 0, "alpha‑only position must require ALPHA"); + + // --- Snapshot BEFORE we withdraw funds (baseline for conservation). + let alpha_before_hot = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot); + let alpha_before_owner = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &cold); + let alpha_before_total = alpha_before_hot + alpha_before_owner; + + // --- Mimic extrinsic bookkeeping: withdraw α and record provided reserve. 
+ let alpha_taken = ::BalanceOps::decrease_stake( + &cold, + &hot, + netuid.into(), + alpha_needed.into(), + ) + .expect("decrease ALPHA"); + ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken); + + // --- Act: dissolve (calls refund_alpha inside). + assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); + + // --- Assert: refunded back to the owner (may credit to (cold,cold)). + let alpha_after_hot = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot); + let alpha_after_owner = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &cold); + let alpha_after_total = alpha_after_hot + alpha_after_owner; + assert_eq!( + alpha_after_total, alpha_before_total, + "ALPHA principal must be conserved to the owner" + ); + + // --- State is cleared. + assert!(Ticks::::iter_prefix(netuid).next().is_none()); + assert_eq!(Pallet::::count_positions(netuid, &cold), 0); + assert!(!SwapV3Initialized::::contains_key(netuid)); + }); +} + +#[test] +fn refund_alpha_multiple_providers_proportional_to_principal() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(12); + let c1 = OK_COLDKEY_ACCOUNT_ID; + let h1 = OK_HOTKEY_ACCOUNT_ID; + let c2 = OK_COLDKEY_ACCOUNT_ID_2; + let h2 = OK_HOTKEY_ACCOUNT_ID_2; + + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + + // Use the same "above current tick" trick for alpha‑only positions. 
+ let ct = CurrentTick::::get(netuid); + let tick_low = ct.next().expect("current tick should not be MAX in tests"); + let tick_high = TickIndex::MAX; + + // Provider #1 (smaller α) + let liq1 = 700_000_u64; + let (_p1, t1, a1) = + Pallet::::do_add_liquidity(netuid, &c1, &h1, tick_low, tick_high, liq1) + .expect("add alpha-only liquidity #1"); + assert_eq!(t1, 0); + assert!(a1 > 0); + + // Provider #2 (larger α) + let liq2 = 2_100_000_u64; + let (_p2, t2, a2) = + Pallet::::do_add_liquidity(netuid, &c2, &h2, tick_low, tick_high, liq2) + .expect("add alpha-only liquidity #2"); + assert_eq!(t2, 0); + assert!(a2 > 0); + + // Baselines BEFORE withdrawing + let a1_before_hot = ::BalanceOps::alpha_balance(netuid.into(), &c1, &h1); + let a1_before_owner = ::BalanceOps::alpha_balance(netuid.into(), &c1, &c1); + let a1_before = a1_before_hot + a1_before_owner; + + let a2_before_hot = ::BalanceOps::alpha_balance(netuid.into(), &c2, &h2); + let a2_before_owner = ::BalanceOps::alpha_balance(netuid.into(), &c2, &c2); + let a2_before = a2_before_hot + a2_before_owner; + + // Withdraw α and account reserves for each provider. + let a1_taken = + ::BalanceOps::decrease_stake(&c1, &h1, netuid.into(), a1.into()) + .expect("decrease α #1"); + ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), a1_taken); + + let a2_taken = + ::BalanceOps::decrease_stake(&c2, &h2, netuid.into(), a2.into()) + .expect("decrease α #2"); + ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), a2_taken); + + // Act + assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); + + // Each owner is restored to their exact baseline. 
+ let a1_after_hot = ::BalanceOps::alpha_balance(netuid.into(), &c1, &h1); + let a1_after_owner = ::BalanceOps::alpha_balance(netuid.into(), &c1, &c1); + let a1_after = a1_after_hot + a1_after_owner; + assert_eq!( + a1_after, a1_before, + "owner #1 must receive their α principal back" + ); + + let a2_after_hot = ::BalanceOps::alpha_balance(netuid.into(), &c2, &h2); + let a2_after_owner = ::BalanceOps::alpha_balance(netuid.into(), &c2, &c2); + let a2_after = a2_after_hot + a2_after_owner; + assert_eq!( + a2_after, a2_before, + "owner #2 must receive their α principal back" + ); + }); +} + +#[test] +fn refund_alpha_same_cold_multiple_hotkeys_conserved_to_owner() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(13); + let cold = OK_COLDKEY_ACCOUNT_ID; + let hot1 = OK_HOTKEY_ACCOUNT_ID; + let hot2 = OK_HOTKEY_ACCOUNT_ID_2; + + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + + // Two alpha‑only positions on different hotkeys of the same owner. + let ct = CurrentTick::::get(netuid); + let tick_low = ct.next().expect("current tick should not be MAX in tests"); + let tick_high = TickIndex::MAX; + + let (_p1, _t1, a1) = + Pallet::::do_add_liquidity(netuid, &cold, &hot1, tick_low, tick_high, 900_000) + .expect("add alpha-only pos (hot1)"); + let (_p2, _t2, a2) = + Pallet::::do_add_liquidity(netuid, &cold, &hot2, tick_low, tick_high, 1_500_000) + .expect("add alpha-only pos (hot2)"); + assert!(a1 > 0 && a2 > 0); + + // Baseline BEFORE: sum over (cold,hot1) + (cold,hot2) + (cold,cold). + let before_hot1 = ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot1); + let before_hot2 = ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot2); + let before_owner = ::BalanceOps::alpha_balance(netuid.into(), &cold, &cold); + let before_total = before_hot1 + before_hot2 + before_owner; + + // Withdraw α from both hotkeys; track provided‑reserve. 
+ let t1 = + ::BalanceOps::decrease_stake(&cold, &hot1, netuid.into(), a1.into()) + .expect("decr α #hot1"); + ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), t1); + + let t2 = + ::BalanceOps::decrease_stake(&cold, &hot2, netuid.into(), a2.into()) + .expect("decr α #hot2"); + ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), t2); + + // Act + assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); + + // The total α "owned" by the coldkey is conserved (credit may land on (cold,cold)). + let after_hot1 = ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot1); + let after_hot2 = ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot2); + let after_owner = ::BalanceOps::alpha_balance(netuid.into(), &cold, &cold); + let after_total = after_hot1 + after_hot2 + after_owner; + + assert_eq!( + after_total, before_total, + "owner’s α must be conserved across hot ledgers + (owner,owner)" + ); + }); +} From 8d838c443f71b9e77d0cdbf3014c6a22fdde4ce5 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sun, 24 Aug 2025 16:15:52 -0700 Subject: [PATCH 086/379] use moving price --- pallets/subtensor/src/coinbase/root.rs | 2 +- pallets/subtensor/src/tests/networks.rs | 62 +++++++++---------------- 2 files changed, 23 insertions(+), 41 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index b37d655d6f..1e4be4a212 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -799,7 +799,7 @@ impl Pallet { continue; } - let price: U96F32 = T::SwapInterface::current_alpha_price(netuid.into()); + let price: U96F32 = Self::get_moving_alpha_price(netuid); // If tie on price, earliest registration wins. 
if price < candidate_price diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 914a19e230..4c00266587 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -3,7 +3,6 @@ use crate::migrations::migrate_network_immunity_period; use crate::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; -use pallet_subtensor_swap::{AlphaSqrtPrice, SwapV3Initialized}; use sp_core::U256; use sp_std::collections::btree_map::BTreeMap; use substrate_fixed::types::{I96F32, U64F64, U96F32}; @@ -1007,38 +1006,22 @@ fn prune_selection_complex_state_exhaustive() { System::set_block_number(imm + 6); let n6 = add_dynamic_network(&U256::from(106), &U256::from(206)); // immune at first - // (Root is ignored by the selector; we may still set it for completeness.) + // (Root is ignored by the selector.) let root = NetUid::ROOT; // --------------------------------------------------------------------- - // Drive price via V3 sqrt-price path: price = (sqrt_price)^2 - // Ensure V3 is initialized so current_alpha_price() uses sqrt-price. + // Drive pruning via the EMA/moving price used by `get_network_to_prune()`. + // We set the moving prices directly to create deterministic selections. 
+ // + // Intended prices: + // n1: 25, n2: 25, n3: 100, n4: 1, n5: 0 (immune initially), n6: 0 (immune initially) // --------------------------------------------------------------------- - for net in [n1, n2, n3, n4, n5, n6] { - assert_ok!( - pallet_subtensor_swap::Pallet::::toggle_user_liquidity( - RuntimeOrigin::root(), - net, - true - ) - ); - SwapV3Initialized::::insert(net, true); - } - - // sqrt prices → prices: - // n1: sqrt=5 → price 25 - // n2: sqrt=5 → price 25 - // n3: sqrt=10 → price 100 - // n4: sqrt=1 → price 1 (lowest among matured initially) - // n5: sqrt=0 → price 0 (lowest overall but immune initially) - // n6: sqrt=0 → price 0 (lowest overall but immune initially) - AlphaSqrtPrice::::insert(n1, U64F64::from_num(5)); - AlphaSqrtPrice::::insert(n2, U64F64::from_num(5)); - AlphaSqrtPrice::::insert(n3, U64F64::from_num(10)); - AlphaSqrtPrice::::insert(n4, U64F64::from_num(1)); - AlphaSqrtPrice::::insert(n5, U64F64::from_num(0)); - AlphaSqrtPrice::::insert(n6, U64F64::from_num(0)); - AlphaSqrtPrice::::insert(root, U64F64::from_num(0)); + SubnetMovingPrice::::insert(n1, I96F32::from_num(25)); + SubnetMovingPrice::::insert(n2, I96F32::from_num(25)); + SubnetMovingPrice::::insert(n3, I96F32::from_num(100)); + SubnetMovingPrice::::insert(n4, I96F32::from_num(1)); + SubnetMovingPrice::::insert(n5, I96F32::from_num(0)); + SubnetMovingPrice::::insert(n6, I96F32::from_num(0)); // --------------------------------------------------------------------- // Phase A: Only n1..n4 are mature → lowest price (n4=1) should win. @@ -1052,11 +1035,11 @@ fn prune_selection_complex_state_exhaustive() { // --------------------------------------------------------------------- // Phase B: Tie on price with *same registration time* (n1 vs n2). - // Raise n4's price to 25 (sqrt=5) so {n1=25, n2=25, n3=100, n4=25}. + // Raise n4's price to 25 so {n1=25, n2=25, n3=100, n4=25}. // n1 and n2 share the *same registered_at*. 
The tie should keep the // first encountered (stable iteration by key order) → n1. // --------------------------------------------------------------------- - AlphaSqrtPrice::::insert(n4, U64F64::from_num(5)); // price now 25 + SubnetMovingPrice::::insert(n4, I96F32::from_num(25)); // n4 now 25 assert_eq!( SubtensorModule::get_network_to_prune(), Some(n1), @@ -1065,10 +1048,10 @@ fn prune_selection_complex_state_exhaustive() { // --------------------------------------------------------------------- // Phase C: Tie on price with *different registration times*. - // Make n3 price=25 as well (sqrt=5). Now n1,n2,n3,n4 all have price=25. - // Earliest registration time among them is n1 (block 0). + // Make n3 price=25 as well. Now n1,n2,n3,n4 all have price=25. + // Earliest registration among them is n1 (block 0). // --------------------------------------------------------------------- - AlphaSqrtPrice::::insert(n3, U64F64::from_num(5)); // price now 25 + SubnetMovingPrice::::insert(n3, I96F32::from_num(25)); assert_eq!( SubtensorModule::get_network_to_prune(), Some(n1), @@ -1140,10 +1123,10 @@ fn prune_selection_complex_state_exhaustive() { // --------------------------------------------------------------------- // Phase H: Dynamic price changes. - // Make n6 expensive (sqrt=10 → price=100); make n3 cheapest (sqrt=1 → price=1). + // Make n6 expensive (price 100); make n3 cheapest (price 1). // --------------------------------------------------------------------- - AlphaSqrtPrice::::insert(n6, U64F64::from_num(10)); // price 100 - AlphaSqrtPrice::::insert(n3, U64F64::from_num(1)); // price 1 + SubnetMovingPrice::::insert(n6, I96F32::from_num(100)); + SubnetMovingPrice::::insert(n3, I96F32::from_num(1)); assert_eq!( SubtensorModule::get_network_to_prune(), Some(n3), @@ -1155,7 +1138,7 @@ fn prune_selection_complex_state_exhaustive() { // Give n2 the same price as n3; n2 registered at block 0, n3 at block 1. // n2 should be chosen. 
// --------------------------------------------------------------------- - AlphaSqrtPrice::::insert(n2, U64F64::from_num(1)); // price 1 + SubnetMovingPrice::::insert(n2, I96F32::from_num(1)); assert_eq!( SubtensorModule::get_network_to_prune(), Some(n2), @@ -1174,8 +1157,7 @@ fn prune_selection_complex_state_exhaustive() { ); NetworksAdded::::insert(n2, true); - // Root is always ignored even if cheapest. - AlphaSqrtPrice::::insert(root, U64F64::from_num(0)); + // Root is always ignored even if cheapest (get_moving_alpha_price returns 1 for ROOT). assert_ne!( SubtensorModule::get_network_to_prune(), Some(root), From 53d7dbc7d90f5e1386f48938c087963b44860bc8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 25 Aug 2025 01:14:00 +0000 Subject: [PATCH 087/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 190e9d6012..4c891828a4 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -585,7 +585,7 @@ mod dispatches { /// - Errors stemming from transaction pallet. 
/// #[pallet::call_index(2)] - #[pallet::weight((Weight::from_parts(340_400_000, 0) + #[pallet::weight((Weight::from_parts(414_200_000, 0) .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(15)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake( @@ -1195,7 +1195,7 @@ mod dispatches { #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) .saturating_add(T::DbWeight::get().reads(37_u64)) - .saturating_add(T::DbWeight::get().writes(51_u64)), DispatchClass::Normal, Pays::No))] + .saturating_add(T::DbWeight::get().writes(59_u64)), DispatchClass::Normal, Pays::No))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1540,7 +1540,7 @@ mod dispatches { #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) .saturating_add(T::DbWeight::get().reads(36_u64)) - .saturating_add(T::DbWeight::get().writes(50_u64)), DispatchClass::Normal, Pays::No))] + .saturating_add(T::DbWeight::get().writes(58_u64)), DispatchClass::Normal, Pays::No))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, @@ -2156,7 +2156,7 @@ mod dispatches { /// Emits a `SymbolUpdated` event on success. 
#[pallet::call_index(112)] #[pallet::weight(( - Weight::from_parts(26_880_000, 0).saturating_add(T::DbWeight::get().reads_writes(4, 1)), + Weight::from_parts(46_040_000, 0).saturating_add(T::DbWeight::get().reads_writes(4, 1)), DispatchClass::Operational, Pays::Yes ))] From 5c46edb364f4e70415ec562da486dab7f83241a3 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 25 Aug 2025 13:01:14 -0700 Subject: [PATCH 088/379] remove duplicates --- pallets/subtensor/src/coinbase/root.rs | 44 ++++---------------------- 1 file changed, 7 insertions(+), 37 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 1e4be4a212..089b02d910 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -448,7 +448,7 @@ impl Pallet { IsNetworkMember::::remove(key, netuid); } - // --- 11. Core per-net parameters (already present + a few that were missing). + // --- 11. Core per-net parameters. Tempo::::remove(netuid); Kappa::::remove(netuid); Difficulty::::remove(netuid); @@ -461,21 +461,16 @@ impl Pallet { POWRegistrationsThisInterval::::remove(netuid); BurnRegistrationsThisInterval::::remove(netuid); - // --- 12. AMM / price / accounting (expanded). - SubnetTAO::::remove(netuid); + // --- 12. AMM / price / accounting. + // SubnetTAO, SubnetAlpha{In,InProvided,Out} are already cleared during dissolve/destroy. SubnetAlphaInEmission::::remove(netuid); SubnetAlphaOutEmission::::remove(netuid); SubnetTaoInEmission::::remove(netuid); SubnetVolume::::remove(netuid); SubnetMovingPrice::::remove(netuid); - - // Additional AMM & pool surfaces that can exist independently of dissolve paths: - SubnetAlphaIn::::remove(netuid); - SubnetAlphaInProvided::::remove(netuid); - SubnetAlphaOut::::remove(netuid); SubnetTaoProvided::::remove(netuid); - // --- 13. Token / mechanism / registration toggles that were previously left behind. + // --- 13. 
Token / mechanism / registration toggles. TokenSymbol::::remove(netuid); SubnetMechanism::::remove(netuid); SubnetOwnerHotkey::::remove(netuid); @@ -536,7 +531,7 @@ impl Pallet { StakeWeight::::remove(netuid); LoadedEmission::::remove(netuid); - // --- 19. DMAPs where netuid is the FIRST key: can clear by prefix. + // --- 19. DMAPs where netuid is the FIRST key: clear by prefix. let _ = BlockAtRegistration::::clear_prefix(netuid, u32::MAX, None); let _ = Axons::::clear_prefix(netuid, u32::MAX, None); let _ = NeuronCertificates::::clear_prefix(netuid, u32::MAX, None); @@ -602,15 +597,9 @@ impl Pallet { LastHotkeyEmissionOnNetuid::::remove(&hot, netuid); } } - // TotalHotkeyAlpha / TotalHotkeyAlphaLastEpoch / TotalHotkeyShares: (hot, netuid) → ... + // TotalHotkeyAlphaLastEpoch: (hot, netuid) → ... + // (TotalHotkeyAlpha and TotalHotkeyShares were already removed during dissolve.) { - let to_rm_alpha: sp_std::vec::Vec = TotalHotkeyAlpha::::iter() - .filter_map(|(hot, n, _)| if n == netuid { Some(hot) } else { None }) - .collect(); - for hot in to_rm_alpha { - TotalHotkeyAlpha::::remove(&hot, netuid); - } - let to_rm_alpha_last: sp_std::vec::Vec = TotalHotkeyAlphaLastEpoch::::iter() .filter_map(|(hot, n, _)| if n == netuid { Some(hot) } else { None }) @@ -618,24 +607,6 @@ impl Pallet { for hot in to_rm_alpha_last { TotalHotkeyAlphaLastEpoch::::remove(&hot, netuid); } - - let to_rm_shares: sp_std::vec::Vec = TotalHotkeyShares::::iter() - .filter_map(|(hot, n, _)| if n == netuid { Some(hot) } else { None }) - .collect(); - for hot in to_rm_shares { - TotalHotkeyShares::::remove(&hot, netuid); - } - } - // Alpha shares NMAP: (hot, cold, netuid) → U64F64 - { - let to_rm: sp_std::vec::Vec<(T::AccountId, T::AccountId)> = Alpha::::iter() - .filter_map( - |((hot, cold, n), _)| if n == netuid { Some((hot, cold)) } else { None }, - ) - .collect(); - for (hot, cold) in to_rm { - Alpha::::remove((hot, cold, netuid)); - } } // TransactionKeyLastBlock NMAP: (hot, netuid, name) → 
u64 { @@ -674,7 +645,6 @@ impl Pallet { log::debug!( "remove_network: netuid={netuid}, owner={owner_coldkey:?} removed successfully" ); - Self::deposit_event(Event::NetworkRemoved(netuid)); } #[allow(clippy::arithmetic_side_effects)] From 576720ec16d4edfc08301c9112fbba037e834d30 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 25 Aug 2025 13:45:06 -0700 Subject: [PATCH 089/379] remove duplicate check --- pallets/subtensor/src/coinbase/root.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 089b02d910..8f6d3c3530 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -758,9 +758,6 @@ impl Pallet { if !added || netuid == NetUid::ROOT { continue; } - if !Self::if_subnet_exist(netuid) { - continue; - } let registered_at = NetworkRegisteredAt::::get(netuid); From f4f5add76e8aaa04b8bccf08bfcf20ff78f8f22d Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 25 Aug 2025 14:31:30 -0700 Subject: [PATCH 090/379] improve efficiency --- pallets/swap/src/pallet/impls.rs | 18 ++++++------------ pallets/swap/src/pallet/tests.rs | 4 ++-- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index fb86c69e2b..16b9a58c53 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1362,23 +1362,22 @@ impl Pallet { let mechid = T::SubnetInfo::mechanism(netuid.into()); let v3_initialized = SwapV3Initialized::::get(netuid); let user_lp_enabled = - >::is_user_liquidity_enabled(netuid); + >::is_user_liquidity_enabled(netuid); let is_v3_mode = mechid == 1 && v3_initialized; if is_v3_mode { // -------- V3: close every position, aggregate refunds, clear state -------- - // 1) Snapshot all (owner, position_id) under this netuid to avoid iterator aliasing. 
+ // 1) Snapshot all (owner, position_id). struct CloseItem { owner: A, pos_id: PositionId, } let mut to_close: sp_std::vec::Vec> = sp_std::vec::Vec::new(); - for ((n, owner, pos_id), _pos) in Positions::::iter() { - if n == netuid { - to_close.push(CloseItem { owner, pos_id }); - } + + for ((owner, pos_id), _pos) in Positions::::iter_prefix((netuid,)) { + to_close.push(CloseItem { owner, pos_id }); } let protocol_account = Self::protocol_account_id(); @@ -1428,14 +1427,10 @@ impl Pallet { ActiveTickIndexManager::::remove(netuid, ti); } - // 5) Clear storage: - // Positions (StorageNMap) – prefix is **(netuid,)** not just netuid. + // 5) Clear storage for this netuid. let _ = Positions::::clear_prefix((netuid,), u32::MAX, None); - - // Ticks (DoubleMap) – OK to pass netuid as first key. let _ = Ticks::::clear_prefix(netuid, u32::MAX, None); - // Fee globals, price/tick/liquidity, v3 init flag. FeeGlobalTao::::remove(netuid); FeeGlobalAlpha::::remove(netuid); CurrentLiquidity::::remove(netuid); @@ -1443,7 +1438,6 @@ impl Pallet { AlphaSqrtPrice::::remove(netuid); SwapV3Initialized::::remove(netuid); - // Active tick bitmap words (StorageNMap) – prefix is **(netuid,)**. 
let _ = TickIndexBitmapWords::::clear_prefix((netuid,), u32::MAX, None); FeeRate::::remove(netuid); EnabledUserLiquidity::::remove(netuid); diff --git a/pallets/swap/src/pallet/tests.rs b/pallets/swap/src/pallet/tests.rs index 863438c627..25ffbc1189 100644 --- a/pallets/swap/src/pallet/tests.rs +++ b/pallets/swap/src/pallet/tests.rs @@ -2295,8 +2295,8 @@ fn refund_alpha_single_provider_exact() { let (_pos_id, tao_needed, alpha_needed) = Pallet::::do_add_liquidity(netuid, &cold, &hot, tick_low, tick_high, liquidity) .expect("add alpha-only liquidity"); - assert_eq!(tao_needed, 0, "alpha‑only position must not require TAO"); - assert!(alpha_needed > 0, "alpha‑only position must require ALPHA"); + assert_eq!(tao_needed, 0, "alpha-only position must not require TAO"); + assert!(alpha_needed > 0, "alpha-only position must require ALPHA"); // --- Snapshot BEFORE we withdraw funds (baseline for conservation). let alpha_before_hot = From 890f5b90177471614669dfe3b6d6632bc1d4e426 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 25 Aug 2025 15:46:17 -0700 Subject: [PATCH 091/379] migrate_subnet_limit_to_default --- pallets/subtensor/src/macros/hooks.rs | 4 +- .../migrate_subnet_limit_to_default.rs | 47 ++++++++++++++++++ pallets/subtensor/src/migrations/mod.rs | 1 + pallets/subtensor/src/tests/migration.rs | 49 +++++++++++++++++++ 4 files changed, 100 insertions(+), 1 deletion(-) create mode 100644 pallets/subtensor/src/migrations/migrate_subnet_limit_to_default.rs diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index cff92c17dd..d5037a2c60 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -135,7 +135,9 @@ mod hooks { // Migrate to fix root counters .saturating_add(migrations::migrate_fix_root_tao_and_alpha_in::migrate_fix_root_tao_and_alpha_in::()) // Migrate Immunity Period - 
.saturating_add(migrations::migrate_network_immunity_period::migrate_network_immunity_period::()); + .saturating_add(migrations::migrate_network_immunity_period::migrate_network_immunity_period::()) + // Migrate Subnet Limit + .saturating_add(migrations::migrate_subnet_limit_to_default::migrate_subnet_limit_to_default::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_subnet_limit_to_default.rs b/pallets/subtensor/src/migrations/migrate_subnet_limit_to_default.rs new file mode 100644 index 0000000000..d557589c88 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_subnet_limit_to_default.rs @@ -0,0 +1,47 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use scale_info::prelude::string::String; + +pub fn migrate_subnet_limit_to_default() -> Weight { + let mig_name: Vec = b"subnet_limit_to_default".to_vec(); + + // 1 read: HasMigrationRun flag + let mut total_weight = T::DbWeight::get().reads(1); + + // Run once guard + if HasMigrationRun::::get(&mig_name) { + log::info!( + "Migration '{}' already executed - skipping", + String::from_utf8_lossy(&mig_name) + ); + return total_weight; + } + log::info!("Running migration '{}'", String::from_utf8_lossy(&mig_name)); + + // Read current and compute target default + let current: u16 = SubnetLimit::::get(); + let target: u16 = DefaultSubnetLimit::::get(); + + if current != target { + total_weight = total_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + SubnetLimit::::put(target); + log::info!("SubnetLimit updated: {} -> {}", current, target); + } else { + total_weight = total_weight.saturating_add(T::DbWeight::get().reads(1)); + log::info!( + "SubnetLimit already equals default ({}), no update performed.", + target + ); + } + + // Mark as done + HasMigrationRun::::insert(&mig_name, true); + total_weight = total_weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + "Migration '{}' completed", + String::from_utf8_lossy(&mig_name) + ); + 
total_weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index cc1b4dad9d..e0dd295b7e 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -37,6 +37,7 @@ pub mod migrate_set_registration_enable; pub mod migrate_set_subtoken_enabled; pub mod migrate_stake_threshold; pub mod migrate_subnet_identities_to_v3; +pub mod migrate_subnet_limit_to_default; pub mod migrate_subnet_symbols; pub mod migrate_subnet_volume; pub mod migrate_to_v1_separate_emission; diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index fbe8825a5f..532c9cb062 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -1252,3 +1252,52 @@ fn test_migrate_crv3_v2_to_timelocked() { assert_eq!(round2, round); }); } + +#[test] +fn test_migrate_subnet_limit_to_default() { + new_test_ext(1).execute_with(|| { + // ------------------------------ + // 0. Constants / helpers + // ------------------------------ + const MIG_NAME: &[u8] = b"subnet_limit_to_default"; + + // Compute a non-default value safely + let default: u16 = DefaultSubnetLimit::::get(); + let not_default: u16 = default.wrapping_add(1); + + // ------------------------------ + // 1. Pre-state: ensure a non-default value is stored + // ------------------------------ + SubnetLimit::::put(not_default); + assert_eq!( + SubnetLimit::::get(), + not_default, + "precondition failed: SubnetLimit should be non-default before migration" + ); + + assert!( + !HasMigrationRun::::get(MIG_NAME.to_vec()), + "migration flag should be false before run" + ); + + // ------------------------------ + // 2. Run migration + // ------------------------------ + let w = crate::migrations::migrate_subnet_limit_to_default::migrate_subnet_limit_to_default::(); + assert!(!w.is_zero(), "weight must be non-zero"); + + // ------------------------------ + // 3. 
Verify results + // ------------------------------ + assert!( + HasMigrationRun::::get(MIG_NAME.to_vec()), + "migration flag not set" + ); + + assert_eq!( + SubnetLimit::::get(), + default, + "SubnetLimit should be reset to the configured default" + ); + }); +} From 4052094b32a4fd07e21a36e1a9930f6ca3f684e3 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 26 Aug 2025 00:45:14 +0000 Subject: [PATCH 092/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 4c891828a4..8e90cbed33 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -777,7 +777,7 @@ mod dispatches { /// - Attempting to set prometheus information withing the rate limit min. /// #[pallet::call_index(40)] - #[pallet::weight((Weight::from_parts(32_310_000, 0) + #[pallet::weight((Weight::from_parts(41_320_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_axon_tls( @@ -2156,7 +2156,7 @@ mod dispatches { /// Emits a `SymbolUpdated` event on success. 
#[pallet::call_index(112)] #[pallet::weight(( - Weight::from_parts(46_040_000, 0).saturating_add(T::DbWeight::get().reads_writes(4, 1)), + Weight::from_parts(26_930_000, 0).saturating_add(T::DbWeight::get().reads_writes(4, 1)), DispatchClass::Operational, Pays::Yes ))] From ded32d4fb8b224ce43476288957242a7d56a4854 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 26 Aug 2025 08:48:06 -0700 Subject: [PATCH 093/379] clippy --- .../src/migrations/migrate_subnet_limit_to_default.rs | 7 ++----- pallets/swap/src/pallet/impls.rs | 7 ++++--- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/pallets/subtensor/src/migrations/migrate_subnet_limit_to_default.rs b/pallets/subtensor/src/migrations/migrate_subnet_limit_to_default.rs index d557589c88..3d88337a24 100644 --- a/pallets/subtensor/src/migrations/migrate_subnet_limit_to_default.rs +++ b/pallets/subtensor/src/migrations/migrate_subnet_limit_to_default.rs @@ -26,13 +26,10 @@ pub fn migrate_subnet_limit_to_default() -> Weight { if current != target { total_weight = total_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); SubnetLimit::::put(target); - log::info!("SubnetLimit updated: {} -> {}", current, target); + log::info!("SubnetLimit updated: {current} -> {target}"); } else { total_weight = total_weight.saturating_add(T::DbWeight::get().reads(1)); - log::info!( - "SubnetLimit already equals default ({}), no update performed.", - target - ); + log::info!("SubnetLimit already equals default ({target}), no update performed."); } // Mark as done diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index 16b9a58c53..30a967cd69 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1270,9 +1270,10 @@ impl Pallet { }); let mut k = 0usize; while leftover > 0 && k < remainders.len() { - let idx = remainders[k].0; - if let Some((_, amt)) = shares.get_mut(idx) { - *amt = amt.saturating_add(1); + if 
let Some((idx, _)) = remainders.get(k) { + if let Some((_, amt)) = shares.get_mut(*idx) { + *amt = amt.saturating_add(1); + } } leftover = leftover.saturating_sub(1); k = k.saturating_add(1); From 66a07b429d404555a3ba7ce5bcdb907fdd854271 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 26 Aug 2025 10:30:10 -0700 Subject: [PATCH 094/379] add get_subnet_to_prune rpc --- pallets/subtensor/rpc/src/lib.rs | 17 +++++++++++++++++ pallets/subtensor/runtime-api/src/lib.rs | 1 + runtime/src/lib.rs | 3 +++ 3 files changed, 21 insertions(+) diff --git a/pallets/subtensor/rpc/src/lib.rs b/pallets/subtensor/rpc/src/lib.rs index e3d5d8f1c1..2f59bb5cdd 100644 --- a/pallets/subtensor/rpc/src/lib.rs +++ b/pallets/subtensor/rpc/src/lib.rs @@ -83,6 +83,8 @@ pub trait SubtensorCustomApi { metagraph_index: Vec, at: Option, ) -> RpcResult>; + #[method(name = "subnetInfo_getSubnetToPrune")] + fn get_subnet_to_prune(&self, at: Option) -> RpcResult>; } pub struct SubtensorCustom { @@ -427,4 +429,19 @@ where } } } + + fn get_subnet_to_prune( + &self, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + + match api.get_subnet_to_prune(at) { + Ok(result) => Ok(result), + Err(e) => { + Err(Error::RuntimeError(format!("Unable to get subnet to prune: {e:?}")).into()) + } + } + } } diff --git a/pallets/subtensor/runtime-api/src/lib.rs b/pallets/subtensor/runtime-api/src/lib.rs index 42d12eb686..1a9c5dc09e 100644 --- a/pallets/subtensor/runtime-api/src/lib.rs +++ b/pallets/subtensor/runtime-api/src/lib.rs @@ -43,6 +43,7 @@ sp_api::decl_runtime_apis! 
{ fn get_dynamic_info(netuid: NetUid) -> Option>; fn get_subnet_state(netuid: NetUid) -> Option>; fn get_selective_metagraph(netuid: NetUid, metagraph_indexes: Vec) -> Option>; + fn get_subnet_to_prune() -> Option; } pub trait StakeInfoRuntimeApi { diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index e4f304feda..f90f16f0af 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -2334,6 +2334,9 @@ impl_runtime_apis! { fn get_selective_metagraph(netuid: NetUid, metagraph_indexes: Vec) -> Option> { SubtensorModule::get_selective_metagraph(netuid, metagraph_indexes) } + fn get_subnet_to_prune() -> Option { + pallet_subtensor::Pallet::::get_network_to_prune() + } } From 2c447fd99f46be260f4dd7d3488c52786a6f6c08 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 26 Aug 2025 18:54:59 -0400 Subject: [PATCH 095/379] Refactored epoch, tests failing --- common/src/lib.rs | 122 +++ pallets/admin-utils/src/tests/mod.rs | 9 +- pallets/subtensor/src/coinbase/root.rs | 29 +- pallets/subtensor/src/epoch/run_epoch.rs | 858 ++++++++++++++++-- pallets/subtensor/src/lib.rs | 30 +- pallets/subtensor/src/macros/dispatches.rs | 205 +++++ pallets/subtensor/src/macros/events.rs | 2 +- pallets/subtensor/src/macros/genesis.rs | 4 +- .../migrations/migrate_delete_subnet_21.rs | 10 +- .../src/migrations/migrate_delete_subnet_3.rs | 10 +- pallets/subtensor/src/rpc_info/metagraph.rs | 17 +- pallets/subtensor/src/rpc_info/neuron_info.rs | 14 +- pallets/subtensor/src/rpc_info/show_subnet.rs | 6 +- pallets/subtensor/src/rpc_info/subnet_info.rs | 6 +- pallets/subtensor/src/subnets/subsubnet.rs | 179 +++- pallets/subtensor/src/subnets/uids.rs | 14 +- pallets/subtensor/src/subnets/weights.rs | 654 ++++++++----- pallets/subtensor/src/swap/swap_hotkey.rs | 15 +- pallets/subtensor/src/tests/children.rs | 9 +- pallets/subtensor/src/tests/coinbase.rs | 22 +- pallets/subtensor/src/tests/consensus.rs | 3 +- pallets/subtensor/src/tests/epoch.rs | 77 +- pallets/subtensor/src/tests/staking.rs | 10 
+- pallets/subtensor/src/tests/subsubnet.rs | 3 +- pallets/subtensor/src/tests/swap_hotkey.rs | 15 +- .../src/tests/swap_hotkey_with_subnet.rs | 15 +- pallets/subtensor/src/tests/uids.rs | 14 +- pallets/subtensor/src/tests/weights.rs | 45 +- pallets/subtensor/src/utils/misc.rs | 15 +- 29 files changed, 1894 insertions(+), 518 deletions(-) diff --git a/common/src/lib.rs b/common/src/lib.rs index ed7cc0fe88..2fd9ca30e6 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -245,6 +245,10 @@ pub mod time { #[serde(transparent)] pub struct SubId(u8); +impl SubId { + pub const MAIN: SubId = Self(0); +} + impl From for SubId { fn from(value: u8) -> Self { Self(value) @@ -257,6 +261,124 @@ impl From for u16 { } } +impl From for u64 { + fn from(val: SubId) -> Self { + u64::from(val.0) + } +} + +impl From for u8 { + fn from(val: SubId) -> Self { + u8::from(val.0) + } +} + +impl Display for SubId { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.0, f) + } +} + +impl CompactAs for SubId { + type As = u8; + + fn encode_as(&self) -> &Self::As { + &self.0 + } + + fn decode_from(v: Self::As) -> Result { + Ok(Self(v)) + } +} + +impl From> for SubId { + fn from(c: Compact) -> Self { + c.0 + } +} + +impl TypeInfo for SubId { + type Identity = ::Identity; + fn type_info() -> scale_info::Type { + ::type_info() + } +} + +#[freeze_struct("2d995c5478e16d4d")] +#[repr(transparent)] +#[derive( + Deserialize, + Serialize, + Clone, + Copy, + Decode, + DecodeWithMemTracking, + Default, + Encode, + Eq, + Hash, + MaxEncodedLen, + Ord, + PartialEq, + PartialOrd, + RuntimeDebug, +)] +#[serde(transparent)] +pub struct NetUidStorageIndex(u16); + +impl NetUidStorageIndex { + pub const ROOT: NetUidStorageIndex = Self(0); +} + +impl Display for NetUidStorageIndex { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.0, f) + } +} + +impl CompactAs for NetUidStorageIndex { + type As = u16; + + fn encode_as(&self) -> &Self::As { + &self.0 + } + + 
fn decode_from(v: Self::As) -> Result { + Ok(Self(v)) + } +} + +impl From> for NetUidStorageIndex { + fn from(c: Compact) -> Self { + c.0 + } +} + +impl From for NetUidStorageIndex { + fn from(val: NetUid) -> Self { + val.0.into() + } +} + +impl From for u16 { + fn from(val: NetUidStorageIndex) -> Self { + val.0 + } +} + +impl From for NetUidStorageIndex { + fn from(value: u16) -> Self { + Self(value) + } +} + +impl TypeInfo for NetUidStorageIndex { + type Identity = ::Identity; + fn type_info() -> scale_info::Type { + ::type_info() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 754befc805..2e85457231 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -827,7 +827,7 @@ fn test_sudo_set_bonds_moving_average() { let netuid = NetUid::from(1); let to_be_set: u64 = 10; add_network(netuid, 10); - let init_value: u64 = SubtensorModule::get_bonds_moving_average(netuid); + let init_value: u64 = SubtensorModule::get_bonds_moving_average(netuid.into()); assert_eq!( AdminUtils::sudo_set_bonds_moving_average( <::RuntimeOrigin>::signed(U256::from(1)), @@ -845,7 +845,7 @@ fn test_sudo_set_bonds_moving_average() { Err(Error::::SubnetDoesNotExist.into()) ); assert_eq!( - SubtensorModule::get_bonds_moving_average(netuid), + SubtensorModule::get_bonds_moving_average(netuid.into()), init_value ); assert_ok!(AdminUtils::sudo_set_bonds_moving_average( @@ -853,7 +853,10 @@ fn test_sudo_set_bonds_moving_average() { netuid, to_be_set )); - assert_eq!(SubtensorModule::get_bonds_moving_average(netuid), to_be_set); + assert_eq!( + SubtensorModule::get_bonds_moving_average(netuid.into()), + to_be_set + ); }); } diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index fe1878f397..6d2824aec9 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -17,12 +17,11 @@ use 
super::*; use frame_support::dispatch::Pays; -use frame_support::storage::IterableStorageDoubleMap; use frame_support::weights::Weight; use safe_math::*; use sp_core::Get; use substrate_fixed::types::I64F64; -use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, NetUidStorageIndex, TaoCurrency}; impl Pallet { /// Fetches the total count of root network validators @@ -410,6 +409,7 @@ impl Pallet { // --- 1. Return balance to subnet owner. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); let reserved_amount = Self::get_subnet_locked_balance(netuid); + let subsubnets: u8 = SubsubnetCountCurrent::::get(netuid).into(); // --- 2. Remove network count. SubnetworkN::::remove(netuid); @@ -430,17 +430,16 @@ impl Pallet { let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let keys = Keys::::iter_prefix(netuid).collect::>(); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); // --- 8. Removes the weights for this subnet (do not remove). - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + } // --- 9. Iterate over stored weights and fill the matrix. - for (uid_i, weights_i) in - as IterableStorageDoubleMap>>::iter_prefix( - NetUid::ROOT, - ) - { + for (uid_i, weights_i) in Weights::::iter_prefix(NetUidStorageIndex::ROOT) { // Create a new vector to hold modified weights. let mut modified_weights = weights_i.clone(); // Iterate over each weight entry to potentially update it. @@ -450,7 +449,7 @@ impl Pallet { *weight = 0; // Set weight to 0 for the matching subnet_id. 
} } - Weights::::insert(NetUid::ROOT, uid_i, modified_weights); + Weights::::insert(NetUidStorageIndex::ROOT, uid_i, modified_weights); } // --- 10. Remove various network-related parameters. @@ -458,11 +457,17 @@ impl Pallet { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + Incentive::::remove(netuid_index); + } Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + LastUpdate::::remove(netuid_index); + } ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 2f302c2a5e..fc9bbd070f 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1,19 +1,155 @@ use super::*; use crate::epoch::math::*; +use alloc::collections::BTreeMap; use frame_support::IterableStorageDoubleMap; use safe_math::*; +use sp_std::collections::btree_map::IntoIter; use sp_std::vec; use substrate_fixed::types::{I32F32, I64F64, I96F32}; -use subtensor_runtime_common::{AlphaCurrency, NetUid}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId}; + +#[derive(Debug, Default)] +pub struct EpochTerms { + pub uid: usize, + pub dividend: u16, + pub incentive: u16, + pub validator_emission: AlphaCurrency, + pub server_emission: AlphaCurrency, + pub stake_weight: u16, + pub active: bool, + pub emission: AlphaCurrency, + pub rank: u16, + pub trust: u16, + pub consensus: u16, + pub pruning_score: u16, + pub validator_trust: u16, + pub new_validator_permit: bool, + pub bond: Vec<(u16, u16)>, +} + +pub struct EpochOutput(pub BTreeMap); + +impl EpochOutput { + pub fn 
as_map(&self) -> &BTreeMap { + &self.0 + } +} + +impl IntoIterator for EpochOutput +where + T: frame_system::Config, + T::AccountId: Ord, +{ + type Item = (T::AccountId, EpochTerms); + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +#[macro_export] +macro_rules! extract_from_sorted_terms { + ($sorted:expr, $field:ident) => {{ + ($sorted) + .iter() + .copied() + .map(|t| t.$field) + .collect::>() + }}; +} impl Pallet { + /// Legacy epoch function interface (TODO: Is only used for tests, remove) + pub fn epoch( + netuid: NetUid, + rao_emission: AlphaCurrency, + ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + // Run subsubnet-style epoch + let output = Self::epoch_subsubnet(netuid, SubId::MAIN, rao_emission); + + // Persist values in legacy format + Self::persist_subsub_epoch_terms(netuid, SubId::MAIN, &output.as_map()); + Self::persist_netuid_epoch_terms(netuid, &output.as_map()); + + // Remap and return + output + .into_iter() + .map(|(hotkey, terms)| (hotkey, terms.validator_emission, terms.server_emission)) + .collect() + } + + /// Legacy epoch_dense function interface (TODO: Is only used for tests, remove) + pub fn epoch_dense( + netuid: NetUid, + rao_emission: AlphaCurrency, + ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + Self::epoch_dense_subsubnet(netuid, SubId::MAIN, rao_emission) + } + + /// Persists per-subsubnet epoch output in state + pub fn persist_subsub_epoch_terms( + netuid: NetUid, + subid: SubId, + output: &BTreeMap, + ) { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let mut terms_sorted: sp_std::vec::Vec<&EpochTerms> = output.values().collect(); + terms_sorted.sort_unstable_by_key(|t| t.uid); + + let incentive = extract_from_sorted_terms!(terms_sorted, incentive); + let bonds: Vec> = terms_sorted + .iter() + .cloned() + .map(|t| t.bond.clone()) + .collect::>(); + + Incentive::::insert(netuid_index, incentive); + 
bonds.into_iter().enumerate().for_each(|(uid_usize, bond_vec)| { + let uid: u16 = uid_usize + .try_into() + .unwrap_or_default(); + Bonds::::insert(netuid_index, uid, bond_vec); + }); + } + + /// Persists per-netuid epoch output in state + pub fn persist_netuid_epoch_terms(netuid: NetUid, output: &BTreeMap) { + let mut terms_sorted: sp_std::vec::Vec<&EpochTerms> = output.values().collect(); + terms_sorted.sort_unstable_by_key(|t| t.uid); + + let active = extract_from_sorted_terms!(terms_sorted, active); + let emission = extract_from_sorted_terms!(terms_sorted, emission); + let rank = extract_from_sorted_terms!(terms_sorted, rank); + let trust = extract_from_sorted_terms!(terms_sorted, trust); + let consensus = extract_from_sorted_terms!(terms_sorted, consensus); + let dividend = extract_from_sorted_terms!(terms_sorted, dividend); + let pruning_score = extract_from_sorted_terms!(terms_sorted, pruning_score); + let validator_trust = extract_from_sorted_terms!(terms_sorted, validator_trust); + let new_validator_permit = extract_from_sorted_terms!(terms_sorted, new_validator_permit); + + Active::::insert(netuid, active.clone()); + Emission::::insert(netuid, emission); + Rank::::insert(netuid, rank); + Trust::::insert(netuid, trust); + Consensus::::insert(netuid, consensus); + Dividends::::insert(netuid, dividend); + PruningScores::::insert(netuid, pruning_score); + ValidatorTrust::::insert(netuid, validator_trust); + ValidatorPermit::::insert(netuid, new_validator_permit); + } + /// Calculates reward consensus and returns the emissions for uids/hotkeys in a given `netuid`. /// (Dense version used only for testing purposes.) #[allow(clippy::indexing_slicing)] - pub fn epoch_dense( + pub fn epoch_dense_subsubnet( netuid: NetUid, + subid: SubId, rao_emission: AlphaCurrency, ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // Get subnetwork size. 
let n: u16 = Self::get_subnetwork_n(netuid); log::trace!("n: {n:?}"); @@ -35,7 +171,7 @@ impl Pallet { log::trace!("activity_cutoff: {activity_cutoff:?}"); // Last update vector. - let last_update: Vec = Self::get_last_update(netuid); + let last_update: Vec = Self::get_last_update(netuid_index); log::trace!("Last update: {:?}", &last_update); // Inactive mask. @@ -222,12 +358,12 @@ impl Pallet { let mut ema_bonds: Vec>; if Yuma3On::::get(netuid) { // Access network bonds. - let mut bonds: Vec> = Self::get_bonds_fixed_proportion(netuid); + let mut bonds: Vec> = Self::get_bonds_fixed_proportion(netuid_index); inplace_mask_cols(&recently_registered, &mut bonds); // mask outdated bonds log::trace!("B: {:?}", &bonds); // Compute the Exponential Moving Average (EMA) of bonds. - ema_bonds = Self::compute_bonds(netuid, &weights_for_bonds, &bonds, &consensus); + ema_bonds = Self::compute_bonds(netuid_index, &weights_for_bonds, &bonds, &consensus); log::trace!("emaB: {:?}", &ema_bonds); // Normalize EMA bonds. @@ -249,7 +385,7 @@ impl Pallet { } else { // original Yuma - liquid alpha disabled // Access network bonds. - let mut bonds: Vec> = Self::get_bonds(netuid); + let mut bonds: Vec> = Self::get_bonds(netuid_index); // Remove bonds referring to neurons that have registered since last tempo. inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds inplace_col_normalize(&mut bonds); // sum_i b_ij = 1 @@ -261,7 +397,7 @@ impl Pallet { log::trace!("ΔB: {:?}", &bonds_delta); // Compute the Exponential Moving Average (EMA) of bonds. 
- ema_bonds = Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid); + ema_bonds = Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid_index); inplace_col_normalize(&mut ema_bonds); // sum_i b_ij = 1 log::trace!("emaB: {:?}", &ema_bonds); @@ -391,7 +527,7 @@ impl Pallet { Rank::::insert(netuid, cloned_ranks); Trust::::insert(netuid, cloned_trust); Consensus::::insert(netuid, cloned_consensus); - Incentive::::insert(netuid, cloned_incentive); + Incentive::::insert(NetUidStorageIndex::from(netuid), cloned_incentive); Dividends::::insert(netuid, cloned_dividends); PruningScores::::insert(netuid, cloned_pruning_scores); ValidatorTrust::::insert(netuid, cloned_validator_trust); @@ -408,11 +544,11 @@ impl Pallet { let new_bonds_row: Vec<(u16, u16)> = (0..n) .zip(vec_fixed_proportions_to_u16(ema_bond.clone())) .collect(); - Bonds::::insert(netuid, i as u16, new_bonds_row); + Bonds::::insert(netuid_index, i as u16, new_bonds_row); } else if validator_permit { // Only overwrite the intersection. let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - Bonds::::insert(netuid, i as u16, new_empty_bonds_row); + Bonds::::insert(netuid_index, i as u16, new_empty_bonds_row); } }); @@ -441,11 +577,27 @@ impl Pallet { /// * 'debug' ( bool ): /// - Print debugging outputs. /// - #[allow(clippy::indexing_slicing)] - pub fn epoch( + pub fn epoch_subsubnet( netuid: NetUid, + subid: SubId, rao_emission: AlphaCurrency, - ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + ) -> EpochOutput { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + + // Initialize output keys (neuron hotkeys) and UIDs + let mut terms_map: BTreeMap = Keys::::iter_prefix(netuid) + .map(|(uid, hotkey)| { + ( + hotkey, + EpochTerms { + uid: uid as usize, + ..Default::default() + }, + ) + }) + .collect(); + // Get subnetwork size. 
let n = Self::get_subnetwork_n(netuid); log::trace!("Number of Neurons in Network: {n:?}"); @@ -467,7 +619,7 @@ impl Pallet { log::trace!("activity_cutoff: {activity_cutoff:?}"); // Last update vector. - let last_update: Vec = Self::get_last_update(netuid); + let last_update: Vec = Self::get_last_update(netuid_index); log::trace!("Last update: {:?}", &last_update); // Inactive mask. @@ -488,11 +640,6 @@ impl Pallet { // == Stake == // =========== - let hotkeys: Vec<(u16, T::AccountId)> = - as IterableStorageDoubleMap>::iter_prefix(netuid) - .collect(); - log::debug!("hotkeys: {:?}", &hotkeys); - // Access network stake as normalized vector. let (total_stake, _alpha_stake, _tao_stake): (Vec, Vec, Vec) = Self::get_stake_weights_for_network(netuid); @@ -588,14 +735,15 @@ impl Pallet { // helper: hotkey → uid let uid_of = |acct: &T::AccountId| -> Option { - hotkeys - .iter() - .find(|(_, a)| a == acct) - .map(|(uid, _)| *uid as usize) + if let Some(terms) = terms_map.get(acct) { + Some(terms.uid) + } else { + None + } }; // ---------- v2 ------------------------------------------------------ - for (who, q) in WeightCommits::::iter_prefix(netuid) { + for (who, q) in WeightCommits::::iter_prefix(netuid_index) { for (_, cb, _, _) in q.iter() { if !Self::is_commit_expired(netuid, *cb) { if let Some(i) = uid_of(&who) { @@ -688,7 +836,7 @@ impl Pallet { let mut ema_bonds: Vec>; if Yuma3On::::get(netuid) { // Access network bonds. - let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid); + let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid_index); log::trace!("Bonds: {:?}", &bonds); // Remove bonds referring to neurons that have registered since last tempo. @@ -705,7 +853,8 @@ impl Pallet { // Compute the Exponential Moving Average (EMA) of bonds. 
log::trace!("weights_for_bonds: {:?}", &weights_for_bonds); - ema_bonds = Self::compute_bonds_sparse(netuid, &weights_for_bonds, &bonds, &consensus); + ema_bonds = + Self::compute_bonds_sparse(netuid_index, &weights_for_bonds, &bonds, &consensus); log::trace!("emaB: {:?}", &ema_bonds); // Normalize EMA bonds. @@ -727,7 +876,7 @@ impl Pallet { } else { // original Yuma - liquid alpha disabled // Access network bonds. - let mut bonds: Vec> = Self::get_bonds_sparse(netuid); + let mut bonds: Vec> = Self::get_bonds_sparse(netuid_index); log::trace!("B: {:?}", &bonds); // Remove bonds referring to neurons that have registered since last tempo. @@ -756,7 +905,7 @@ impl Pallet { log::trace!("ΔB (norm): {:?}", &bonds_delta); // Compute the Exponential Moving Average (EMA) of bonds. - ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid); + ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid_index); // Normalize EMA bonds. inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); @@ -855,9 +1004,9 @@ impl Pallet { let pruning_scores: Vec = normalized_combined_emission.clone(); log::trace!("Pruning Scores: {:?}", &pruning_scores); - // =================== - // == Value storage == - // =================== + // =========================== + // == Populate epoch output == + // =========================== let cloned_stake_weight: Vec = stake .iter() .map(|xi| fixed_proportion_to_u16(*xi)) @@ -888,51 +1037,550 @@ impl Pallet { .iter() .map(|xi| fixed_proportion_to_u16(*xi)) .collect::>(); - StakeWeight::::insert(netuid, cloned_stake_weight.clone()); - Active::::insert(netuid, active.clone()); - Emission::::insert(netuid, cloned_emission); - Rank::::insert(netuid, cloned_ranks); - Trust::::insert(netuid, cloned_trust); - Consensus::::insert(netuid, cloned_consensus); - Incentive::::insert(netuid, cloned_incentive); - Dividends::::insert(netuid, 
cloned_dividends); - PruningScores::::insert(netuid, cloned_pruning_scores); - ValidatorTrust::::insert(netuid, cloned_validator_trust); - ValidatorPermit::::insert(netuid, new_validator_permits.clone()); - new_validator_permits - .iter() - .zip(validator_permits) - .zip(ema_bonds) - .enumerate() - .for_each(|(i, ((new_permit, validator_permit), ema_bond))| { - // Set bonds only if uid retains validator permit, otherwise clear bonds. - if *new_permit { - let new_bonds_row: Vec<(u16, u16)> = ema_bond - .iter() - .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) - .collect(); - Bonds::::insert(netuid, i as u16, new_bonds_row); - } else if validator_permit { - // Only overwrite the intersection. - let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - Bonds::::insert(netuid, i as u16, new_empty_bonds_row); - } - }); + for (_hotkey, terms) in terms_map.iter_mut() { + terms.dividend = cloned_dividends.get(terms.uid).copied().unwrap_or_default(); + terms.incentive = cloned_incentive.get(terms.uid).copied().unwrap_or_default(); + terms.validator_emission = validator_emission + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.server_emission = server_emission.get(terms.uid).copied().unwrap_or_default(); + terms.stake_weight = cloned_stake_weight + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.active = active.get(terms.uid).copied().unwrap_or_default(); + terms.emission = cloned_emission.get(terms.uid).copied().unwrap_or_default(); + terms.rank = cloned_ranks.get(terms.uid).copied().unwrap_or_default(); + terms.trust = cloned_trust.get(terms.uid).copied().unwrap_or_default(); + terms.consensus = cloned_consensus.get(terms.uid).copied().unwrap_or_default(); + terms.pruning_score = cloned_pruning_scores + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.validator_trust = cloned_validator_trust + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.new_validator_permit = new_validator_permits + .get(terms.uid) + .copied() + 
.unwrap_or_default(); + let old_validator_permit = validator_permits + .get(terms.uid) + .copied() + .unwrap_or_default(); + + // Bonds + if terms.new_validator_permit { + let ema_bond = ema_bonds.get(terms.uid).cloned().unwrap_or_default(); + terms.bond = ema_bond + .iter() + .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) + .collect(); + } else if old_validator_permit { + // Only overwrite the intersection. + terms.bond = vec![]; + } + } - // Emission tuples ( hotkeys, server_emission, validator_emission ) - hotkeys - .into_iter() - .map(|(uid_i, hotkey)| { - ( - hotkey, - server_emission[uid_i as usize], - validator_emission[uid_i as usize], - ) - }) - .collect() + EpochOutput(terms_map) } + // Legacy epoch fn + // #[allow(clippy::indexing_slicing)] + // pub fn epoch( + // netuid: NetUid, + // rao_emission: AlphaCurrency, + // ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + // // Get subnetwork size. + // let n = Self::get_subnetwork_n(netuid); + // log::trace!("Number of Neurons in Network: {n:?}"); + + // // ====================== + // // == Active & updated == + // // ====================== + + // // Get current block. + // let current_block: u64 = Self::get_current_block_as_u64(); + // log::trace!("current_block: {current_block:?}"); + + // // Get tempo. + // let tempo: u64 = Self::get_tempo(netuid).into(); + // log::trace!("tempo:\n{tempo:?}\n"); + + // // Get activity cutoff. + // let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; + // log::trace!("activity_cutoff: {activity_cutoff:?}"); + + // // Last update vector. + // let last_update: Vec = Self::get_last_update(netuid); + // log::trace!("Last update: {:?}", &last_update); + + // // Inactive mask. + // let inactive: Vec = last_update + // .iter() + // .map(|updated| updated.saturating_add(activity_cutoff) < current_block) + // .collect(); + // log::debug!("Inactive: {:?}", inactive.clone()); + + // // Logical negation of inactive. 
+ // let active: Vec = inactive.iter().map(|&b| !b).collect(); + + // // Block at registration vector (block when each neuron was most recently registered). + // let block_at_registration: Vec = Self::get_block_at_registration(netuid); + // log::trace!("Block at registration: {:?}", &block_at_registration); + + // // =========== + // // == Stake == + // // =========== + + // let hotkeys: Vec<(u16, T::AccountId)> = + // as IterableStorageDoubleMap>::iter_prefix(netuid) + // .collect(); + // log::debug!("hotkeys: {:?}", &hotkeys); + + // // Access network stake as normalized vector. + // let (total_stake, _alpha_stake, _tao_stake): (Vec, Vec, Vec) = + // Self::get_stake_weights_for_network(netuid); + + // // Get the minimum stake required. + // let min_stake = Self::get_stake_threshold(); + + // // Set stake of validators that doesn't meet the staking threshold to 0 as filter. + // let mut filtered_stake: Vec = total_stake + // .iter() + // .map(|&s| { + // if fixed64_to_u64(s) < min_stake { + // return I64F64::from(0); + // } + // s + // }) + // .collect(); + // log::debug!("Filtered stake: {:?}", &filtered_stake); + + // inplace_normalize_64(&mut filtered_stake); + // let stake: Vec = vec_fixed64_to_fixed32(filtered_stake); + // log::debug!("Normalised Stake: {:?}", &stake); + + // // ======================= + // // == Validator permits == + // // ======================= + + // // Get current validator permits. + // let validator_permits: Vec = Self::get_validator_permit(netuid); + // log::trace!("validator_permits: {validator_permits:?}"); + + // // Logical negation of validator_permits. + // let validator_forbids: Vec = validator_permits.iter().map(|&b| !b).collect(); + + // // Get max allowed validators. + // let max_allowed_validators: u16 = Self::get_max_allowed_validators(netuid); + // log::trace!("max_allowed_validators: {max_allowed_validators:?}"); + + // // Get new validator permits. 
+ // let new_validator_permits: Vec = + // is_topk_nonzero(&stake, max_allowed_validators as usize); + // log::trace!("new_validator_permits: {new_validator_permits:?}"); + + // // ================== + // // == Active Stake == + // // ================== + + // let mut active_stake: Vec = stake.clone(); + + // // Remove inactive stake. + // inplace_mask_vector(&inactive, &mut active_stake); + + // // Remove non-validator stake. + // inplace_mask_vector(&validator_forbids, &mut active_stake); + + // // Normalize active stake. + // inplace_normalize(&mut active_stake); + // log::trace!("Active Stake: {:?}", &active_stake); + + // // ============= + // // == Weights == + // // ============= + + // let owner_uid: Option = Self::get_owner_uid(netuid); + + // // Access network weights row unnormalized. + // let mut weights: Vec> = Self::get_weights_sparse(netuid); + // log::trace!("Weights: {:?}", &weights); + + // // Mask weights that are not from permitted validators. + // weights = mask_rows_sparse(&validator_forbids, &weights); + // log::trace!("Weights (permit): {:?}", &weights); + + // // Remove self-weight by masking diagonal; keep owner_uid self-weight. + // if let Some(owner_uid) = owner_uid { + // weights = mask_diag_sparse_except_index(&weights, owner_uid); + // } else { + // weights = mask_diag_sparse(&weights); + // } + // log::trace!("Weights (permit+diag): {:?}", &weights); + + // // Remove weights referring to deregistered neurons. 
+ // weights = vec_mask_sparse_matrix( + // &weights, + // &last_update, + // &block_at_registration, + // &|updated, registered| updated <= registered, + // ); + // log::trace!("Weights (permit+diag+outdate): {:?}", &weights); + + // if Self::get_commit_reveal_weights_enabled(netuid) { + // let mut commit_blocks: Vec = vec![u64::MAX; n as usize]; // MAX ⇒ “no active commit” + + // // helper: hotkey → uid + // let uid_of = |acct: &T::AccountId| -> Option { + // hotkeys + // .iter() + // .find(|(_, a)| a == acct) + // .map(|(uid, _)| *uid as usize) + // }; + + // // ---------- v2 ------------------------------------------------------ + // for (who, q) in WeightCommits::::iter_prefix(netuid) { + // for (_, cb, _, _) in q.iter() { + // if !Self::is_commit_expired(netuid, *cb) { + // if let Some(i) = uid_of(&who) { + // commit_blocks[i] = commit_blocks[i].min(*cb); + // } + // break; // earliest active found + // } + // } + // } + + // // ---------- v3 ------------------------------------------------------ + // for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid) { + // for (who, cb, ..) in q.iter() { + // if !Self::is_commit_expired(netuid, *cb) { + // if let Some(i) = uid_of(who) { + // commit_blocks[i] = commit_blocks[i].min(*cb); + // } + // } + // } + // } + + // weights = vec_mask_sparse_matrix( + // &weights, + // &commit_blocks, + // &block_at_registration, + // &|cb, reg| cb < reg, + // ); + + // log::trace!( + // "Commit-reveal column mask applied ({} masked rows)", + // commit_blocks.iter().filter(|&&cb| cb != u64::MAX).count() + // ); + // } + + // // Normalize remaining weights. 
+ // inplace_row_normalize_sparse(&mut weights); + // log::trace!("Weights (mask+norm): {:?}", &weights); + + // // ================================ + // // == Consensus, Validator Trust == + // // ================================ + + // // Compute preranks: r_j = SUM(i) w_ij * s_i + // let preranks: Vec = matmul_sparse(&weights, &active_stake, n); + // log::trace!("Ranks (before): {:?}", &preranks); + + // // Consensus majority ratio, e.g. 51%. + // let kappa: I32F32 = Self::get_float_kappa(netuid); + // // Calculate consensus as stake-weighted median of weights. + // let consensus: Vec = weighted_median_col_sparse(&active_stake, &weights, n, kappa); + // log::trace!("Consensus: {:?}", &consensus); + + // // Clip weights at majority consensus. + // let clipped_weights: Vec> = col_clip_sparse(&weights, &consensus); + // log::trace!("Clipped Weights: {:?}", &clipped_weights); + + // // Calculate validator trust as sum of clipped weights set by validator. + // let validator_trust: Vec = row_sum_sparse(&clipped_weights); + // log::trace!("Validator Trust: {:?}", &validator_trust); + + // // ============================= + // // == Ranks, Trust, Incentive == + // // ============================= + + // // Compute ranks: r_j = SUM(i) w_ij * s_i. + // let mut ranks: Vec = matmul_sparse(&clipped_weights, &active_stake, n); + // log::trace!("Ranks (after): {:?}", &ranks); + + // // Compute server trust: ratio of rank after vs. rank before. + // let trust: Vec = vecdiv(&ranks, &preranks); // range: I32F32(0, 1) + // log::trace!("Trust: {:?}", &trust); + + // inplace_normalize(&mut ranks); // range: I32F32(0, 1) + // let incentive: Vec = ranks.clone(); + // log::trace!("Incentive (=Rank): {:?}", &incentive); + + // // ========================= + // // == Bonds and Dividends == + // // ========================= + + // // Get validator bonds penalty in [0, 1]. 
+ // let bonds_penalty: I32F32 = Self::get_float_bonds_penalty(netuid); + // // Calculate weights for bonds, apply bonds penalty to weights. + // // bonds_penalty = 0: weights_for_bonds = weights.clone() + // // bonds_penalty = 1: weights_for_bonds = clipped_weights.clone() + // let weights_for_bonds: Vec> = + // interpolate_sparse(&weights, &clipped_weights, n, bonds_penalty); + + // let mut dividends: Vec; + // let mut ema_bonds: Vec>; + // if Yuma3On::::get(netuid) { + // // Access network bonds. + // let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid); + // log::trace!("Bonds: {:?}", &bonds); + + // // Remove bonds referring to neurons that have registered since last tempo. + // // Mask if: the last tempo block happened *before* the registration block + // // ==> last_tempo <= registered + // let last_tempo: u64 = current_block.saturating_sub(tempo); + // bonds = scalar_vec_mask_sparse_matrix( + // &bonds, + // last_tempo, + // &block_at_registration, + // &|last_tempo, registered| last_tempo <= registered, + // ); + // log::trace!("Bonds: (mask) {:?}", &bonds); + + // // Compute the Exponential Moving Average (EMA) of bonds. + // log::trace!("weights_for_bonds: {:?}", &weights_for_bonds); + // ema_bonds = Self::compute_bonds_sparse(netuid, &weights_for_bonds, &bonds, &consensus); + // log::trace!("emaB: {:?}", &ema_bonds); + + // // Normalize EMA bonds. 
+ // let mut ema_bonds_norm = ema_bonds.clone(); + // inplace_col_normalize_sparse(&mut ema_bonds_norm, n); // sum_i b_ij = 1 + // log::trace!("emaB norm: {:?}", &ema_bonds_norm); + + // // # === Dividend Calculation=== + // let total_bonds_per_validator: Vec = + // row_sum_sparse(&mat_vec_mul_sparse(&ema_bonds_norm, &incentive)); + // log::trace!( + // "total_bonds_per_validator: {:?}", + // &total_bonds_per_validator + // ); + + // dividends = vec_mul(&total_bonds_per_validator, &active_stake); + // inplace_normalize(&mut dividends); + // log::trace!("Dividends: {:?}", ÷nds); + // } else { + // // original Yuma - liquid alpha disabled + // // Access network bonds. + // let mut bonds: Vec> = Self::get_bonds_sparse(netuid); + // log::trace!("B: {:?}", &bonds); + + // // Remove bonds referring to neurons that have registered since last tempo. + // // Mask if: the last tempo block happened *before* the registration block + // // ==> last_tempo <= registered + // let last_tempo: u64 = current_block.saturating_sub(tempo); + // bonds = scalar_vec_mask_sparse_matrix( + // &bonds, + // last_tempo, + // &block_at_registration, + // &|last_tempo, registered| last_tempo <= registered, + // ); + // log::trace!("B (outdatedmask): {:?}", &bonds); + + // // Normalize remaining bonds: sum_i b_ij = 1. + // inplace_col_normalize_sparse(&mut bonds, n); + // log::trace!("B (mask+norm): {:?}", &bonds); + + // // Compute bonds delta column normalized. + // let mut bonds_delta: Vec> = + // row_hadamard_sparse(&weights_for_bonds, &active_stake); // ΔB = W◦S (outdated W masked) + // log::trace!("ΔB: {:?}", &bonds_delta); + + // // Normalize bonds delta. + // inplace_col_normalize_sparse(&mut bonds_delta, n); // sum_i b_ij = 1 + // log::trace!("ΔB (norm): {:?}", &bonds_delta); + + // // Compute the Exponential Moving Average (EMA) of bonds. + // ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid); + // // Normalize EMA bonds. 
+ // inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 + // log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); + + // // Compute dividends: d_i = SUM(j) b_ij * inc_j. + // // range: I32F32(0, 1) + // dividends = matmul_transpose_sparse(&ema_bonds, &incentive); + // inplace_normalize(&mut dividends); + // log::trace!("Dividends: {:?}", ÷nds); + + // // Column max-upscale EMA bonds for storage: max_i w_ij = 1. + // inplace_col_max_upscale_sparse(&mut ema_bonds, n); + // } + + // // ================================= + // // == Emission and Pruning scores == + // // ================================= + + // // Compute normalized emission scores. range: I32F32(0, 1) + // let combined_emission: Vec = incentive + // .iter() + // .zip(dividends.clone()) + // .map(|(ii, di)| ii.saturating_add(di)) + // .collect(); + // let emission_sum: I32F32 = combined_emission.iter().sum(); + + // let mut normalized_server_emission: Vec = incentive.clone(); // Servers get incentive. + // let mut normalized_validator_emission: Vec = dividends.clone(); // Validators get dividends. + // let mut normalized_combined_emission: Vec = combined_emission.clone(); + // // Normalize on the sum of incentive + dividends. + // inplace_normalize_using_sum(&mut normalized_server_emission, emission_sum); + // inplace_normalize_using_sum(&mut normalized_validator_emission, emission_sum); + // inplace_normalize(&mut normalized_combined_emission); + + // // If emission is zero, replace emission with normalized stake. 
+ // if emission_sum == I32F32::from(0) { + // // no weights set | outdated weights | self_weights + // if is_zero(&active_stake) { + // // no active stake + // normalized_validator_emission.clone_from(&stake); // do not mask inactive, assumes stake is normalized + // normalized_combined_emission.clone_from(&stake); + // } else { + // normalized_validator_emission.clone_from(&active_stake); // emission proportional to inactive-masked normalized stake + // normalized_combined_emission.clone_from(&active_stake); + // } + // } + + // // Compute rao based emission scores. range: I96F32(0, rao_emission) + // let float_rao_emission: I96F32 = I96F32::saturating_from_num(rao_emission); + + // let server_emission: Vec = normalized_server_emission + // .iter() + // .map(|se: &I32F32| I96F32::saturating_from_num(*se).saturating_mul(float_rao_emission)) + // .collect(); + // let server_emission: Vec = server_emission + // .iter() + // .map(|e: &I96F32| e.saturating_to_num::().into()) + // .collect(); + + // let validator_emission: Vec = normalized_validator_emission + // .iter() + // .map(|ve: &I32F32| I96F32::saturating_from_num(*ve).saturating_mul(float_rao_emission)) + // .collect(); + // let validator_emission: Vec = validator_emission + // .iter() + // .map(|e: &I96F32| e.saturating_to_num::().into()) + // .collect(); + + // // Only used to track emission in storage. 
+ // let combined_emission: Vec = normalized_combined_emission + // .iter() + // .map(|ce: &I32F32| I96F32::saturating_from_num(*ce).saturating_mul(float_rao_emission)) + // .collect(); + // let combined_emission: Vec = combined_emission + // .iter() + // .map(|e: &I96F32| AlphaCurrency::from(e.saturating_to_num::())) + // .collect(); + + // log::trace!( + // "Normalized Server Emission: {:?}", + // &normalized_server_emission + // ); + // log::trace!("Server Emission: {:?}", &server_emission); + // log::trace!( + // "Normalized Validator Emission: {:?}", + // &normalized_validator_emission + // ); + // log::trace!("Validator Emission: {:?}", &validator_emission); + // log::trace!( + // "Normalized Combined Emission: {:?}", + // &normalized_combined_emission + // ); + // log::trace!("Combined Emission: {:?}", &combined_emission); + + // // Set pruning scores using combined emission scores. + // let pruning_scores: Vec = normalized_combined_emission.clone(); + // log::trace!("Pruning Scores: {:?}", &pruning_scores); + + // // =================== + // // == Value storage == + // // =================== + // let cloned_stake_weight: Vec = stake + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_emission = combined_emission.clone(); + // let cloned_ranks: Vec = ranks + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_trust: Vec = trust + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_consensus: Vec = consensus + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_incentive: Vec = incentive + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_dividends: Vec = dividends + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_pruning_scores: Vec = vec_max_upscale_to_u16(&pruning_scores); + // let cloned_validator_trust: Vec = 
validator_trust + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // StakeWeight::::insert(netuid, cloned_stake_weight.clone()); + // Active::::insert(netuid, active.clone()); + // Emission::::insert(netuid, cloned_emission); + // Rank::::insert(netuid, cloned_ranks); + // Trust::::insert(netuid, cloned_trust); + // Consensus::::insert(netuid, cloned_consensus); + // Incentive::::insert(netuid, cloned_incentive); + // Dividends::::insert(netuid, cloned_dividends); + // PruningScores::::insert(netuid, cloned_pruning_scores); + // ValidatorTrust::::insert(netuid, cloned_validator_trust); + // ValidatorPermit::::insert(netuid, new_validator_permits.clone()); + + // new_validator_permits + // .iter() + // .zip(validator_permits) + // .zip(ema_bonds) + // .enumerate() + // .for_each(|(i, ((new_permit, validator_permit), ema_bond))| { + // // Set bonds only if uid retains validator permit, otherwise clear bonds. + // if *new_permit { + // let new_bonds_row: Vec<(u16, u16)> = ema_bond + // .iter() + // .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) + // .collect(); + // Bonds::::insert(netuid, i as u16, new_bonds_row); + // } else if validator_permit { + // // Only overwrite the intersection. 
+ // let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; + // Bonds::::insert(netuid, i as u16, new_empty_bonds_row); + // } + // }); + + // // Emission tuples ( hotkeys, server_emission, validator_emission ) + // hotkeys + // .into_iter() + // .map(|(uid_i, hotkey)| { + // ( + // hotkey, + // server_emission[uid_i as usize], + // validator_emission[uid_i as usize], + // ) + // }) + // .collect() + // } + pub fn get_float_rho(netuid: NetUid) -> I32F32 { I32F32::saturating_from_num(Self::get_rho(netuid)) } @@ -963,11 +1611,12 @@ impl Pallet { pub fn get_weights_sparse(netuid: NetUid) -> Vec> { let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![]; n]; - for (uid_i, weights_i) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + for (uid_i, weights_i) in as IterableStorageDoubleMap< + NetUidStorageIndex, + u16, + Vec<(u16, u16)>, + >>::iter_prefix(NetUidStorageIndex::from(netuid)) + .filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_i.iter().filter(|(uid_j, _)| *uid_j < n as u16) { weights @@ -983,11 +1632,12 @@ impl Pallet { pub fn get_weights(netuid: NetUid) -> Vec> { let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; - for (uid_i, weights_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + for (uid_i, weights_vec) in as IterableStorageDoubleMap< + NetUidStorageIndex, + u16, + Vec<(u16, u16)>, + >>::iter_prefix(NetUidStorageIndex::from(netuid)) + .filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_vec .into_iter() @@ -1005,12 +1655,13 @@ impl Pallet { } /// Output unnormalized sparse bonds, input bonds are assumed to be column max-upscaled in u16. 
- pub fn get_bonds_sparse(netuid: NetUid) -> Vec> { + pub fn get_bonds_sparse(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![]; n]; for (uid_i, bonds_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, + Bonds::::iter_prefix( + netuid_index, ) .filter(|(uid_i, _)| *uid_i < n as u16) { @@ -1025,12 +1676,13 @@ impl Pallet { } /// Output unnormalized bonds in [n, n] matrix, input bonds are assumed to be column max-upscaled in u16. - pub fn get_bonds(netuid: NetUid) -> Vec> { + pub fn get_bonds(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n: usize = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; for (uid_i, bonds_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, + Bonds::::iter_prefix( + netuid_index, ) .filter(|(uid_i, _)| *uid_i < n as u16) { @@ -1046,7 +1698,7 @@ impl Pallet { bonds } - pub fn get_bonds_fixed_proportion(netuid: NetUid) -> Vec> { + pub fn get_bonds_fixed_proportion(netuid: NetUidStorageIndex) -> Vec> { let mut bonds = Self::get_bonds(netuid); bonds.iter_mut().for_each(|bonds_row| { bonds_row @@ -1056,7 +1708,7 @@ impl Pallet { bonds } - pub fn get_bonds_sparse_fixed_proportion(netuid: NetUid) -> Vec> { + pub fn get_bonds_sparse_fixed_proportion(netuid: NetUidStorageIndex) -> Vec> { let mut bonds = Self::get_bonds_sparse(netuid); bonds.iter_mut().for_each(|bonds_row| { bonds_row @@ -1078,7 +1730,7 @@ impl Pallet { pub fn compute_ema_bonds_normal_sparse( bonds_delta: &[Vec<(u16, I32F32)>], bonds: &[Vec<(u16, I32F32)>], - netuid: NetUid, + netuid: NetUidStorageIndex, ) -> Vec> { // Retrieve the bonds moving average for the given network ID and scale it down. 
let bonds_moving_average: I64F64 = @@ -1112,7 +1764,7 @@ impl Pallet { pub fn compute_ema_bonds_normal( bonds_delta: &[Vec], bonds: &[Vec], - netuid: NetUid, + netuid: NetUidStorageIndex, ) -> Vec> { // Retrieve the bonds moving average for the given network ID and scale it down. let bonds_moving_average: I64F64 = @@ -1146,11 +1798,13 @@ impl Pallet { /// # Returns: /// A vector of EMA bonds. pub fn compute_bonds( - netuid: NetUid, + netuid_index: NetUidStorageIndex, weights: &[Vec], // weights_for_bonds bonds: &[Vec], consensus: &[I32F32], ) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() @@ -1167,7 +1821,7 @@ impl Pallet { mat_ema_alpha(weights, bonds, &alphas) } else { // Liquid Alpha is disabled, compute the liquid alpha value. - let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid_index); // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. mat_ema(weights, bonds, alpha) @@ -1186,11 +1840,13 @@ impl Pallet { /// # Returns: /// A vector of EMA bonds. pub fn compute_bonds_sparse( - netuid: NetUid, + netuid_index: NetUidStorageIndex, weights: &[Vec<(u16, I32F32)>], bonds: &[Vec<(u16, I32F32)>], consensus: &[I32F32], ) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() @@ -1207,7 +1863,7 @@ impl Pallet { mat_ema_alpha_sparse(weights, bonds, &alphas) } else { // Liquid Alpha is disabled, compute the liquid alpha value. 
- let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid_index); // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. mat_ema_sparse(weights, bonds, alpha) @@ -1362,7 +2018,7 @@ impl Pallet { clamp_value(alpha, alpha_low, alpha_high) } - pub fn compute_disabled_liquid_alpha(netuid: NetUid) -> I32F32 { + pub fn compute_disabled_liquid_alpha(netuid: NetUidStorageIndex) -> I32F32 { // Retrieve the bonds moving average for the given network ID and scale it down. let bonds_moving_average: I64F64 = I64F64::from_num(Self::get_bonds_moving_average(netuid)) .saturating_div(I64F64::from_num(1_000_000)); @@ -1413,7 +2069,9 @@ impl Pallet { Ok(()) } - pub fn do_reset_bonds(netuid: NetUid, account_id: &T::AccountId) -> Result<(), DispatchError> { + pub fn do_reset_bonds(netuid_index: NetUidStorageIndex, account_id: &T::AccountId) -> Result<(), DispatchError> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // check bonds reset enabled for this subnet let bonds_reset_enabled: bool = Self::get_bonds_reset(netuid); if !bonds_reset_enabled { @@ -1421,9 +2079,9 @@ impl Pallet { } if let Ok(uid) = Self::get_uid_for_net_and_hotkey(netuid, account_id) { - for (i, bonds_vec) in Bonds::::iter_prefix(netuid) { + for (i, bonds_vec) in Bonds::::iter_prefix(netuid_index) { Bonds::::insert( - netuid, + netuid_index, i, bonds_vec .clone() diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 7fa6c8a919..da19d9d54e 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -85,7 +85,9 @@ pub mod pallet { use sp_std::vec::Vec; use substrate_fixed::types::{I96F32, U64F64}; use subtensor_macros::freeze_struct; - use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; + use subtensor_runtime_common::{ + AlphaCurrency, Currency, NetUid, NetUidStorageIndex, SubId, TaoCurrency, + }; 
#[cfg(not(feature = "std"))] use alloc::boxed::Box; @@ -1522,7 +1524,7 @@ pub mod pallet { #[pallet::storage] /// --- MAP ( netuid ) --> incentive pub type Incentive = - StorageMap<_, Identity, NetUid, Vec, ValueQuery, EmptyU16Vec>; + StorageMap<_, Identity, NetUidStorageIndex, Vec, ValueQuery, EmptyU16Vec>; #[pallet::storage] /// --- MAP ( netuid ) --> dividends pub type Dividends = @@ -1533,7 +1535,7 @@ pub mod pallet { #[pallet::storage] /// --- MAP ( netuid ) --> last_update pub type LastUpdate = - StorageMap<_, Identity, NetUid, Vec, ValueQuery, EmptyU64Vec>; + StorageMap<_, Identity, NetUidStorageIndex, Vec, ValueQuery, EmptyU64Vec>; #[pallet::storage] /// --- MAP ( netuid ) --> validator_trust pub type ValidatorTrust = @@ -1551,7 +1553,7 @@ pub mod pallet { pub type Weights = StorageDoubleMap< _, Identity, - NetUid, + NetUidStorageIndex, Identity, u16, Vec<(u16, u16)>, @@ -1563,7 +1565,7 @@ pub mod pallet { pub type Bonds = StorageDoubleMap< _, Identity, - NetUid, + NetUidStorageIndex, Identity, u16, Vec<(u16, u16)>, @@ -1670,7 +1672,7 @@ pub mod pallet { pub type WeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, T::AccountId, VecDeque<(H256, u64, u64, u64)>, @@ -1802,13 +1804,13 @@ pub mod pallet { /// ====================== #[pallet::type_value] /// -- ITEM (Default number of sub-subnets) - pub fn DefaultSubsubnetCount() -> u8 { - 1 + pub fn DefaultSubsubnetCount() -> SubId { + SubId::from(1) } #[pallet::type_value] /// -- ITEM (Maximum number of sub-subnets) - pub fn MaxSubsubnetCount() -> u8 { - 8 + pub fn MaxSubsubnetCount() -> SubId { + SubId::from(8) } #[pallet::type_value] /// -- ITEM (Number of tempos in subnet super-block) @@ -1817,17 +1819,17 @@ pub mod pallet { } #[pallet::type_value] /// -- ITEM (Maximum allowed sub-subnet count decrease per super-block) - pub fn GlobalSubsubnetDecreasePerSuperblock() -> u8 { - 1 + pub fn GlobalSubsubnetDecreasePerSuperblock() -> SubId { + SubId::from(1) } 
#[pallet::storage] /// --- MAP ( netuid ) --> Number of sub-subnets desired by root or subnet owner. pub type SubsubnetCountDesired = - StorageMap<_, Twox64Concat, NetUid, u8, ValueQuery, DefaultSubsubnetCount>; + StorageMap<_, Twox64Concat, NetUid, SubId, ValueQuery, DefaultSubsubnetCount>; #[pallet::storage] /// --- MAP ( netuid ) --> Current number of sub-subnets pub type SubsubnetCountCurrent = - StorageMap<_, Twox64Concat, NetUid, u8, ValueQuery, DefaultSubsubnetCount>; + StorageMap<_, Twox64Concat, NetUid, SubId, ValueQuery, DefaultSubsubnetCount>; /// ================== /// ==== Genesis ===== diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 35439479ab..ad9fc8571c 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -94,6 +94,83 @@ mod dispatches { } } + /// --- Sets the caller weights for the incentive mechanism. The call can be + /// made from the hotkey account so is potentially insecure, however, the damage + /// of changing weights is minimal if caught early. This function includes all the + /// checks that the passed weights meet the requirements. Stored as u16s they represent + /// rational values in the range [0,1] which sum to 1 and can be interpreted as + /// probabilities. The specific weights determine how inflation propagates outward + /// from this peer. + /// + /// Note: The 16 bit integers weights should represent 1.0 as the max u16. + /// However, the function normalizes all integers to u16_max anyway. This means that if the sum of all + /// elements is larger or smaller than the amount of elements * u16_max, all elements + /// will be corrected for this deviation. + /// + /// # Args: + /// * `origin`: (Origin): + /// - The caller, a hotkey who wishes to set their weights. + /// + /// * `netuid` (u16): + /// - The network uid we are setting these weights on. 
+ /// + /// * `dests` (Vec): + /// - The edge endpoint for the weight, i.e. j for w_ij. + /// + /// * 'weights' (Vec): + /// - The u16 integer encoded weights. Interpreted as rational + /// values in the range [0,1]. They must sum to in32::MAX. + /// + /// * 'version_key' ( u64 ): + /// - The network version key to check if the validator is up to date. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + #[pallet::call_index(114)] + #[pallet::weight((Weight::from_parts(15_540_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4111)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn set_sub_weights( + origin: OriginFor, + netuid: NetUid, + subid: SubId, + dests: Vec, + weights: Vec, + version_key: u64, + ) -> DispatchResult { + if Self::get_commit_reveal_weights_enabled(netuid) { + Err(Error::::CommitRevealEnabled.into()) + } else { + Self::do_set_sub_weights(origin, netuid, subid, dests, weights, version_key) + } + } + /// --- Allows a hotkey to set weights for multiple netuids as a batch. 
/// /// # Args: @@ -163,6 +240,38 @@ mod dispatches { Self::do_commit_weights(origin, netuid, commit_hash) } + /// ---- Used to commit a hash of your weight values to later be revealed. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `commit_hash` (`H256`): + /// - The hash representing the committed weights. + /// + /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to commit when the commit-reveal mechanism is disabled. + /// + /// * `TooManyUnrevealedCommits`: + /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. + /// + #[pallet::call_index(115)] + #[pallet::weight((Weight::from_parts(55_130_000, 0) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit_hash: H256, + ) -> DispatchResult { + Self::do_commit_sub_weights(origin, netuid, subid, commit_hash) + } + /// --- Allows a hotkey to commit weight hashes for multiple netuids as a batch. /// /// # Args: @@ -249,6 +358,59 @@ mod dispatches { Self::do_reveal_weights(origin, netuid, uids, values, salt, version_key) } + /// ---- Used to reveal the weights for a previously committed hash. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the revealing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `uids` (`Vec`): + /// - The uids for the weights being revealed. + /// + /// * `values` (`Vec`): + /// - The values of the weights being revealed. + /// + /// * `salt` (`Vec`): + /// - The salt used to generate the commit hash. + /// + /// * `version_key` (`u64`): + /// - The network version key. 
+ /// + /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to reveal weights when the commit-reveal mechanism is disabled. + /// + /// * `NoWeightsCommitFound`: + /// - Attempting to reveal weights without an existing commit. + /// + /// * `ExpiredWeightCommit`: + /// - Attempting to reveal a weight commit that has expired. + /// + /// * `RevealTooEarly`: + /// - Attempting to reveal weights outside the valid reveal period. + /// + /// * `InvalidRevealCommitHashNotMatch`: + /// - The revealed hash does not match any committed hash. + /// + #[pallet::call_index(116)] + #[pallet::weight((Weight::from_parts(122_000_000, 0) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn reveal_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + Self::do_reveal_sub_weights(origin, netuid, subid, uids, values, salt, version_key) + } + /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed. /// /// # Args: @@ -291,6 +453,49 @@ mod dispatches { Self::do_commit_timelocked_weights(origin, netuid, commit, reveal_round, 4) } + /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `commit` (`Vec`): + /// - The encrypted compressed commit. + /// The steps for this are: + /// 1. Instantiate [`WeightsTlockPayload`] + /// 2. Serialize it using the `parity_scale_codec::Encode` trait + /// 3. Encrypt it following the steps (here)[https://github.com/ideal-lab5/tle/blob/f8e6019f0fb02c380ebfa6b30efb61786dede07b/timelock/src/tlock.rs#L283-L336] + /// to produce a [`TLECiphertext`] type. + /// 4. 
Serialize and compress using the `ark-serialize` `CanonicalSerialize` trait. + /// + /// * reveal_round (`u64`): + /// - The drand reveal round which will be avaliable during epoch `n+1` from the current + /// epoch. + /// + /// # Raises: + /// * `CommitRevealV3Disabled`: + /// - Attempting to commit when the commit-reveal mechanism is disabled. + /// + /// * `TooManyUnrevealedCommits`: + /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. + /// + #[pallet::call_index(117)] + #[pallet::weight((Weight::from_parts(77_750_000, 0) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_crv3_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + ) -> DispatchResult { + Self::do_commit_timelocked_sub_weights(origin, netuid, subid, commit, reveal_round, 4) + } + /// ---- The implementation for batch revealing committed weights. /// /// # Args: diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 2fab5ecdb4..2fc9517daf 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -41,7 +41,7 @@ mod events { TaoCurrency, ), /// a caller successfully sets their weights on a subnetwork. - WeightsSet(NetUid, u16), + WeightsSet(NetUidStorageIndex, u16), /// a new neuron account has been registered to the chain. NeuronRegistered(NetUid, u16, T::AccountId), /// multiple uids have been concurrently registered. 
diff --git a/pallets/subtensor/src/macros/genesis.rs b/pallets/subtensor/src/macros/genesis.rs index e50bf01d7d..b9378e38f6 100644 --- a/pallets/subtensor/src/macros/genesis.rs +++ b/pallets/subtensor/src/macros/genesis.rs @@ -96,9 +96,9 @@ mod genesis { Active::::mutate(netuid, |v| v.push(true)); Emission::::mutate(netuid, |v| v.push(0.into())); Consensus::::mutate(netuid, |v| v.push(0)); - Incentive::::mutate(netuid, |v| v.push(0)); + Incentive::::mutate(NetUidStorageIndex::from(netuid), |v| v.push(0)); Dividends::::mutate(netuid, |v| v.push(0)); - LastUpdate::::mutate(netuid, |v| v.push(block_number)); + LastUpdate::::mutate(NetUidStorageIndex::from(netuid), |v| v.push(block_number)); PruningScores::::mutate(netuid, |v| v.push(0)); ValidatorTrust::::mutate(netuid, |v| v.push(0)); ValidatorPermit::::mutate(netuid, |v| v.push(false)); diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs index e6a8c72eae..58f880f1e4 100644 --- a/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs +++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs @@ -6,7 +6,7 @@ use frame_support::{ }; use log::info; use sp_std::vec::Vec; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex}; /// Constant for logging purposes const LOG_TARGET: &str = "migrate_delete_subnet_21"; @@ -76,8 +76,8 @@ pub fn migrate_delete_subnet_21() -> Weight { // Remove incentive mechanism memory let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); + let _ = Weights::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); weight.saturating_accrue(T::DbWeight::get().writes(4)); @@ -86,11 +86,11 @@ pub fn 
migrate_delete_subnet_21() -> Weight { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); + LastUpdate::::remove(NetUidStorageIndex::from(netuid)); ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs index c479bd613a..1cfb37d164 100644 --- a/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs +++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs @@ -6,7 +6,7 @@ use frame_support::{ }; use log::info; use sp_std::vec::Vec; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex}; /// Constant for logging purposes const LOG_TARGET: &str = "migrate_delete_subnet_3"; @@ -78,8 +78,8 @@ pub fn migrate_delete_subnet_3() -> Weight { // Remove incentive mechanism memory let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); + let _ = Weights::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); weight.saturating_accrue(T::DbWeight::get().writes(4)); @@ -88,11 +88,11 @@ pub fn migrate_delete_subnet_3() -> Weight { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); + LastUpdate::::remove(NetUidStorageIndex::from(netuid)); 
ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index 7f9dc46bee..308c85cba6 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -7,7 +7,7 @@ use frame_support::pallet_prelude::{Decode, Encode}; use substrate_fixed::types::I64F64; use substrate_fixed::types::I96F32; use subtensor_macros::freeze_struct; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; #[freeze_struct("6fc49d5a7dc0e339")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -727,7 +727,8 @@ impl Pallet { liquid_alpha_enabled: Self::get_liquid_alpha_enabled(netuid), // Bonds liquid enabled. alpha_high: Self::get_alpha_values(netuid).1.into(), // Alpha param high alpha_low: Self::get_alpha_values(netuid).0.into(), // Alpha param low - bonds_moving_avg: Self::get_bonds_moving_average(netuid).into(), // Bonds moving avg + bonds_moving_avg: Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)) + .into(), // Bonds moving avg // Metagraph info. 
hotkeys, // hotkey per UID @@ -740,7 +741,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(), // Pruning per UID - last_update: LastUpdate::::get(netuid) + last_update: LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), // Last update per UID @@ -752,7 +753,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(), // Dividends per UID - incentives: Incentive::::get(netuid) + incentives: Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), // Mining incentives per UID @@ -1113,7 +1114,9 @@ impl Pallet { }, Some(SelectiveMetagraphIndex::BondsMovingAvg) => SelectiveMetagraph { netuid: netuid.into(), - bonds_moving_avg: Some(Self::get_bonds_moving_average(netuid).into()), + bonds_moving_avg: Some( + Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)).into(), + ), ..Default::default() }, @@ -1198,7 +1201,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::LastUpdate) => SelectiveMetagraph { netuid: netuid.into(), last_update: Some( - LastUpdate::::get(netuid) + LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), @@ -1231,7 +1234,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { netuid: netuid.into(), incentives: Some( - Incentive::::get(netuid) + Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), diff --git a/pallets/subtensor/src/rpc_info/neuron_info.rs b/pallets/subtensor/src/rpc_info/neuron_info.rs index 8eae264c6e..6e29a51ef5 100644 --- a/pallets/subtensor/src/rpc_info/neuron_info.rs +++ b/pallets/subtensor/src/rpc_info/neuron_info.rs @@ -2,7 +2,7 @@ use super::*; use frame_support::pallet_prelude::{Decode, Encode}; extern crate alloc; use codec::Compact; -use subtensor_runtime_common::{AlphaCurrency, NetUid}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex}; #[freeze_struct("9e5a291e7e71482d")] 
#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -87,16 +87,16 @@ impl Pallet { let active = Self::get_active_for_uid(netuid, uid); let rank = Self::get_rank_for_uid(netuid, uid); let emission = Self::get_emission_for_uid(netuid, uid); - let incentive = Self::get_incentive_for_uid(netuid, uid); + let incentive = Self::get_incentive_for_uid(netuid.into(), uid); let consensus = Self::get_consensus_for_uid(netuid, uid); let trust = Self::get_trust_for_uid(netuid, uid); let validator_trust = Self::get_validator_trust_for_uid(netuid, uid); let dividends = Self::get_dividends_for_uid(netuid, uid); let pruning_score = Self::get_pruning_score_for_uid(netuid, uid); - let last_update = Self::get_last_update_for_uid(netuid, uid); + let last_update = Self::get_last_update_for_uid(NetUidStorageIndex::from(netuid), uid); let validator_permit = Self::get_validator_permit_for_uid(netuid, uid); - let weights = Weights::::get(netuid, uid) + let weights = Weights::::get(NetUidStorageIndex::from(netuid), uid) .into_iter() .filter_map(|(i, w)| { if w > 0 { @@ -107,7 +107,7 @@ impl Pallet { }) .collect::, Compact)>>(); - let bonds = >::get(netuid, uid) + let bonds = Bonds::::get(NetUidStorageIndex::from(netuid), uid) .iter() .filter_map(|(i, b)| { if *b > 0 { @@ -173,13 +173,13 @@ impl Pallet { let active = Self::get_active_for_uid(netuid, uid); let rank = Self::get_rank_for_uid(netuid, uid); let emission = Self::get_emission_for_uid(netuid, uid); - let incentive = Self::get_incentive_for_uid(netuid, uid); + let incentive = Self::get_incentive_for_uid(netuid.into(), uid); let consensus = Self::get_consensus_for_uid(netuid, uid); let trust = Self::get_trust_for_uid(netuid, uid); let validator_trust = Self::get_validator_trust_for_uid(netuid, uid); let dividends = Self::get_dividends_for_uid(netuid, uid); let pruning_score = Self::get_pruning_score_for_uid(netuid, uid); - let last_update = Self::get_last_update_for_uid(netuid, uid); + let last_update = 
Self::get_last_update_for_uid(NetUidStorageIndex::from(netuid), uid); let validator_permit = Self::get_validator_permit_for_uid(netuid, uid); let stake: Vec<(T::AccountId, Compact)> = vec![( diff --git a/pallets/subtensor/src/rpc_info/show_subnet.rs b/pallets/subtensor/src/rpc_info/show_subnet.rs index 2123345a4e..abd9670bb8 100644 --- a/pallets/subtensor/src/rpc_info/show_subnet.rs +++ b/pallets/subtensor/src/rpc_info/show_subnet.rs @@ -4,7 +4,7 @@ use crate::epoch::math::*; use codec::Compact; use frame_support::pallet_prelude::{Decode, Encode}; use substrate_fixed::types::I64F64; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; #[freeze_struct("9354762261420485")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -103,7 +103,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(); - let last_update: Vec> = LastUpdate::::get(netuid) + let last_update: Vec> = LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(); @@ -115,7 +115,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(); - let incentives: Vec> = Incentive::::get(netuid) + let incentives: Vec> = Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(); diff --git a/pallets/subtensor/src/rpc_info/subnet_info.rs b/pallets/subtensor/src/rpc_info/subnet_info.rs index d1e0a05419..7ca8a8f948 100644 --- a/pallets/subtensor/src/rpc_info/subnet_info.rs +++ b/pallets/subtensor/src/rpc_info/subnet_info.rs @@ -4,7 +4,7 @@ use frame_support::storage::IterableStorageMap; extern crate alloc; use codec::Compact; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::{NetUid, TaoCurrency}; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, TaoCurrency}; #[freeze_struct("edd6bd3273dfea76")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -286,7 +286,7 @@ impl 
Pallet { let target_regs_per_interval = Self::get_target_registrations_per_interval(netuid); let min_burn = Self::get_min_burn(netuid); let max_burn = Self::get_max_burn(netuid); - let bonds_moving_avg = Self::get_bonds_moving_average(netuid); + let bonds_moving_avg = Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)); let max_regs_per_block = Self::get_max_registrations_per_block(netuid); let serving_rate_limit = Self::get_serving_rate_limit(netuid); let max_validators = Self::get_max_allowed_validators(netuid); @@ -349,7 +349,7 @@ impl Pallet { let target_regs_per_interval = Self::get_target_registrations_per_interval(netuid); let min_burn = Self::get_min_burn(netuid); let max_burn = Self::get_max_burn(netuid); - let bonds_moving_avg = Self::get_bonds_moving_average(netuid); + let bonds_moving_avg = Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)); let max_regs_per_block = Self::get_max_registrations_per_block(netuid); let serving_rate_limit = Self::get_serving_rate_limit(netuid); let max_validators = Self::get_max_allowed_validators(netuid); diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 816f4818bd..37e20299a9 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -2,10 +2,11 @@ //! 
use super::*; +use crate::epoch::run_epoch::EpochTerms; use alloc::collections::BTreeMap; use safe_math::*; -use sp_runtime::SaturatedConversion; -use subtensor_runtime_common::{AlphaCurrency, NetUid, SubId}; +use substrate_fixed::types::U64F64; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId}; pub type LeaseId = u32; @@ -27,16 +28,62 @@ pub type BalanceOf = pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 1024; impl Pallet { - pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUid { + pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUidStorageIndex { u16::from(sub_id) .saturating_mul(GLOBAL_MAX_SUBNET_COUNT) .saturating_add(u16::from(netuid)) .into() } + pub fn get_netuid_and_subid( + sub_or_netid: NetUidStorageIndex, + ) -> Result<(NetUid, SubId), Error> { + let maybe_netuid = u16::from(sub_or_netid).checked_rem(GLOBAL_MAX_SUBNET_COUNT); + if let Some(netuid_u16) = maybe_netuid { + let netuid = NetUid::from(netuid_u16); + + // Make sure the base subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + // Extract sub_id + let sub_id_u8 = u8::try_from(u16::from(sub_or_netid).safe_div(GLOBAL_MAX_SUBNET_COUNT)) + .map_err(|_| Error::::SubNetworkDoesNotExist)?; + let sub_id = SubId::from(sub_id_u8); + + if SubsubnetCountCurrent::::get(netuid) > sub_id { + Ok((netuid, sub_id)) + } else { + Err(Error::::SubNetworkDoesNotExist.into()) + } + } else { + Err(Error::::SubNetworkDoesNotExist.into()) + } + } + + pub fn ensure_subsubnet_exists(netuid: NetUid, sub_id: SubId) -> DispatchResult { + // Make sure the base subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + // Make sure the subsub limit is not exceeded + ensure!( + SubsubnetCountCurrent::::get(netuid) > sub_id, + Error::::SubNetworkDoesNotExist + ); + Ok(()) + } + /// Set the desired valus of sub-subnet count for a subnet identified /// by netuid - pub fn 
do_set_desired_subsubnet_count(netuid: NetUid, subsubnet_count: u8) -> DispatchResult { + pub fn do_set_desired_subsubnet_count( + netuid: NetUid, + subsubnet_count: SubId, + ) -> DispatchResult { // Make sure the subnet exists ensure!( Self::if_subnet_exist(netuid), @@ -44,7 +91,7 @@ impl Pallet { ); // Count cannot be zero - ensure!(subsubnet_count > 0, Error::::InvalidValue); + ensure!(subsubnet_count > 0.into(), Error::::InvalidValue); // Make sure we are not exceeding the max sub-subnet count ensure!( @@ -69,15 +116,14 @@ impl Pallet { let super_block = super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); if let Some(rem) = current_block.checked_rem(super_block) { if rem == 0 { - let old_count = SubsubnetCountCurrent::::get(netuid); - let desired_count = SubsubnetCountDesired::::get(netuid); + let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); + let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); let min_possible_count = old_count - .saturating_sub(GlobalSubsubnetDecreasePerSuperblock::::get()) + .saturating_sub(u8::from(GlobalSubsubnetDecreasePerSuperblock::::get())) .max(1); let new_count = desired_count.max(min_possible_count); if old_count > new_count { - todo!(); // Cleanup weights // Cleanup StakeWeight @@ -93,7 +139,7 @@ impl Pallet { // Cleanup ValidatorPermit } - SubsubnetCountCurrent::::insert(netuid, new_count); + SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); } } }); @@ -116,6 +162,23 @@ impl Pallet { result } + fn weighted_acc_u16(existing: u16, added: u16, weight: U64F64) -> u16 { + U64F64::saturating_from_num(existing) + .saturating_add(U64F64::saturating_from_num(added).saturating_mul(weight)) + .saturating_to_num::() + } + + fn weighted_acc_alpha( + existing: AlphaCurrency, + added: AlphaCurrency, + weight: U64F64, + ) -> AlphaCurrency { + U64F64::saturating_from_num(existing) + .saturating_add(U64F64::saturating_from_num(added).saturating_mul(weight)) + .saturating_to_num::() + 
.into() + } + /// Splits rao_emission between different sub-subnets using `split_emissions` function. /// /// Runs the epoch function for each sub-subnet and consolidates hotkey_emission @@ -125,37 +188,97 @@ impl Pallet { netuid: NetUid, rao_emission: AlphaCurrency, ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { - let aggregated: BTreeMap = + let aggregated: BTreeMap = Self::split_emissions(netuid, rao_emission) .into_iter() .enumerate() // Run epoch function for each subsubnet to distribute its portion of emissions - .flat_map(|(sub_id, emission)| { - // This is subsubnet ID, e.g. a 0-7 number - let sub_id_u8: u8 = sub_id.saturated_into(); - // This is netuid index for storing subsubnet data in storage maps and for using in - // epoch function - let subsub_netuid = - Self::get_subsubnet_storage_index(netuid, SubId::from(sub_id_u8)); - // epoch returns: Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> - Self::epoch(subsub_netuid, emission).into_iter() + .flat_map(|(sub_id_usize, sub_emission)| { + let sub_id_u8: u8 = sub_id_usize.try_into().unwrap_or_default(); + let sub_id = SubId::from(sub_id_u8); + + // Run epoch function on the subsubnet emission + let epoch_output = Self::epoch_subsubnet(netuid, sub_id, sub_emission); + Self::persist_subsub_epoch_terms(netuid, sub_id, &epoch_output.as_map()); + + // Calculate subsubnet weight from the split emission (not the other way because preserving + // emission accuracy is the priority) + let sub_weight = U64F64::saturating_from_num(sub_emission) + .safe_div(U64F64::saturating_from_num(rao_emission)); + + // Produce an iterator of (hotkey, (terms, sub_weight)) tuples + epoch_output + .0 + .into_iter() + .map(move |(hotkey, terms)| (hotkey, (terms, sub_weight))) }) // Consolidate the hotkey emissions into a single BTreeMap - .fold(BTreeMap::new(), |mut acc, (hotkey, divs, incs)| { + .fold(BTreeMap::new(), |mut acc, (hotkey, (terms, sub_weight))| { + + println!("Hotkey: {:?}, terms: {:?}", hotkey, terms); + 
acc.entry(hotkey) - .and_modify(|tot| { - tot.0 = tot.0.saturating_add(divs); - tot.1 = tot.1.saturating_add(incs); + .and_modify(|acc_terms| { + acc_terms.dividend = Self::weighted_acc_u16( + acc_terms.dividend, + terms.dividend, + sub_weight, + ); + acc_terms.validator_emission = Self::weighted_acc_alpha( + acc_terms.validator_emission, + terms.validator_emission, + sub_weight, + ); + acc_terms.server_emission = Self::weighted_acc_alpha( + acc_terms.server_emission, + terms.server_emission, + sub_weight, + ); + acc_terms.stake_weight = Self::weighted_acc_u16( + acc_terms.stake_weight, + terms.stake_weight, + sub_weight, + ); + acc_terms.active = acc_terms.active | terms.active; + acc_terms.emission = Self::weighted_acc_alpha( + acc_terms.emission, + terms.emission, + sub_weight, + ); + acc_terms.rank = + Self::weighted_acc_u16(acc_terms.rank, terms.rank, sub_weight); + acc_terms.trust = + Self::weighted_acc_u16(acc_terms.trust, terms.trust, sub_weight); + acc_terms.consensus = Self::weighted_acc_u16( + acc_terms.consensus, + terms.consensus, + sub_weight, + ); + acc_terms.pruning_score = Self::weighted_acc_u16( + acc_terms.pruning_score, + terms.pruning_score, + sub_weight, + ); + acc_terms.validator_trust = Self::weighted_acc_u16( + acc_terms.validator_trust, + terms.validator_trust, + sub_weight, + ); + acc_terms.new_validator_permit = + acc_terms.new_validator_permit | terms.new_validator_permit; }) - .or_insert((divs, incs)); + .or_insert(terms); acc }); - // Remap BTreeMap back to Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> format - // for processing in run_coinbase + // State updates from epoch function + Self::persist_netuid_epoch_terms(netuid, &aggregated); + + // Remap BTreeMap back to Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> format + // for processing emissions in run_coinbase aggregated .into_iter() - .map(|(hotkey, (divs, incs))| (hotkey, divs, incs)) + .map(|(hotkey, terms)| (hotkey, terms.validator_emission, terms.server_emission)) 
.collect() } } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index f5a14c490b..b12aa24e25 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -22,9 +22,12 @@ impl Pallet { Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + Incentive::::mutate(netuid_index, |v| Self::set_element_at(v, neuron_index, 0)); + Bonds::::remove(netuid_index, neuron_uid); // Remove bonds for Validator. + } Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. } /// Replace the neuron under this uid. 
@@ -93,9 +96,12 @@ impl Pallet { Active::::mutate(netuid, |v| v.push(true)); Emission::::mutate(netuid, |v| v.push(0.into())); Consensus::::mutate(netuid, |v| v.push(0)); - Incentive::::mutate(netuid, |v| v.push(0)); + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + Incentive::::mutate(netuid_index, |v| v.push(0)); + LastUpdate::::mutate(netuid_index, |v| v.push(block_number)); + } Dividends::::mutate(netuid, |v| v.push(0)); - LastUpdate::::mutate(netuid, |v| v.push(block_number)); PruningScores::::mutate(netuid, |v| v.push(0)); ValidatorTrust::::mutate(netuid, |v| v.push(0)); ValidatorPermit::::mutate(netuid, |v| v.push(false)); diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index 7d49e0d40a..bf9d573644 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -8,7 +8,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Hash}, }; use sp_std::{collections::vec_deque::VecDeque, vec}; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; impl Pallet { /// ---- The implementation for committing weight hashes. @@ -44,6 +44,27 @@ impl Pallet { netuid: NetUid, commit_hash: H256, ) -> DispatchResult { + Self::internal_commit_weights(origin, netuid, SubId::MAIN, commit_hash) + } + + pub fn do_commit_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit_hash: H256, + ) -> DispatchResult { + Self::internal_commit_weights(origin, netuid, subid, commit_hash) + } + + fn internal_commit_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit_hash: H256, + ) -> DispatchResult { + // Calculate subnet storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // 1. Verify the caller's signature (hotkey). 
let who = ensure_signed(origin)?; @@ -65,7 +86,8 @@ impl Pallet { let commit_block = Self::get_current_block_as_u64(); let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &who)?; ensure!( - Self::check_rate_limit(netuid, neuron_uid, commit_block), + // Rate limiting should happen per sub-subnet, so use netuid_index here + Self::check_rate_limit(netuid_index, neuron_uid, commit_block), Error::::CommittingWeightsTooFast ); @@ -73,7 +95,7 @@ impl Pallet { let (first_reveal_block, last_reveal_block) = Self::get_reveal_blocks(netuid, commit_block); // 6. Retrieve or initialize the VecDeque of commits for the hotkey. - WeightCommits::::try_mutate(netuid, &who, |maybe_commits| -> DispatchResult { + WeightCommits::::try_mutate(netuid_index, &who, |maybe_commits| -> DispatchResult { let mut commits: VecDeque<(H256, u64, u64, u64)> = maybe_commits.take().unwrap_or_default(); @@ -104,7 +126,7 @@ impl Pallet { Self::deposit_event(Event::WeightsCommitted(who.clone(), netuid, commit_hash)); // 12. Update the last commit block for the hotkey's UID. - Self::set_last_update_for_uid(netuid, neuron_uid, commit_block); + Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); // 13. Return success. 
Ok(()) @@ -233,6 +255,45 @@ impl Pallet { reveal_round: u64, commit_reveal_version: u16, ) -> DispatchResult { + Self::internal_commit_timelocked_weights( + origin, + netuid, + SubId::MAIN, + commit, + reveal_round, + commit_reveal_version, + ) + } + + pub fn do_commit_timelocked_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + Self::internal_commit_timelocked_weights( + origin, + netuid, + subid, + commit, + reveal_round, + commit_reveal_version, + ) + } + + pub fn internal_commit_timelocked_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // 1. Verify the caller's signature (hotkey). let who = ensure_signed(origin)?; @@ -260,7 +321,7 @@ impl Pallet { let commit_block = Self::get_current_block_as_u64(); let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &who)?; ensure!( - Self::check_rate_limit(netuid, neuron_uid, commit_block), + Self::check_rate_limit(netuid_index, neuron_uid, commit_block), Error::::CommittingWeightsTooFast ); @@ -297,7 +358,7 @@ impl Pallet { )); // 10. Update the last commit block for the hotkey's UID. - Self::set_last_update_for_uid(netuid, neuron_uid, commit_block); + Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); // 11. Return success. 
Ok(()) @@ -348,6 +409,33 @@ impl Pallet { salt: Vec, version_key: u64, ) -> DispatchResult { + Self::internal_reveal_weights(origin, netuid, SubId::MAIN, uids, values, salt, version_key) + } + + pub fn do_reveal_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + Self::internal_reveal_weights(origin, netuid, subid, uids, values, salt, version_key) + } + + fn internal_reveal_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // --- 1. Check the caller's signature (hotkey). let who = ensure_signed(origin.clone())?; @@ -360,80 +448,90 @@ impl Pallet { ); // --- 3. Mutate the WeightCommits to retrieve existing commits for the user. - WeightCommits::::try_mutate_exists(netuid, &who, |maybe_commits| -> DispatchResult { - let commits = maybe_commits - .as_mut() - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 4. Remove any expired commits from the front of the queue, collecting their hashes. - let mut expired_hashes = Vec::new(); - while let Some((hash, commit_block, _, _)) = commits.front() { - if Self::is_commit_expired(netuid, *commit_block) { - // Collect the expired commit hash - expired_hashes.push(*hash); - commits.pop_front(); - } else { - break; - } - } - - // --- 5. Hash the provided data. - let provided_hash: H256 = - Self::get_commit_hash(&who, netuid, &uids, &values, &salt, version_key); - - // --- 6. After removing expired commits, check if any commits are left. - if commits.is_empty() { - // Check if provided_hash matches any expired commits - if expired_hashes.contains(&provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); - } else { - return Err(Error::::NoWeightsCommitFound.into()); - } - } - - // --- 7. 
Search for the provided_hash in the non-expired commits. - if let Some(position) = commits - .iter() - .position(|(hash, _, _, _)| *hash == provided_hash) - { - // --- 8. Get the commit block for the commit being revealed. - let (_, commit_block, _, _) = commits - .get(position) + WeightCommits::::try_mutate_exists( + netuid_index, + &who, + |maybe_commits| -> DispatchResult { + let commits = maybe_commits + .as_mut() .ok_or(Error::::NoWeightsCommitFound)?; - // --- 9. Ensure the commit is ready to be revealed in the current block range. - ensure!( - Self::is_reveal_block_range(netuid, *commit_block), - Error::::RevealTooEarly - ); - - // --- 10. Remove all commits up to and including the one being revealed. - for _ in 0..=position { - commits.pop_front(); + // --- 4. Remove any expired commits from the front of the queue, collecting their hashes. + let mut expired_hashes = Vec::new(); + while let Some((hash, commit_block, _, _)) = commits.front() { + if Self::is_commit_expired(netuid, *commit_block) { + // Collect the expired commit hash + expired_hashes.push(*hash); + commits.pop_front(); + } else { + break; + } } - // --- 11. If the queue is now empty, remove the storage entry for the user. + // --- 5. Hash the provided data. + let provided_hash: H256 = + Self::get_commit_hash(&who, netuid, &uids, &values, &salt, version_key); + + // --- 6. After removing expired commits, check if any commits are left. if commits.is_empty() { - *maybe_commits = None; + // Check if provided_hash matches any expired commits + if expired_hashes.contains(&provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::NoWeightsCommitFound.into()); + } } - // --- 12. Proceed to set the revealed weights. - Self::do_set_weights(origin, netuid, uids.clone(), values.clone(), version_key)?; + // --- 7. Search for the provided_hash in the non-expired commits. 
+ if let Some(position) = commits + .iter() + .position(|(hash, _, _, _)| *hash == provided_hash) + { + // --- 8. Get the commit block for the commit being revealed. + let (_, commit_block, _, _) = commits + .get(position) + .ok_or(Error::::NoWeightsCommitFound)?; + + // --- 9. Ensure the commit is ready to be revealed in the current block range. + ensure!( + Self::is_reveal_block_range(netuid, *commit_block), + Error::::RevealTooEarly + ); + + // --- 10. Remove all commits up to and including the one being revealed. + for _ in 0..=position { + commits.pop_front(); + } - // --- 13. Emit the WeightsRevealed event. - Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid, provided_hash)); + // --- 11. If the queue is now empty, remove the storage entry for the user. + if commits.is_empty() { + *maybe_commits = None; + } - // --- 14. Return ok. - Ok(()) - } else { - // --- 15. The provided_hash does not match any non-expired commits. - if expired_hashes.contains(&provided_hash) { - Err(Error::::ExpiredWeightCommit.into()) + // --- 12. Proceed to set the revealed weights. + Self::do_set_weights( + origin, + netuid, + uids.clone(), + values.clone(), + version_key, + )?; + + // --- 13. Emit the WeightsRevealed event. + Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid, provided_hash)); + + // --- 14. Return ok. + Ok(()) } else { - Err(Error::::InvalidRevealCommitHashNotMatch.into()) + // --- 15. The provided_hash does not match any non-expired commits. + if expired_hashes.contains(&provided_hash) { + Err(Error::::ExpiredWeightCommit.into()) + } else { + Err(Error::::InvalidRevealCommitHashNotMatch.into()) + } } - } - }) + }, + ) } /// ---- The implementation for batch revealing committed weights. @@ -483,6 +581,9 @@ impl Pallet { salts_list: Vec>, version_keys: Vec, ) -> DispatchResult { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, SubId::MAIN); + // --- 1. 
Check that the input lists are of the same length. let num_reveals = uids_list.len(); ensure!( @@ -504,176 +605,128 @@ impl Pallet { ); // --- 4. Mutate the WeightCommits to retrieve existing commits for the user. - WeightCommits::::try_mutate_exists(netuid, &who, |maybe_commits| -> DispatchResult { - let commits = maybe_commits - .as_mut() - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 5. Remove any expired commits from the front of the queue, collecting their hashes. - let mut expired_hashes = Vec::new(); - while let Some((hash, commit_block, _, _)) = commits.front() { - if Self::is_commit_expired(netuid, *commit_block) { - // Collect the expired commit hash - expired_hashes.push(*hash); - commits.pop_front(); - } else { - break; - } - } - - // --- 6. Prepare to collect all provided hashes and their corresponding reveals. - let mut provided_hashes = Vec::new(); - let mut reveals = Vec::new(); - let mut revealed_hashes: Vec = Vec::with_capacity(num_reveals); - - for ((uids, values), (salt, version_key)) in uids_list - .into_iter() - .zip(values_list) - .zip(salts_list.into_iter().zip(version_keys)) - { - // --- 6a. Hash the provided data. - let provided_hash: H256 = BlakeTwo256::hash_of(&( - who.clone(), - netuid, - uids.clone(), - values.clone(), - salt.clone(), - version_key, - )); - provided_hashes.push(provided_hash); - reveals.push((uids, values, version_key, provided_hash)); - } + WeightCommits::::try_mutate_exists( + netuid_index, + &who, + |maybe_commits| -> DispatchResult { + let commits = maybe_commits + .as_mut() + .ok_or(Error::::NoWeightsCommitFound)?; - // --- 7. Validate all reveals first to ensure atomicity. - for (_uids, _values, _version_key, provided_hash) in &reveals { - // --- 7a. Check if the provided_hash is in the non-expired commits. - if !commits - .iter() - .any(|(hash, _, _, _)| *hash == *provided_hash) - { - // --- 7b. If not found, check if it matches any expired commits. 
- if expired_hashes.contains(provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); + // --- 5. Remove any expired commits from the front of the queue, collecting their hashes. + let mut expired_hashes = Vec::new(); + while let Some((hash, commit_block, _, _)) = commits.front() { + if Self::is_commit_expired(netuid, *commit_block) { + // Collect the expired commit hash + expired_hashes.push(*hash); + commits.pop_front(); } else { - return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + break; } } - // --- 7c. Find the commit corresponding to the provided_hash. - let commit = commits - .iter() - .find(|(hash, _, _, _)| *hash == *provided_hash) - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 7d. Check if the commit is within the reveal window. - ensure!( - Self::is_reveal_block_range(netuid, commit.1), - Error::::RevealTooEarly - ); - } + // --- 6. Prepare to collect all provided hashes and their corresponding reveals. + let mut provided_hashes = Vec::new(); + let mut reveals = Vec::new(); + let mut revealed_hashes: Vec = Vec::with_capacity(num_reveals); - // --- 8. All reveals are valid. Proceed to remove and process each reveal. - for (uids, values, version_key, provided_hash) in reveals { - // --- 8a. Find the position of the provided_hash. - if let Some(position) = commits - .iter() - .position(|(hash, _, _, _)| *hash == provided_hash) + for ((uids, values), (salt, version_key)) in uids_list + .into_iter() + .zip(values_list) + .zip(salts_list.into_iter().zip(version_keys)) { - // --- 8b. Remove the commit from the queue. - commits.remove(position); + // --- 6a. Hash the provided data. + let provided_hash: H256 = BlakeTwo256::hash_of(&( + who.clone(), + netuid, + uids.clone(), + values.clone(), + salt.clone(), + version_key, + )); + provided_hashes.push(provided_hash); + reveals.push((uids, values, version_key, provided_hash)); + } - // --- 8c. Proceed to set the revealed weights. 
- Self::do_set_weights(origin.clone(), netuid, uids, values, version_key)?; + // --- 7. Validate all reveals first to ensure atomicity. + for (_uids, _values, _version_key, provided_hash) in &reveals { + // --- 7a. Check if the provided_hash is in the non-expired commits. + if !commits + .iter() + .any(|(hash, _, _, _)| *hash == *provided_hash) + { + // --- 7b. If not found, check if it matches any expired commits. + if expired_hashes.contains(provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + } + } - // --- 8d. Collect the revealed hash. - revealed_hashes.push(provided_hash); - } else if expired_hashes.contains(&provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); - } else { - return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + // --- 7c. Find the commit corresponding to the provided_hash. + let commit = commits + .iter() + .find(|(hash, _, _, _)| *hash == *provided_hash) + .ok_or(Error::::NoWeightsCommitFound)?; + + // --- 7d. Check if the commit is within the reveal window. + ensure!( + Self::is_reveal_block_range(netuid, commit.1), + Error::::RevealTooEarly + ); } - } - // --- 9. If the queue is now empty, remove the storage entry for the user. - if commits.is_empty() { - *maybe_commits = None; - } + // --- 8. All reveals are valid. Proceed to remove and process each reveal. + for (uids, values, version_key, provided_hash) in reveals { + // --- 8a. Find the position of the provided_hash. + if let Some(position) = commits + .iter() + .position(|(hash, _, _, _)| *hash == provided_hash) + { + // --- 8b. Remove the commit from the queue. + commits.remove(position); + + // --- 8c. Proceed to set the revealed weights. + Self::do_set_weights(origin.clone(), netuid, uids, values, version_key)?; + + // --- 8d. Collect the revealed hash. 
+ revealed_hashes.push(provided_hash); + } else if expired_hashes.contains(&provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + } + } - // --- 10. Emit the WeightsBatchRevealed event with all revealed hashes. - Self::deposit_event(Event::WeightsBatchRevealed( - who.clone(), - netuid, - revealed_hashes, - )); + // --- 9. If the queue is now empty, remove the storage entry for the user. + if commits.is_empty() { + *maybe_commits = None; + } - // --- 11. Return ok. - Ok(()) - }) + // --- 10. Emit the WeightsBatchRevealed event with all revealed hashes. + Self::deposit_event(Event::WeightsBatchRevealed( + who.clone(), + netuid, + revealed_hashes, + )); + + // --- 11. Return ok. + Ok(()) + }, + ) } - /// ---- The implementation for the extrinsic set_weights. - /// - /// # Args: - /// * 'origin': (RuntimeOrigin): - /// - The signature of the calling hotkey. - /// - /// * 'netuid' (u16): - /// - The u16 network identifier. - /// - /// * 'uids' ( Vec ): - /// - The uids of the weights to be set on the chain. - /// - /// * 'values' ( Vec ): - /// - The values of the weights to set on the chain. - /// - /// * 'version_key' ( u64 ): - /// - The network version key. - /// - /// # Event: - /// * WeightsSet; - /// - On successfully setting the weights on chain. - /// - /// # Raises: - /// * 'SubNetworkDoesNotExist': - /// - Attempting to set weights on a non-existent network. - /// - /// * 'NotRegistered': - /// - Attempting to set weights from a non registered account. - /// - /// * 'IncorrectWeightVersionKey': - /// - Attempting to set weights without having an up-to-date version_key. - /// - /// * 'SettingWeightsTooFast': - /// - Attempting to set weights faster than the weights_set_rate_limit. - /// - /// * 'NeuronNoValidatorPermit': - /// - Attempting to set non-self weights without a validator permit. 
- /// - /// * 'WeightVecNotEqualSize': - /// - Attempting to set weights with uids not of same length. - /// - /// * 'DuplicateUids': - /// - Attempting to set weights with duplicate uids. - /// - /// * 'UidsLengthExceedUidsInSubNet': - /// - Attempting to set weights above the max allowed uids. - /// - /// * 'UidVecContainInvalidOne': - /// - Attempting to set weights with invalid uids. - /// - /// * 'WeightVecLengthIsLow': - /// - Attempting to set weights with fewer weights than min. - /// - /// * 'MaxWeightExceeded': - /// - Attempting to set weights with max value exceeding limit. - /// - pub fn do_set_weights( + fn internal_set_weights( origin: T::RuntimeOrigin, netuid: NetUid, + subid: SubId, uids: Vec, values: Vec, version_key: u64, ) -> dispatch::DispatchResult { + // Calculate subnet storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // --- 1. Check the caller's signature. This is the hotkey of a registered account. let hotkey = ensure_signed(origin)?; log::debug!( @@ -689,11 +742,8 @@ impl Pallet { Error::::WeightVecNotEqualSize ); - // --- 3. Check to see if this is a valid network. - ensure!( - Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist - ); + // --- 3. Check to see if this is a valid network and sub-subnet. + Self::ensure_subsubnet_exists(netuid, subid)?; // --- 4. Check to see if the number of uids is within the max allowed uids for this network. ensure!( @@ -724,7 +774,8 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); if !Self::get_commit_reveal_weights_enabled(netuid) { ensure!( - Self::check_rate_limit(netuid, neuron_uid, current_block), + // Rate limit should apply per sub-subnet, so use netuid_index here + Self::check_rate_limit(netuid_index, neuron_uid, current_block), Error::::SettingWeightsTooFast ); } @@ -764,22 +815,158 @@ impl Pallet { zipped_weights.push((*uid, *val)) } - // --- 17. Set weights under netuid, uid double map entry. 
- Weights::::insert(netuid, neuron_uid, zipped_weights); + // --- 17. Set weights under netuid_index (sub-subnet), uid double map entry. + Weights::::insert(netuid_index, neuron_uid, zipped_weights); // --- 18. Set the activity for the weights on this network. if !Self::get_commit_reveal_weights_enabled(netuid) { - Self::set_last_update_for_uid(netuid, neuron_uid, current_block); + Self::set_last_update_for_uid(netuid_index, neuron_uid, current_block); } // --- 19. Emit the tracking event. - log::debug!("WeightsSet( netuid:{netuid:?}, neuron_uid:{neuron_uid:?} )"); - Self::deposit_event(Event::WeightsSet(netuid, neuron_uid)); + log::debug!("WeightsSet( netuid:{netuid_index:?}, neuron_uid:{neuron_uid:?} )"); + Self::deposit_event(Event::WeightsSet(netuid_index, neuron_uid)); // --- 20. Return ok. Ok(()) } + /// ---- The implementation for the extrinsic set_weights. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the calling hotkey. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'uids' ( Vec ): + /// - The uids of the weights to be set on the chain. + /// + /// * 'values' ( Vec ): + /// - The values of the weights to set on the chain. + /// + /// * 'version_key' ( u64 ): + /// - The network version key. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'IncorrectWeightVersionKey': + /// - Attempting to set weights without having an up-to-date version_key. + /// + /// * 'SettingWeightsTooFast': + /// - Attempting to set weights faster than the weights_set_rate_limit. + /// + /// * 'NeuronNoValidatorPermit': + /// - Attempting to set non-self weights without a validator permit. 
+ /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + /// + pub fn do_set_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + uids: Vec, + values: Vec, + version_key: u64, + ) -> dispatch::DispatchResult { + Self::internal_set_weights(origin, netuid, SubId::MAIN, uids, values, version_key) + } + + /// ---- The implementation for the extrinsic set_weights. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the calling hotkey. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'subid' (u8): + /// - The u8 identifier of sub-subnet. + /// + /// * 'uids' ( Vec ): + /// - The uids of the weights to be set on the chain. + /// + /// * 'values' ( Vec ): + /// - The values of the weights to set on the chain. + /// + /// * 'version_key' ( u64 ): + /// - The network version key. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'IncorrectWeightVersionKey': + /// - Attempting to set weights without having an up-to-date version_key. + /// + /// * 'SettingWeightsTooFast': + /// - Attempting to set weights faster than the weights_set_rate_limit. 
+ /// + /// * 'NeuronNoValidatorPermit': + /// - Attempting to set non-self weights without a validator permit. + /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + /// + pub fn do_set_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + version_key: u64, + ) -> dispatch::DispatchResult { + Self::internal_set_weights(origin, netuid, subid, uids, values, version_key) + } + /// ---- The implementation for the extrinsic batch_set_weights. /// /// This call runs a batch of set weights calls, continuing on errors. @@ -887,10 +1074,15 @@ impl Pallet { /// Checks if the neuron has set weights within the weights_set_rate_limit. /// - pub fn check_rate_limit(netuid: NetUid, neuron_uid: u16, current_block: u64) -> bool { + pub fn check_rate_limit( + netuid_index: NetUidStorageIndex, + neuron_uid: u16, + current_block: u64, + ) -> bool { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); if Self::is_uid_exist_on_network(netuid, neuron_uid) { // --- 1. Ensure that the diff between current and last_set weights is greater than limit. - let last_set_weights: u64 = Self::get_last_update_for_uid(netuid, neuron_uid); + let last_set_weights: u64 = Self::get_last_update_for_uid(netuid_index, neuron_uid); if last_set_weights == 0 { return true; } // (Storage default) Never set weights. 
diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs index e233460e39..177708ed90 100644 --- a/pallets/subtensor/src/swap/swap_hotkey.rs +++ b/pallets/subtensor/src/swap/swap_hotkey.rs @@ -2,7 +2,7 @@ use super::*; use frame_support::weights::Weight; use sp_core::Get; use substrate_fixed::types::U64F64; -use subtensor_runtime_common::{Currency, NetUid}; +use subtensor_runtime_common::{Currency, NetUid, SubId}; impl Pallet { /// Swaps the hotkey of a coldkey account. @@ -414,10 +414,15 @@ impl Pallet { // 3.5 Swap WeightCommits // WeightCommits( hotkey ) --> Vec -- the weight commits for the hotkey. if is_network_member { - if let Ok(old_weight_commits) = WeightCommits::::try_get(netuid, old_hotkey) { - WeightCommits::::remove(netuid, old_hotkey); - WeightCommits::::insert(netuid, new_hotkey, old_weight_commits); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, SubId::from(subid)); + if let Ok(old_weight_commits) = + WeightCommits::::try_get(netuid_index, old_hotkey) + { + WeightCommits::::remove(netuid_index, old_hotkey); + WeightCommits::::insert(netuid_index, new_hotkey, old_weight_commits); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } } } diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index 41d25c8aea..6a6bc639b4 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -6,7 +6,7 @@ use super::mock::*; use approx::assert_abs_diff_eq; use frame_support::{assert_err, assert_noop, assert_ok}; use substrate_fixed::types::{I64F64, I96F32, U96F32}; -use subtensor_runtime_common::{AlphaCurrency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use 
crate::{utils::rate_limiting::TransactionType, *}; @@ -2839,6 +2839,7 @@ fn test_set_weights_no_parent() { /// Test that drain_pending_emission sends childkey take fully to the nominators if childkey /// doesn't have its own stake, independently of parent hotkey take. +/// cargo test --package pallet-subtensor --lib -- tests::children::test_childkey_take_drain --exact --show-output #[allow(clippy::assertions_on_constants)] #[test] fn test_childkey_take_drain() { @@ -2914,12 +2915,12 @@ fn test_childkey_take_drain() { )); // Setup YUMA so that it creates emissions - Weights::::insert(netuid, 0, vec![(2, 0xFFFF)]); - Weights::::insert(netuid, 1, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 0, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 1, vec![(2, 0xFFFF)]); BlockAtRegistration::::set(netuid, 0, 1); BlockAtRegistration::::set(netuid, 1, 1); BlockAtRegistration::::set(netuid, 2, 1); - LastUpdate::::set(netuid, vec![2, 2, 2]); + LastUpdate::::set(NetUidStorageIndex::from(netuid), vec![2, 2, 2]); Kappa::::set(netuid, u16::MAX / 5); ActivityCutoff::::set(netuid, u16::MAX); // makes all stake active ValidatorPermit::::insert(netuid, vec![true, true, false]); diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index a660f1b815..f6b128c342 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -9,7 +9,7 @@ use frame_support::assert_ok; use pallet_subtensor_swap::position::PositionId; use sp_core::U256; use substrate_fixed::types::{I64F64, I96F32, U96F32}; -use subtensor_runtime_common::AlphaCurrency; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex}; use subtensor_swap_interface::SwapHandler; #[allow(clippy::arithmetic_side_effects)] @@ -2489,7 +2489,7 @@ fn test_drain_pending_emission_zero_emission() { run_to_block_no_epoch(netuid, 50); // Clear incentive and dividends. 
- Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Dividends::::remove(netuid); // Set the emission to be ZERO. @@ -2507,7 +2507,12 @@ fn test_drain_pending_emission_zero_emission() { assert_eq!(new_stake, init_stake.into()); // Check that the incentive and dividends are set by epoch. - assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!( + Incentive::::get(NetUidStorageIndex::from(netuid)) + .iter() + .sum::() + > 0 + ); assert!(Dividends::::get(netuid).iter().sum::() > 0); }); } @@ -2573,7 +2578,7 @@ fn test_run_coinbase_not_started() { )); // Clear incentive and dividends. - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Dividends::::remove(netuid); // Step so tempo should run. @@ -2595,7 +2600,12 @@ fn test_run_coinbase_not_started() { assert_eq!(new_stake, init_stake.into()); // Check that the incentive and dividends are set. - assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!( + Incentive::::get(NetUidStorageIndex::from(netuid)) + .iter() + .sum::() + > 0 + ); assert!(Dividends::::get(netuid).iter().sum::() > 0); }); } @@ -2658,7 +2668,7 @@ fn test_run_coinbase_not_started_start_after() { )); // Clear incentive and dividends. - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Dividends::::remove(netuid); // Step so tempo should run. 
diff --git a/pallets/subtensor/src/tests/consensus.rs b/pallets/subtensor/src/tests/consensus.rs index 6a7aa7d467..7eb65c3fc0 100644 --- a/pallets/subtensor/src/tests/consensus.rs +++ b/pallets/subtensor/src/tests/consensus.rs @@ -13,6 +13,7 @@ use sp_core::U256; use std::time::Instant; use substrate_fixed::transcendental::{PI, cos, ln, sqrt}; use substrate_fixed::types::{I32F32, I64F64}; +use subtensor_runtime_common::NetUidStorageIndex; pub fn fixed(val: f32) -> I32F32 { I32F32::from_num(val) @@ -134,7 +135,7 @@ fn uid_stats(netuid: NetUid, uid: u16) { ); log::info!( "incentive: {:?}", - SubtensorModule::get_incentive_for_uid(netuid, uid) + SubtensorModule::get_incentive_for_uid(NetUidStorageIndex::from(netuid), uid) ); log::info!( "dividend: {:?}", diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index bdf675648b..07c938be98 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -11,7 +11,7 @@ use frame_support::{assert_err, assert_ok}; use rand::{Rng, SeedableRng, distributions::Uniform, rngs::StdRng, seq::SliceRandom, thread_rng}; use sp_core::{Get, U256}; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::{AlphaCurrency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use super::mock::*; @@ -128,7 +128,7 @@ fn uid_stats(netuid: NetUid, uid: u16) { ); log::info!( "incentive: {:?}", - SubtensorModule::get_incentive_for_uid(netuid, uid) + SubtensorModule::get_incentive_for_uid(NetUidStorageIndex::from(netuid), uid) ); log::info!( "dividend: {:?}", @@ -595,7 +595,10 @@ fn test_1_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 0); + assert_eq!( + 
SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); }); } @@ -657,7 +660,10 @@ fn test_10_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, i as u16), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, i as u16), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, i as u16), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, i as u16), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), i as u16), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, i as u16), 0); assert_eq!( SubtensorModule::get_emission_for_uid(netuid, i as u16), @@ -705,7 +711,7 @@ fn test_512_graph() { false, u16::MAX, ); - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); for uid in validators { assert_eq!( SubtensorModule::get_total_stake_for_hotkey(&(U256::from(uid))), @@ -714,7 +720,10 @@ fn test_512_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 1023); // Note D = floor(1 / 64 * 65_535) = 1023 assert_eq!( SubtensorModule::get_emission_for_uid(netuid, uid), @@ -732,7 +741,10 @@ fn test_512_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 146); // Note R = floor(1 / (512 - 64) * 65_535) = 146 assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 65535); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 146); // Note C = floor(1 / (512 - 64) * 65_535) = 146 - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 146); // Note I = floor(1 / (512 - 64) * 
65_535) = 146 + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 146 + ); // Note I = floor(1 / (512 - 64) * 65_535) = 146 assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); assert_eq!( SubtensorModule::get_emission_for_uid(netuid, uid), @@ -795,10 +807,10 @@ fn test_512_graph_random_weights() { bonds_penalty, ); - let bond = SubtensorModule::get_bonds(netuid); + let bond = SubtensorModule::get_bonds(netuid.into()); for uid in 0..network_n { rank.push(SubtensorModule::get_rank_for_uid(netuid, uid)); - incentive.push(SubtensorModule::get_incentive_for_uid(netuid, uid)); + incentive.push(SubtensorModule::get_incentive_for_uid(netuid.into(), uid)); dividend.push(SubtensorModule::get_dividends_for_uid(netuid, uid)); emission.push(SubtensorModule::get_emission_for_uid(netuid, uid)); bondv.push(bond[uid as usize][validator]); @@ -826,14 +838,14 @@ fn test_512_graph_random_weights() { bonds_penalty, ); // Assert that dense and sparse epoch results are equal - let bond = SubtensorModule::get_bonds(netuid); + let bond = SubtensorModule::get_bonds(netuid.into()); for uid in 0..network_n { assert_eq!( SubtensorModule::get_rank_for_uid(netuid, uid), rank[uid as usize] ); assert_eq!( - SubtensorModule::get_incentive_for_uid(netuid, uid), + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), incentive[uid as usize] ); assert_eq!( @@ -1070,7 +1082,7 @@ fn test_bonds() { E: [49999998, 99999999, 150000000, 200000001, 49998779, 100000610, 149996337, 200004272] P: [0.0499999989, 0.0999999992, 0.1500000006, 0.2000000011, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] emaB: [[(4, 0.2499999937), (5, 0.2499999953), (6, 0.2499999937), (7, 0.2499999937)], [(4, 0.4999999942), (5, 0.499999997), (6, 0.4999999942), (7, 0.4999999942)], [(4, 0.7499999937), (5, 0.7499999981), (6, 0.7499999995), (7, 0.7499999995)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = 
SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][4], 16383); assert_eq!(bonds[1][4], 32767); assert_eq!(bonds[2][4], 49151); @@ -1118,7 +1130,7 @@ fn test_bonds() { E: [44998351, 101110561, 151667215, 202223870, 49998779, 100000610, 149996337, 200004272] P: [0.0449983515, 0.1011105615, 0.1516672159, 0.2022238704, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] emaB: [[(4, 0.2225175085), (5, 0.2225175085), (6, 0.2225175085), (7, 0.2225175085)], [(4, 0.499993208), (5, 0.4999932083), (6, 0.4999932083), (7, 0.4999932083)], [(4, 0.7499966028), (5, 0.7499966032), (6, 0.7499966032), (7, 0.7499966032)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][4], 14582); assert_eq!(bonds[1][4], 32767); assert_eq!(bonds[2][4], 49151); @@ -1155,7 +1167,7 @@ fn test_bonds() { E: [40496805, 90999783, 157929636, 210573773, 49998779, 100000610, 149996337, 200004272] P: [0.040496806, 0.0909997837, 0.157929636, 0.2105737738, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] emaB: [[(4, 0.192316476), (5, 0.192316476), (6, 0.192316476), (7, 0.192316476)], [(4, 0.4321515555), (5, 0.4321515558), (6, 0.4321515558), (7, 0.4321515558)], [(4, 0.7499967015), (5, 0.7499967027), (6, 0.7499967027), (7, 0.7499967027)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][4], 12603); assert_eq!(bonds[1][4], 28321); assert_eq!(bonds[2][4], 49151); @@ -1192,7 +1204,7 @@ fn test_bonds() { E: [99999999, 199999999, 299999999, 399999999, 0, 0, 0, 0] P: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] emaB: [[(4, 0.1923094518), (5, 0.1923094518), (6, 0.1923094518), (7, 0.1923094518)], [(4, 0.4321507583), (5, 0.4321507583), (6, 0.4321507583), (7, 0.4321507583)], [(4, 0.7499961846), (5, 0.7499961846), (6, 0.7499961846), 
(7, 0.7499961846)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 12602); assert_eq!(bonds[1][7], 28320); assert_eq!(bonds[2][7], 49150); @@ -1228,7 +1240,7 @@ fn test_bonds() { E: [36443733, 81898628, 163565493, 218092144, 0, 0, 0, 500000000] P: [0.0364437331, 0.081898629, 0.1635654932, 0.2180921442, 0, 0, 0, 0.5] emaB: [[(4, 0.1922941932), (5, 0.1922941932), (6, 0.1922941932), (7, 0.1671024568)], [(4, 0.4321354993), (5, 0.4321354993), (6, 0.4321354993), (7, 0.3755230587)], [(4, 0.7499809256), (5, 0.7499809256), (6, 0.7499809256), (7, 0.749983425)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 10951); assert_eq!(bonds[1][7], 24609); assert_eq!(bonds[2][7], 49150); @@ -1250,7 +1262,7 @@ fn test_bonds() { E: [32799427, 73706612, 168638129, 224855830, 0, 0, 0, 500000000] P: [0.0327994274, 0.0737066122, 0.1686381293, 0.2248558307, 0, 0, 0, 0.5] emaB: [[(4, 0.1922789337), (5, 0.1922789337), (6, 0.1922789337), (7, 0.1458686984)], [(4, 0.4321202405), (5, 0.4321202405), (6, 0.4321202405), (7, 0.3277949789)], [(4, 0.749965667), (5, 0.749965667), (6, 0.749965667), (7, 0.74998335)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 9559); assert_eq!(bonds[1][7], 21482); assert_eq!(bonds[2][7], 49150); @@ -1272,7 +1284,7 @@ fn test_bonds() { E: [29518068, 66336137, 173203134, 230942659, 0, 0, 0, 500000000] P: [0.029518068, 0.0663361375, 0.1732031347, 0.2309426593, 0, 0, 0, 0.5] emaB: [[(4, 0.192263675), (5, 0.192263675), (6, 0.192263675), (7, 0.1278155716)], [(4, 0.4321049813), (5, 0.4321049813), (6, 0.4321049813), (7, 0.2872407278)], [(4, 0.7499504078), (5, 
0.7499504078), (6, 0.7499504078), (7, 0.7499832863)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 8376); assert_eq!(bonds[1][7], 18824); assert_eq!(bonds[2][7], 49150); @@ -1408,7 +1420,7 @@ fn test_active_stake() { } else { SubtensorModule::epoch_dense(netuid, 1_000_000_000.into()); } - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); for uid in 0..n { // log::info!("\n{uid}" ); // uid_stats(netuid, uid); @@ -1473,7 +1485,7 @@ fn test_active_stake() { E: [274999999, 224999999, 250000000, 250000000] P: [0.275, 0.2249999999, 0.25, 0.25] P (u16): [65535, 53619, 59577, 59577] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 36044); // Note D = floor((0.5 * 0.9 + 0.1) * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -1537,7 +1549,7 @@ fn test_active_stake() { E: [272501132, 227498866, 250000000, 250000000] P: [0.272501133, 0.2274988669, 0.25, 0.25] P (u16): [65535, 54711, 60123, 60123] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 35716); // Note D = floor((0.55 * 0.9 + 0.5 * 0.1) * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -1736,7 +1748,7 @@ fn test_outdated_weights() { E: [250000000, 250000000, 500000000, 0] P: [0.25, 0.25, 0.5, 0] P (u16): [32767, 32767, 65535, 0] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 32767); // Note D = floor(0.5 * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -2035,7 +2047,7 @@ fn 
test_deregistered_miner_bonds() { } // Check the bond values for the servers - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); let bond_0_2 = bonds[0][2]; let bond_0_3 = bonds[0][3]; @@ -2107,7 +2119,7 @@ fn test_deregistered_miner_bonds() { } // Check the bond values for the servers - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); let bond_0_2_new = bonds[0][2]; let bond_0_3_new = bonds[0][3]; @@ -2483,11 +2495,15 @@ fn test_can_set_self_weight_as_subnet_owner() { // Set weight of 50% to each hotkey. // This includes a self-weight let fifty_percent: u16 = u16::MAX / 2; - Weights::::insert(netuid, 0, vec![(0, fifty_percent), (1, fifty_percent)]); + Weights::::insert( + NetUidStorageIndex::from(netuid), + 0, + vec![(0, fifty_percent), (1, fifty_percent)], + ); step_block(1); // Set updated so weights are valid - LastUpdate::::insert(netuid, vec![2, 0]); + LastUpdate::::insert(NetUidStorageIndex::from(netuid), vec![2, 0]); // Run epoch let hotkey_emission = SubtensorModule::epoch(netuid, to_emit.into()); @@ -2742,7 +2758,7 @@ fn run_epoch_and_check_bonds_dividends( target_dividends: &[f32], ) { run_epoch(netuid, sparse); - let bonds = SubtensorModule::get_bonds_fixed_proportion(netuid); + let bonds = SubtensorModule::get_bonds_fixed_proportion(netuid.into()); let dividends = SubtensorModule::get_dividends(netuid); let epsilon = I32F32::from_num(1e-3); @@ -3485,7 +3501,7 @@ fn test_yuma_3_bonds_reset() { if epoch == 20 { let hotkey = SubtensorModule::get_hotkey_for_net_and_uid(netuid, 3) .expect("Hotkey not found"); - let _ = SubtensorModule::do_reset_bonds(netuid, &hotkey); + let _ = SubtensorModule::do_reset_bonds(netuid.into(), &hotkey); } } 21 => { @@ -3650,7 +3666,10 @@ fn test_epoch_masks_incoming_to_sniped_uid_prevents_inheritance() { SubtensorModule::epoch(netuid, 1_000.into()); assert_eq!(SubtensorModule::get_rank_for_uid(netuid, new_uid), 0); - 
assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, new_uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), new_uid), + 0 + ); }); } diff --git a/pallets/subtensor/src/tests/staking.rs b/pallets/subtensor/src/tests/staking.rs index 8345d24fff..2013fe35ae 100644 --- a/pallets/subtensor/src/tests/staking.rs +++ b/pallets/subtensor/src/tests/staking.rs @@ -13,7 +13,9 @@ use sp_core::{Get, H256, U256}; use sp_runtime::traits::Dispatchable; use substrate_fixed::traits::FromFixed; use substrate_fixed::types::{I96F32, I110F18, U64F64, U96F32}; -use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT, NetUid, TaoCurrency}; +use subtensor_runtime_common::{ + AlphaCurrency, Currency as CurrencyT, NetUid, NetUidStorageIndex, TaoCurrency, +}; use subtensor_swap_interface::{OrderType, SwapHandler}; use super::mock; @@ -2439,12 +2441,12 @@ fn test_mining_emission_distribution_validator_valiminer_miner() { )); // Setup YUMA so that it creates emissions - Weights::::insert(netuid, 0, vec![(1, 0xFFFF)]); - Weights::::insert(netuid, 1, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 0, vec![(1, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 1, vec![(2, 0xFFFF)]); BlockAtRegistration::::set(netuid, 0, 1); BlockAtRegistration::::set(netuid, 1, 1); BlockAtRegistration::::set(netuid, 2, 1); - LastUpdate::::set(netuid, vec![2, 2, 2]); + LastUpdate::::set(NetUidStorageIndex::from(netuid), vec![2, 2, 2]); Kappa::::set(netuid, u16::MAX / 5); ActivityCutoff::::set(netuid, u16::MAX); // makes all stake active ValidatorPermit::::insert(netuid, vec![true, true, false]); diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 34c7ac1043..4a88fa0d09 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -8,6 +8,5 @@ use super::mock::*; #[test] fn test_subsubnet_emission_proportions() { - 
new_test_ext(1).execute_with(|| { - }); + new_test_ext(1).execute_with(|| {}); } diff --git a/pallets/subtensor/src/tests/swap_hotkey.rs b/pallets/subtensor/src/tests/swap_hotkey.rs index 5a9ebf5127..6910946982 100644 --- a/pallets/subtensor/src/tests/swap_hotkey.rs +++ b/pallets/subtensor/src/tests/swap_hotkey.rs @@ -8,7 +8,7 @@ use frame_system::{Config, RawOrigin}; use sp_core::{Get, H160, H256, U256}; use sp_runtime::SaturatedConversion; use substrate_fixed::types::U64F64; -use subtensor_runtime_common::{AlphaCurrency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use super::mock; @@ -326,7 +326,11 @@ fn test_swap_weight_commits() { add_network(netuid, 1, 1); IsNetworkMember::::insert(old_hotkey, netuid, true); - WeightCommits::::insert(netuid, old_hotkey, weight_commits.clone()); + WeightCommits::::insert( + NetUidStorageIndex::from(netuid), + old_hotkey, + weight_commits.clone(), + ); assert_ok!(SubtensorModule::perform_hotkey_swap_on_all_subnets( &old_hotkey, @@ -335,9 +339,12 @@ fn test_swap_weight_commits() { &mut weight )); - assert!(!WeightCommits::::contains_key(netuid, old_hotkey)); + assert!(!WeightCommits::::contains_key( + NetUidStorageIndex::from(netuid), + old_hotkey + )); assert_eq!( - WeightCommits::::get(netuid, new_hotkey), + WeightCommits::::get(NetUidStorageIndex::from(netuid), new_hotkey), Some(weight_commits) ); }); diff --git a/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs b/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs index 314f72c2bd..7ed0a4b355 100644 --- a/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs +++ b/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs @@ -5,7 +5,7 @@ use codec::Encode; use frame_support::weights::Weight; use frame_support::{assert_err, assert_noop, assert_ok}; use frame_system::{Config, RawOrigin}; -use subtensor_runtime_common::{AlphaCurrency, Currency, TaoCurrency}; +use 
subtensor_runtime_common::{AlphaCurrency, Currency, NetUidStorageIndex, TaoCurrency}; use super::mock::*; use crate::*; @@ -343,7 +343,11 @@ fn test_swap_weight_commits() { SubtensorModule::add_balance_to_coldkey_account(&coldkey, u64::MAX); IsNetworkMember::::insert(old_hotkey, netuid, true); - WeightCommits::::insert(netuid, old_hotkey, weight_commits.clone()); + WeightCommits::::insert( + NetUidStorageIndex::from(netuid), + old_hotkey, + weight_commits.clone(), + ); System::set_block_number(System::block_number() + HotkeySwapOnSubnetInterval::get()); assert_ok!(SubtensorModule::do_swap_hotkey( @@ -353,9 +357,12 @@ fn test_swap_weight_commits() { Some(netuid) )); - assert!(!WeightCommits::::contains_key(netuid, old_hotkey)); + assert!(!WeightCommits::::contains_key( + NetUidStorageIndex::from(netuid), + old_hotkey + )); assert_eq!( - WeightCommits::::get(netuid, new_hotkey), + WeightCommits::::get(NetUidStorageIndex::from(netuid), new_hotkey), Some(weight_commits) ); }); diff --git a/pallets/subtensor/src/tests/uids.rs b/pallets/subtensor/src/tests/uids.rs index bca6945b44..74fb074169 100644 --- a/pallets/subtensor/src/tests/uids.rs +++ b/pallets/subtensor/src/tests/uids.rs @@ -5,7 +5,7 @@ use crate::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; use sp_core::{H160, U256}; -use subtensor_runtime_common::AlphaCurrency; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex}; /******************************************** tests for uids.rs file @@ -63,13 +63,13 @@ fn test_replace_neuron() { Consensus::::mutate(netuid, |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); - Incentive::::mutate(netuid, |v| { + Incentive::::mutate(NetUidStorageIndex::from(netuid), |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); Dividends::::mutate(netuid, |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); - Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); + 
Bonds::::insert(NetUidStorageIndex::from(netuid), neuron_uid, vec![(0, 1)]); // serve axon mock address let ip: u128 = 1676056785; @@ -130,7 +130,7 @@ fn test_replace_neuron() { 0 ); assert_eq!( - SubtensorModule::get_incentive_for_uid(netuid, neuron_uid), + SubtensorModule::get_incentive_for_uid(netuid.into(), neuron_uid), 0 ); assert_eq!( @@ -145,7 +145,7 @@ fn test_replace_neuron() { assert_eq!(axon_info.ip_type, 0); // Check bonds are cleared. - assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); + assert_eq!(Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), vec![]); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } @@ -189,7 +189,7 @@ fn test_bonds_cleared_on_replace() { let neuron_uid = neuron_uid.unwrap(); AssociatedEvmAddress::::insert(netuid, neuron_uid, (evm_address, 1)); // set non-default bonds - Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); + Bonds::::insert(NetUidStorageIndex::from(netuid), neuron_uid, vec![(0, 1)]); // Replace the neuron. SubtensorModule::replace_neuron(netuid, neuron_uid, &new_hotkey_account_id, block_number); @@ -214,7 +214,7 @@ fn test_bonds_cleared_on_replace() { assert_eq!(curr_hotkey.unwrap(), new_hotkey_account_id); // Check bonds are cleared. 
- assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); + assert_eq!(Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), vec![]); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 4bce2ec3af..648befa3c1 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -21,7 +21,7 @@ use sp_runtime::{ }; use sp_std::collections::vec_deque::VecDeque; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::TaoCurrency; +use subtensor_runtime_common::{NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use tle::{ curves::drand::TinyBLS381, @@ -525,7 +525,7 @@ fn test_reveal_weights_validate() { ); // Add the commit to the hotkey - WeightCommits::::mutate(netuid, hotkey, |maybe_commits| { + WeightCommits::::mutate(NetUidStorageIndex::from(netuid), hotkey, |maybe_commits| { let mut commits: VecDeque<(H256, u64, u64, u64)> = maybe_commits.take().unwrap_or_default(); commits.push_back(( @@ -2646,8 +2646,9 @@ fn test_commit_reveal_multiple_commits() { )); // Check that commits before the revealed one are removed - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey) - .expect("expected 8 remaining commits"); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("expected 8 remaining commits"); assert_eq!(remaining_commits.len(), 8); // 10 commits - 2 removed (index 0 and 1) // 4. 
Reveal the last commit next @@ -2662,7 +2663,8 @@ fn test_commit_reveal_multiple_commits() { )); // Remaining commits should have removed up to index 9 - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(remaining_commits.is_none()); // All commits removed // After revealing all commits, attempt to commit again should now succeed @@ -2907,7 +2909,8 @@ fn test_commit_reveal_multiple_commits() { )); // Check that the first commit has been removed - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(remaining_commits.is_none()); // Attempting to reveal the first commit should fail as it was removed @@ -3067,7 +3070,8 @@ fn test_expired_commits_handling_in_commit_and_reveal() { // 6. Verify that the number of unrevealed, non-expired commits is now 6 let commits: VecDeque<(H256, u64, u64, u64)> = - crate::WeightCommits::::get(netuid, hotkey).expect("Expected a commit"); + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("Expected a commit"); assert_eq!(commits.len(), 6); // 5 non-expired commits from epoch 1 + new commit // 7. Attempt to reveal an expired commit (from epoch 0) @@ -3113,7 +3117,7 @@ fn test_expired_commits_handling_in_commit_and_reveal() { )); // 10. Verify that all commits have been revealed and the queue is empty - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); // 11. 
Attempt to reveal again, should fail with NoWeightsCommitFound @@ -3304,7 +3308,7 @@ fn test_reveal_at_exact_epoch() { Error::::ExpiredWeightCommit ); - crate::WeightCommits::::remove(netuid, hotkey); + crate::WeightCommits::::remove(NetUidStorageIndex::from(netuid), hotkey); } }); } @@ -3581,7 +3585,8 @@ fn test_commit_reveal_order_enforcement() { // Check that commits A and B are removed let remaining_commits = - crate::WeightCommits::::get(netuid, hotkey).expect("expected 1 remaining commit"); + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("expected 1 remaining commit"); assert_eq!(remaining_commits.len(), 1); // Only commit C should remain // Attempt to reveal C (index 2), should succeed @@ -3776,7 +3781,7 @@ fn test_reveal_at_exact_block() { ); // Clean up for next iteration - crate::WeightCommits::::remove(netuid, hotkey); + crate::WeightCommits::::remove(NetUidStorageIndex::from(netuid), hotkey); } }); } @@ -3854,7 +3859,7 @@ fn test_successful_batch_reveal() { )); // 4. Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -3955,8 +3960,8 @@ fn test_batch_reveal_with_expired_commits() { assert_err!(result, Error::::ExpiredWeightCommit); // 5. Expired commit is not removed until a successful call - let commits = - crate::WeightCommits::::get(netuid, hotkey).expect("Expected remaining commits"); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("Expected remaining commits"); assert_eq!(commits.len(), 3); // 6. Try revealing the remaining commits @@ -3975,7 +3980,7 @@ fn test_batch_reveal_with_expired_commits() { )); // 7. 
Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -4382,7 +4387,7 @@ fn test_batch_reveal_with_out_of_order_commits() { )); // 6. Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -4446,7 +4451,7 @@ fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { for i in 0..commits_per_hotkey { for hotkey in &hotkeys { - let current_commits = crate::WeightCommits::::get(netuid, hotkey) + let current_commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) .unwrap_or_default(); if current_commits.len() >= max_unrevealed_commits { continue; @@ -4795,7 +4800,7 @@ fn test_get_reveal_blocks() { assert_err!(result, Error::::NoWeightsCommitFound); // **15. 
Verify that All Commits Have Been Removed from Storage** - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!( commits.is_none(), "Commits should be cleared after successful reveal" @@ -4851,7 +4856,7 @@ fn test_commit_weights_rate_limit() { let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("expected uid"); - SubtensorModule::set_last_update_for_uid(netuid, neuron_uid, 0); + SubtensorModule::set_last_update_for_uid(NetUidStorageIndex::from(netuid), neuron_uid, 0); assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), @@ -5388,7 +5393,7 @@ fn test_do_commit_crv3_weights_committing_too_fast() { SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("Expected uid"); - SubtensorModule::set_last_update_for_uid(netuid, neuron_uid, 0); + SubtensorModule::set_last_update_for_uid(NetUidStorageIndex::from(netuid), neuron_uid, 0); assert_ok!(SubtensorModule::do_commit_timelocked_weights( RuntimeOrigin::signed(hotkey), diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 9fd6d27de7..475a3c1a22 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -8,7 +8,7 @@ use sp_core::Get; use sp_core::U256; use sp_runtime::Saturating; use substrate_fixed::types::{I32F32, U96F32}; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; impl Pallet { pub fn ensure_subnet_owner_or_root( @@ -98,13 +98,13 @@ impl Pallet { pub fn get_consensus(netuid: NetUid) -> Vec { Consensus::::get(netuid) } - pub fn get_incentive(netuid: NetUid) -> Vec { + pub fn get_incentive(netuid: NetUidStorageIndex) -> Vec { Incentive::::get(netuid) } pub fn get_dividends(netuid: NetUid) -> 
Vec { Dividends::::get(netuid) } - pub fn get_last_update(netuid: NetUid) -> Vec { + pub fn get_last_update(netuid: NetUidStorageIndex) -> Vec { LastUpdate::::get(netuid) } pub fn get_pruning_score(netuid: NetUid) -> Vec { @@ -120,7 +120,7 @@ impl Pallet { // ================================== // ==== YumaConsensus UID params ==== // ================================== - pub fn set_last_update_for_uid(netuid: NetUid, uid: u16, last_update: u64) { + pub fn set_last_update_for_uid(netuid: NetUidStorageIndex, uid: u16, last_update: u64) { let mut updated_last_update_vec = Self::get_last_update(netuid); let Some(updated_last_update) = updated_last_update_vec.get_mut(uid as usize) else { return; @@ -183,7 +183,7 @@ impl Pallet { let vec = Consensus::::get(netuid); vec.get(uid as usize).copied().unwrap_or(0) } - pub fn get_incentive_for_uid(netuid: NetUid, uid: u16) -> u16 { + pub fn get_incentive_for_uid(netuid: NetUidStorageIndex, uid: u16) -> u16 { let vec = Incentive::::get(netuid); vec.get(uid as usize).copied().unwrap_or(0) } @@ -191,7 +191,7 @@ impl Pallet { let vec = Dividends::::get(netuid); vec.get(uid as usize).copied().unwrap_or(0) } - pub fn get_last_update_for_uid(netuid: NetUid, uid: u16) -> u64 { + pub fn get_last_update_for_uid(netuid: NetUidStorageIndex, uid: u16) -> u64 { let vec = LastUpdate::::get(netuid); vec.get(uid as usize).copied().unwrap_or(0) } @@ -576,7 +576,8 @@ impl Pallet { )); } - pub fn get_bonds_moving_average(netuid: NetUid) -> u64 { + pub fn get_bonds_moving_average(netuid_index: NetUidStorageIndex) -> u64 { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); BondsMovingAverage::::get(netuid) } pub fn set_bonds_moving_average(netuid: NetUid, bonds_moving_average: u64) { From 27ddaffc0a8d721613a90ae0751fadfacc7cd697 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 26 Aug 2025 19:18:26 -0400 Subject: [PATCH 096/379] Simplify bonds calculation (no subsubnet logic for ema) --- 
pallets/subtensor/src/epoch/run_epoch.rs | 61 +++++++++---------- pallets/subtensor/src/rpc_info/metagraph.rs | 4 +- pallets/subtensor/src/rpc_info/subnet_info.rs | 6 +- pallets/subtensor/src/tests/epoch.rs | 2 +- pallets/subtensor/src/tests/weights.rs | 34 +++++------ pallets/subtensor/src/utils/misc.rs | 3 +- 6 files changed, 52 insertions(+), 58 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index fc9bbd070f..ab805a8259 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -286,7 +286,7 @@ impl Pallet { let owner_uid: Option = Self::get_owner_uid(netuid); // Access network weights row unnormalized. - let mut weights: Vec> = Self::get_weights(netuid); + let mut weights: Vec> = Self::get_weights(netuid_index); log::trace!("W: {:?}", &weights); // Mask weights that are not from permitted validators. @@ -363,7 +363,7 @@ impl Pallet { log::trace!("B: {:?}", &bonds); // Compute the Exponential Moving Average (EMA) of bonds. - ema_bonds = Self::compute_bonds(netuid_index, &weights_for_bonds, &bonds, &consensus); + ema_bonds = Self::compute_bonds(netuid, &weights_for_bonds, &bonds, &consensus); log::trace!("emaB: {:?}", &ema_bonds); // Normalize EMA bonds. @@ -397,7 +397,7 @@ impl Pallet { log::trace!("ΔB: {:?}", &bonds_delta); // Compute the Exponential Moving Average (EMA) of bonds. - ema_bonds = Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid_index); + ema_bonds = Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid); inplace_col_normalize(&mut ema_bonds); // sum_i b_ij = 1 log::trace!("emaB: {:?}", &ema_bonds); @@ -706,7 +706,7 @@ impl Pallet { let owner_uid: Option = Self::get_owner_uid(netuid); // Access network weights row unnormalized. 
- let mut weights: Vec> = Self::get_weights_sparse(netuid); + let mut weights: Vec> = Self::get_weights_sparse(netuid_index); log::trace!("Weights: {:?}", &weights); // Mask weights that are not from permitted validators. @@ -1608,47 +1608,42 @@ impl Pallet { } /// Output unnormalized sparse weights, input weights are assumed to be row max-upscaled in u16. - pub fn get_weights_sparse(netuid: NetUid) -> Vec> { + pub fn get_weights_sparse(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![]; n]; - for (uid_i, weights_i) in as IterableStorageDoubleMap< - NetUidStorageIndex, - u16, - Vec<(u16, u16)>, - >>::iter_prefix(NetUidStorageIndex::from(netuid)) + for (uid_i, weights_i) in Weights::::iter_prefix(netuid_index) .filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_i.iter().filter(|(uid_j, _)| *uid_j < n as u16) { - weights - .get_mut(uid_i as usize) - .expect("uid_i is filtered to be less than n; qed") - .push((*uid_j, I32F32::saturating_from_num(*weight_ij))); + if let Some(row) = weights.get_mut(uid_i as usize) { + row.push((*uid_j, I32F32::saturating_from_num(*weight_ij))); + } else { + log::error!("uid_i {:?} is filtered to be less than n", uid_i); + } } } weights } /// Output unnormalized weights in [n, n] matrix, input weights are assumed to be row max-upscaled in u16. 
- pub fn get_weights(netuid: NetUid) -> Vec> { + pub fn get_weights(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; - for (uid_i, weights_vec) in as IterableStorageDoubleMap< - NetUidStorageIndex, - u16, - Vec<(u16, u16)>, - >>::iter_prefix(NetUidStorageIndex::from(netuid)) + for (uid_i, weights_vec) in Weights::::iter_prefix(netuid_index) .filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_vec .into_iter() .filter(|(uid_j, _)| *uid_j < n as u16) { - *weights + if let Some(cell) = weights .get_mut(uid_i as usize) - .expect("uid_i is filtered to be less than n; qed") - .get_mut(uid_j as usize) - .expect("uid_j is filtered to be less than n; qed") = - I32F32::saturating_from_num(weight_ij); + .and_then(|row| row.get_mut(uid_j as usize)) + { + *cell = I32F32::saturating_from_num(weight_ij); + } } } weights @@ -1730,8 +1725,10 @@ impl Pallet { pub fn compute_ema_bonds_normal_sparse( bonds_delta: &[Vec<(u16, I32F32)>], bonds: &[Vec<(u16, I32F32)>], - netuid: NetUidStorageIndex, + netuid_index: NetUidStorageIndex, ) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // Retrieve the bonds moving average for the given network ID and scale it down. let bonds_moving_average: I64F64 = I64F64::saturating_from_num(Self::get_bonds_moving_average(netuid)) @@ -1764,7 +1761,7 @@ impl Pallet { pub fn compute_ema_bonds_normal( bonds_delta: &[Vec], bonds: &[Vec], - netuid: NetUidStorageIndex, + netuid: NetUid, ) -> Vec> { // Retrieve the bonds moving average for the given network ID and scale it down. let bonds_moving_average: I64F64 = @@ -1798,13 +1795,11 @@ impl Pallet { /// # Returns: /// A vector of EMA bonds. 
pub fn compute_bonds( - netuid_index: NetUidStorageIndex, + netuid: NetUid, weights: &[Vec], // weights_for_bonds bonds: &[Vec], consensus: &[I32F32], ) -> Vec> { - let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); - // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() @@ -1821,7 +1816,7 @@ impl Pallet { mat_ema_alpha(weights, bonds, &alphas) } else { // Liquid Alpha is disabled, compute the liquid alpha value. - let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid_index); + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. mat_ema(weights, bonds, alpha) @@ -1863,7 +1858,7 @@ impl Pallet { mat_ema_alpha_sparse(weights, bonds, &alphas) } else { // Liquid Alpha is disabled, compute the liquid alpha value. - let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid_index); + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. mat_ema_sparse(weights, bonds, alpha) @@ -2018,7 +2013,7 @@ impl Pallet { clamp_value(alpha, alpha_low, alpha_high) } - pub fn compute_disabled_liquid_alpha(netuid: NetUidStorageIndex) -> I32F32 { + pub fn compute_disabled_liquid_alpha(netuid: NetUid) -> I32F32 { // Retrieve the bonds moving average for the given network ID and scale it down. 
let bonds_moving_average: I64F64 = I64F64::from_num(Self::get_bonds_moving_average(netuid)) .saturating_div(I64F64::from_num(1_000_000)); diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index 308c85cba6..bac19e5468 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -727,7 +727,7 @@ impl Pallet { liquid_alpha_enabled: Self::get_liquid_alpha_enabled(netuid), // Bonds liquid enabled. alpha_high: Self::get_alpha_values(netuid).1.into(), // Alpha param high alpha_low: Self::get_alpha_values(netuid).0.into(), // Alpha param low - bonds_moving_avg: Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)) + bonds_moving_avg: Self::get_bonds_moving_average(netuid) .into(), // Bonds moving avg // Metagraph info. @@ -1115,7 +1115,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::BondsMovingAvg) => SelectiveMetagraph { netuid: netuid.into(), bonds_moving_avg: Some( - Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)).into(), + Self::get_bonds_moving_average(netuid).into(), ), ..Default::default() }, diff --git a/pallets/subtensor/src/rpc_info/subnet_info.rs b/pallets/subtensor/src/rpc_info/subnet_info.rs index 7ca8a8f948..d1e0a05419 100644 --- a/pallets/subtensor/src/rpc_info/subnet_info.rs +++ b/pallets/subtensor/src/rpc_info/subnet_info.rs @@ -4,7 +4,7 @@ use frame_support::storage::IterableStorageMap; extern crate alloc; use codec::Compact; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::{NetUid, NetUidStorageIndex, TaoCurrency}; +use subtensor_runtime_common::{NetUid, TaoCurrency}; #[freeze_struct("edd6bd3273dfea76")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -286,7 +286,7 @@ impl Pallet { let target_regs_per_interval = Self::get_target_registrations_per_interval(netuid); let min_burn = Self::get_min_burn(netuid); let max_burn = Self::get_max_burn(netuid); - let bonds_moving_avg = 
Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)); + let bonds_moving_avg = Self::get_bonds_moving_average(netuid); let max_regs_per_block = Self::get_max_registrations_per_block(netuid); let serving_rate_limit = Self::get_serving_rate_limit(netuid); let max_validators = Self::get_max_allowed_validators(netuid); @@ -349,7 +349,7 @@ impl Pallet { let target_regs_per_interval = Self::get_target_registrations_per_interval(netuid); let min_burn = Self::get_min_burn(netuid); let max_burn = Self::get_max_burn(netuid); - let bonds_moving_avg = Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)); + let bonds_moving_avg = Self::get_bonds_moving_average(netuid); let max_regs_per_block = Self::get_max_registrations_per_block(netuid); let serving_rate_limit = Self::get_serving_rate_limit(netuid); let max_validators = Self::get_max_allowed_validators(netuid); diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 07c938be98..3370f2973b 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -3712,7 +3712,7 @@ fn test_epoch_no_mask_when_commit_reveal_disabled() { for _ in 0..3 { SubtensorModule::epoch(netuid, 1.into()); assert!( - !SubtensorModule::get_weights_sparse(netuid)[0].is_empty(), + !SubtensorModule::get_weights_sparse(netuid.into())[0].is_empty(), "row visible when CR disabled" ); run_to_block(System::block_number() + tempo as u64 + 1); diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 648befa3c1..19e085378a 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -1488,7 +1488,7 @@ fn test_set_weights_sum_larger_than_u16_max() { assert_ok!(result); // Get max-upscaled unnormalized weights. 
- let all_weights: Vec> = SubtensorModule::get_weights(netuid); + let all_weights: Vec> = SubtensorModule::get_weights(netuid.into()); let weights_set: &[I32F32] = &all_weights[neuron_uid as usize]; assert_eq!(weights_set[0], I32F32::from_num(u16::MAX)); assert_eq!(weights_set[1], I32F32::from_num(u16::MAX)); @@ -5113,7 +5113,7 @@ fn test_reveal_crv3_commits_success() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as usize).cloned().unwrap_or_default(); assert!( @@ -5235,7 +5235,7 @@ fn test_reveal_crv3_commits_cannot_reveal_after_reveal_epoch() { step_epochs(3, netuid); // Verify that weights are not set - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse .get(neuron_uid1 as usize) .cloned() @@ -5270,7 +5270,7 @@ fn test_reveal_crv3_commits_cannot_reveal_after_reveal_epoch() { assert_ok!(SubtensorModule::reveal_crv3_commits(netuid)); // Verify that the weights for the neuron have not been set - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse .get(neuron_uid1 as usize) .cloned() @@ -5607,7 +5607,7 @@ fn test_reveal_crv3_commits_decryption_failure() { let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_matrix = SubtensorModule::get_weights(netuid); + let weights_matrix = SubtensorModule::get_weights(netuid.into()); let weights = weights_matrix.get(neuron_uid).cloned().unwrap_or_default(); assert!(weights.iter().all(|&w| w == I32F32::from_num(0))); }); @@ -5720,7 +5720,7 @@ fn 
test_reveal_crv3_commits_multiple_commits_some_fail_some_succeed() { // Verify that weights are set for hotkey1 let neuron_uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1) .expect("Failed to get neuron UID for hotkey1") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights1 = weights_sparse.get(neuron_uid1).cloned().unwrap_or_default(); assert!( !weights1.is_empty(), @@ -5815,7 +5815,7 @@ fn test_reveal_crv3_commits_do_set_weights_failure() { // Verify that weights are not set due to `do_set_weights` failure let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -5893,7 +5893,7 @@ fn test_reveal_crv3_commits_payload_decoding_failure() { // Verify that weights are not set let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -5975,7 +5975,7 @@ fn test_reveal_crv3_commits_signature_deserialization_failure() { // Verify that weights are not set let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); 
assert!( weights.is_empty(), @@ -6040,7 +6040,7 @@ fn test_reveal_crv3_commits_with_empty_commit_queue() { step_epochs(2, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); assert!( weights_sparse.is_empty(), "Weights should be empty as there were no commits to reveal" @@ -6127,7 +6127,7 @@ fn test_reveal_crv3_commits_with_incorrect_identity_message() { // Verify that weights are not set due to decryption failure let neuron_uid = neuron_uid as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -6337,7 +6337,7 @@ fn test_reveal_crv3_commits_multiple_valid_commits_all_processed() { step_epochs(2, netuid); // ───── assertions ─────────────────────────────────────────────────── - let w_sparse = SubtensorModule::get_weights_sparse(netuid); + let w_sparse = SubtensorModule::get_weights_sparse(netuid.into()); for hk in hotkeys { let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk).unwrap() as usize; assert!( @@ -6452,7 +6452,7 @@ fn test_reveal_crv3_commits_max_neurons() { step_epochs(2, netuid); // ───── verify weights ─────────────────────────────────────────────── - let w_sparse = SubtensorModule::get_weights_sparse(netuid); + let w_sparse = SubtensorModule::get_weights_sparse(netuid.into()); for hk in &committing_hotkeys { let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, hk).unwrap() as usize; assert!( @@ -6682,7 +6682,7 @@ fn test_reveal_crv3_commits_hotkey_check() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as 
usize).cloned().unwrap_or_default(); assert!( @@ -6799,7 +6799,7 @@ fn test_reveal_crv3_commits_hotkey_check() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as usize).cloned().unwrap_or_default(); assert!( @@ -6937,7 +6937,7 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { step_block(1); // automatic reveal runs here - let weights = SubtensorModule::get_weights_sparse(netuid) + let weights = SubtensorModule::get_weights_sparse(netuid.into()) .get(uid as usize) .cloned() .unwrap_or_default(); @@ -7072,7 +7072,7 @@ fn test_reveal_crv3_commits_legacy_payload_success() { // ───────────────────────────────────── // 5 ▸ assertions // ───────────────────────────────────── - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let w1 = weights_sparse .get(uid1 as usize) .cloned() diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 475a3c1a22..c127fa4c71 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -576,8 +576,7 @@ impl Pallet { )); } - pub fn get_bonds_moving_average(netuid_index: NetUidStorageIndex) -> u64 { - let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + pub fn get_bonds_moving_average(netuid: NetUid) -> u64 { BondsMovingAverage::::get(netuid) } pub fn set_bonds_moving_average(netuid: NetUid, bonds_moving_average: u64) { From 9646bfc31ae809778894cdebe46f245f991bec8b Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 27 Aug 2025 12:52:24 -0400 Subject: [PATCH 097/379] Fix hotkey emission tuples --- pallets/subtensor/src/subnets/subsubnet.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git 
a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 37e20299a9..4f81f69e2e 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -214,9 +214,6 @@ impl Pallet { }) // Consolidate the hotkey emissions into a single BTreeMap .fold(BTreeMap::new(), |mut acc, (hotkey, (terms, sub_weight))| { - - println!("Hotkey: {:?}, terms: {:?}", hotkey, terms); - acc.entry(hotkey) .and_modify(|acc_terms| { acc_terms.dividend = Self::weighted_acc_u16( @@ -276,9 +273,10 @@ impl Pallet { // Remap BTreeMap back to Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> format // for processing emissions in run_coinbase + // Emission tuples ( hotkeys, server_emission, validator_emission ) aggregated .into_iter() - .map(|(hotkey, terms)| (hotkey, terms.validator_emission, terms.server_emission)) + .map(|(hotkey, terms)| (hotkey, terms.server_emission, terms.validator_emission)) .collect() } } From 1a061461b65ebbb9d96003be19bd1e9c5248ad2a Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 27 Aug 2025 15:14:04 -0400 Subject: [PATCH 098/379] Epoch refactored, all tests pass --- pallets/subtensor/src/epoch/run_epoch.rs | 42 ++++++++++----------- pallets/subtensor/src/rpc_info/metagraph.rs | 7 +--- pallets/subtensor/src/tests/epoch.rs | 9 ++++- pallets/subtensor/src/tests/uids.rs | 10 ++++- 4 files changed, 38 insertions(+), 30 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index ab805a8259..56d121bc4e 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -75,7 +75,7 @@ impl Pallet { // Remap and return output .into_iter() - .map(|(hotkey, terms)| (hotkey, terms.validator_emission, terms.server_emission)) + .map(|(hotkey, terms)| (hotkey, terms.server_emission, terms.validator_emission)) .collect() } @@ -105,12 +105,13 @@ impl Pallet { .collect::>(); Incentive::::insert(netuid_index, 
incentive); - bonds.into_iter().enumerate().for_each(|(uid_usize, bond_vec)| { - let uid: u16 = uid_usize - .try_into() - .unwrap_or_default(); - Bonds::::insert(netuid_index, uid, bond_vec); - }); + bonds + .into_iter() + .enumerate() + .for_each(|(uid_usize, bond_vec)| { + let uid: u16 = uid_usize.try_into().unwrap_or_default(); + Bonds::::insert(netuid_index, uid, bond_vec); + }); } /// Persists per-netuid epoch output in state @@ -1612,8 +1613,8 @@ impl Pallet { let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![]; n]; - for (uid_i, weights_i) in Weights::::iter_prefix(netuid_index) - .filter(|(uid_i, _)| *uid_i < n as u16) + for (uid_i, weights_i) in + Weights::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_i.iter().filter(|(uid_j, _)| *uid_j < n as u16) { if let Some(row) = weights.get_mut(uid_i as usize) { @@ -1631,8 +1632,8 @@ impl Pallet { let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; - for (uid_i, weights_vec) in Weights::::iter_prefix(netuid_index) - .filter(|(uid_i, _)| *uid_i < n as u16) + for (uid_i, weights_vec) in + Weights::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_vec .into_iter() @@ -1655,10 +1656,7 @@ impl Pallet { let n = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![]; n]; for (uid_i, bonds_vec) in - Bonds::::iter_prefix( - netuid_index, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Bonds::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, bonds_ij) in bonds_vec { bonds @@ -1676,10 +1674,7 @@ impl Pallet { let n: usize = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = 
vec![vec![I32F32::saturating_from_num(0.0); n]; n]; for (uid_i, bonds_vec) in - Bonds::::iter_prefix( - netuid_index, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Bonds::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, bonds_ij) in bonds_vec.into_iter().filter(|(uid_j, _)| *uid_j < n as u16) { *bonds @@ -1703,7 +1698,9 @@ impl Pallet { bonds } - pub fn get_bonds_sparse_fixed_proportion(netuid: NetUidStorageIndex) -> Vec> { + pub fn get_bonds_sparse_fixed_proportion( + netuid: NetUidStorageIndex, + ) -> Vec> { let mut bonds = Self::get_bonds_sparse(netuid); bonds.iter_mut().for_each(|bonds_row| { bonds_row @@ -2064,7 +2061,10 @@ impl Pallet { Ok(()) } - pub fn do_reset_bonds(netuid_index: NetUidStorageIndex, account_id: &T::AccountId) -> Result<(), DispatchError> { + pub fn do_reset_bonds( + netuid_index: NetUidStorageIndex, + account_id: &T::AccountId, + ) -> Result<(), DispatchError> { let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); // check bonds reset enabled for this subnet diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index bac19e5468..e65ddf0696 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -727,8 +727,7 @@ impl Pallet { liquid_alpha_enabled: Self::get_liquid_alpha_enabled(netuid), // Bonds liquid enabled. alpha_high: Self::get_alpha_values(netuid).1.into(), // Alpha param high alpha_low: Self::get_alpha_values(netuid).0.into(), // Alpha param low - bonds_moving_avg: Self::get_bonds_moving_average(netuid) - .into(), // Bonds moving avg + bonds_moving_avg: Self::get_bonds_moving_average(netuid).into(), // Bonds moving avg // Metagraph info. 
hotkeys, // hotkey per UID @@ -1114,9 +1113,7 @@ impl Pallet { }, Some(SelectiveMetagraphIndex::BondsMovingAvg) => SelectiveMetagraph { netuid: netuid.into(), - bonds_moving_avg: Some( - Self::get_bonds_moving_average(netuid).into(), - ), + bonds_moving_avg: Some(Self::get_bonds_moving_average(netuid).into()), ..Default::default() }, diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 8d1b04351c..af72ac6924 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -2464,6 +2464,7 @@ fn test_blocks_since_last_step() { }); } +/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::epoch::test_can_set_self_weight_as_subnet_owner --exact --show-output #[test] fn test_can_set_self_weight_as_subnet_owner() { new_test_ext(1).execute_with(|| { @@ -2510,8 +2511,12 @@ fn test_can_set_self_weight_as_subnet_owner() { // hotkey_emission is [(hotkey, incentive, dividend)] assert_eq!(hotkey_emission.len(), 2); - assert_eq!(hotkey_emission[0].0, subnet_owner_hotkey); - assert_eq!(hotkey_emission[1].0, other_hotkey); + assert!( + hotkey_emission + .iter() + .any(|(hk, _, _)| *hk == subnet_owner_hotkey) + ); + assert!(hotkey_emission.iter().any(|(hk, _, _)| *hk == other_hotkey)); log::debug!("hotkey_emission: {hotkey_emission:?}"); // Both should have received incentive emission diff --git a/pallets/subtensor/src/tests/uids.rs b/pallets/subtensor/src/tests/uids.rs index 74fb074169..4317337ffd 100644 --- a/pallets/subtensor/src/tests/uids.rs +++ b/pallets/subtensor/src/tests/uids.rs @@ -145,7 +145,10 @@ fn test_replace_neuron() { assert_eq!(axon_info.ip_type, 0); // Check bonds are cleared. 
- assert_eq!(Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), vec![]); + assert_eq!( + Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), + vec![] + ); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } @@ -214,7 +217,10 @@ fn test_bonds_cleared_on_replace() { assert_eq!(curr_hotkey.unwrap(), new_hotkey_account_id); // Check bonds are cleared. - assert_eq!(Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), vec![]); + assert_eq!( + Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), + vec![] + ); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } From 842dda4437fe9ade9f69cecbd1e003f7c5f71f9b Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 27 Aug 2025 15:23:36 -0400 Subject: [PATCH 099/379] happy clippy --- pallets/subtensor/src/epoch/run_epoch.rs | 22 ++++++++-------------- pallets/subtensor/src/subnets/subsubnet.rs | 11 ++++++----- 2 files changed, 14 insertions(+), 19 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 56d121bc4e..2288cec48c 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -69,8 +69,8 @@ impl Pallet { let output = Self::epoch_subsubnet(netuid, SubId::MAIN, rao_emission); // Persist values in legacy format - Self::persist_subsub_epoch_terms(netuid, SubId::MAIN, &output.as_map()); - Self::persist_netuid_epoch_terms(netuid, &output.as_map()); + Self::persist_subsub_epoch_terms(netuid, SubId::MAIN, output.as_map()); + Self::persist_netuid_epoch_terms(netuid, output.as_map()); // Remap and return output @@ -735,20 +735,14 @@ impl Pallet { let mut commit_blocks: Vec = vec![u64::MAX; n as usize]; // MAX ⇒ “no active commit” // helper: hotkey → uid - let uid_of = |acct: &T::AccountId| -> Option { - if let Some(terms) = terms_map.get(acct) { - Some(terms.uid) - } else { - None - } - }; + let uid_of = |acct: &T::AccountId| terms_map.get(acct).map(|t| 
t.uid); // ---------- v2 ------------------------------------------------------ for (who, q) in WeightCommits::::iter_prefix(netuid_index) { for (_, cb, _, _) in q.iter() { if !Self::is_commit_expired(netuid, *cb) { - if let Some(i) = uid_of(&who) { - commit_blocks[i] = commit_blocks[i].min(*cb); + if let Some(cell) = uid_of(&who).and_then(|i| commit_blocks.get_mut(i)) { + *cell = (*cell).min(*cb); } break; // earliest active found } @@ -759,8 +753,8 @@ impl Pallet { for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid) { for (who, cb, ..) in q.iter() { if !Self::is_commit_expired(netuid, *cb) { - if let Some(i) = uid_of(who) { - commit_blocks[i] = commit_blocks[i].min(*cb); + if let Some(cell) = uid_of(who).and_then(|i| commit_blocks.get_mut(i)) { + *cell = (*cell).min(*cb); } } } @@ -1620,7 +1614,7 @@ impl Pallet { if let Some(row) = weights.get_mut(uid_i as usize) { row.push((*uid_j, I32F32::saturating_from_num(*weight_ij))); } else { - log::error!("uid_i {:?} is filtered to be less than n", uid_i); + log::error!("uid_i {uid_i:?} is filtered to be less than n"); } } } diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 4f81f69e2e..12f80a96e0 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -158,7 +158,9 @@ impl Pallet { u64::from(alpha).saturating_sub(per_subsubnet.saturating_mul(subsubnet_count)); let mut result = vec![AlphaCurrency::from(per_subsubnet); subsubnet_count as usize]; - result[0] = result[0].saturating_add(AlphaCurrency::from(rounding_err)); + if let Some(cell) = result.first_mut() { + *cell = cell.saturating_add(AlphaCurrency::from(rounding_err)); + } result } @@ -199,7 +201,7 @@ impl Pallet { // Run epoch function on the subsubnet emission let epoch_output = Self::epoch_subsubnet(netuid, sub_id, sub_emission); - Self::persist_subsub_epoch_terms(netuid, sub_id, &epoch_output.as_map()); + 
Self::persist_subsub_epoch_terms(netuid, sub_id, epoch_output.as_map()); // Calculate subsubnet weight from the split emission (not the other way because preserving // emission accuracy is the priority) @@ -236,7 +238,7 @@ impl Pallet { terms.stake_weight, sub_weight, ); - acc_terms.active = acc_terms.active | terms.active; + acc_terms.active |= terms.active; acc_terms.emission = Self::weighted_acc_alpha( acc_terms.emission, terms.emission, @@ -261,8 +263,7 @@ impl Pallet { terms.validator_trust, sub_weight, ); - acc_terms.new_validator_permit = - acc_terms.new_validator_permit | terms.new_validator_permit; + acc_terms.new_validator_permit |= terms.new_validator_permit; }) .or_insert(terms); acc From 4e1e7bf9a428f2814ec71f62a649de71ebfc1d53 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 27 Aug 2025 15:42:35 -0400 Subject: [PATCH 100/379] Add state cleanup on subsubnet reduction, cleanup --- pallets/subtensor/src/epoch/run_epoch.rs | 493 --------------------- pallets/subtensor/src/subnets/subsubnet.rs | 32 +- 2 files changed, 19 insertions(+), 506 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 2288cec48c..9c6377601e 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1083,499 +1083,6 @@ impl Pallet { EpochOutput(terms_map) } - // Legacy epoch fn - // #[allow(clippy::indexing_slicing)] - // pub fn epoch( - // netuid: NetUid, - // rao_emission: AlphaCurrency, - // ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { - // // Get subnetwork size. - // let n = Self::get_subnetwork_n(netuid); - // log::trace!("Number of Neurons in Network: {n:?}"); - - // // ====================== - // // == Active & updated == - // // ====================== - - // // Get current block. - // let current_block: u64 = Self::get_current_block_as_u64(); - // log::trace!("current_block: {current_block:?}"); - - // // Get tempo. 
- // let tempo: u64 = Self::get_tempo(netuid).into(); - // log::trace!("tempo:\n{tempo:?}\n"); - - // // Get activity cutoff. - // let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; - // log::trace!("activity_cutoff: {activity_cutoff:?}"); - - // // Last update vector. - // let last_update: Vec = Self::get_last_update(netuid); - // log::trace!("Last update: {:?}", &last_update); - - // // Inactive mask. - // let inactive: Vec = last_update - // .iter() - // .map(|updated| updated.saturating_add(activity_cutoff) < current_block) - // .collect(); - // log::debug!("Inactive: {:?}", inactive.clone()); - - // // Logical negation of inactive. - // let active: Vec = inactive.iter().map(|&b| !b).collect(); - - // // Block at registration vector (block when each neuron was most recently registered). - // let block_at_registration: Vec = Self::get_block_at_registration(netuid); - // log::trace!("Block at registration: {:?}", &block_at_registration); - - // // =========== - // // == Stake == - // // =========== - - // let hotkeys: Vec<(u16, T::AccountId)> = - // as IterableStorageDoubleMap>::iter_prefix(netuid) - // .collect(); - // log::debug!("hotkeys: {:?}", &hotkeys); - - // // Access network stake as normalized vector. - // let (total_stake, _alpha_stake, _tao_stake): (Vec, Vec, Vec) = - // Self::get_stake_weights_for_network(netuid); - - // // Get the minimum stake required. - // let min_stake = Self::get_stake_threshold(); - - // // Set stake of validators that doesn't meet the staking threshold to 0 as filter. 
- // let mut filtered_stake: Vec = total_stake - // .iter() - // .map(|&s| { - // if fixed64_to_u64(s) < min_stake { - // return I64F64::from(0); - // } - // s - // }) - // .collect(); - // log::debug!("Filtered stake: {:?}", &filtered_stake); - - // inplace_normalize_64(&mut filtered_stake); - // let stake: Vec = vec_fixed64_to_fixed32(filtered_stake); - // log::debug!("Normalised Stake: {:?}", &stake); - - // // ======================= - // // == Validator permits == - // // ======================= - - // // Get current validator permits. - // let validator_permits: Vec = Self::get_validator_permit(netuid); - // log::trace!("validator_permits: {validator_permits:?}"); - - // // Logical negation of validator_permits. - // let validator_forbids: Vec = validator_permits.iter().map(|&b| !b).collect(); - - // // Get max allowed validators. - // let max_allowed_validators: u16 = Self::get_max_allowed_validators(netuid); - // log::trace!("max_allowed_validators: {max_allowed_validators:?}"); - - // // Get new validator permits. - // let new_validator_permits: Vec = - // is_topk_nonzero(&stake, max_allowed_validators as usize); - // log::trace!("new_validator_permits: {new_validator_permits:?}"); - - // // ================== - // // == Active Stake == - // // ================== - - // let mut active_stake: Vec = stake.clone(); - - // // Remove inactive stake. - // inplace_mask_vector(&inactive, &mut active_stake); - - // // Remove non-validator stake. - // inplace_mask_vector(&validator_forbids, &mut active_stake); - - // // Normalize active stake. - // inplace_normalize(&mut active_stake); - // log::trace!("Active Stake: {:?}", &active_stake); - - // // ============= - // // == Weights == - // // ============= - - // let owner_uid: Option = Self::get_owner_uid(netuid); - - // // Access network weights row unnormalized. 
- // let mut weights: Vec> = Self::get_weights_sparse(netuid); - // log::trace!("Weights: {:?}", &weights); - - // // Mask weights that are not from permitted validators. - // weights = mask_rows_sparse(&validator_forbids, &weights); - // log::trace!("Weights (permit): {:?}", &weights); - - // // Remove self-weight by masking diagonal; keep owner_uid self-weight. - // if let Some(owner_uid) = owner_uid { - // weights = mask_diag_sparse_except_index(&weights, owner_uid); - // } else { - // weights = mask_diag_sparse(&weights); - // } - // log::trace!("Weights (permit+diag): {:?}", &weights); - - // // Remove weights referring to deregistered neurons. - // weights = vec_mask_sparse_matrix( - // &weights, - // &last_update, - // &block_at_registration, - // &|updated, registered| updated <= registered, - // ); - // log::trace!("Weights (permit+diag+outdate): {:?}", &weights); - - // if Self::get_commit_reveal_weights_enabled(netuid) { - // let mut commit_blocks: Vec = vec![u64::MAX; n as usize]; // MAX ⇒ “no active commit” - - // // helper: hotkey → uid - // let uid_of = |acct: &T::AccountId| -> Option { - // hotkeys - // .iter() - // .find(|(_, a)| a == acct) - // .map(|(uid, _)| *uid as usize) - // }; - - // // ---------- v2 ------------------------------------------------------ - // for (who, q) in WeightCommits::::iter_prefix(netuid) { - // for (_, cb, _, _) in q.iter() { - // if !Self::is_commit_expired(netuid, *cb) { - // if let Some(i) = uid_of(&who) { - // commit_blocks[i] = commit_blocks[i].min(*cb); - // } - // break; // earliest active found - // } - // } - // } - - // // ---------- v3 ------------------------------------------------------ - // for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid) { - // for (who, cb, ..) 
in q.iter() { - // if !Self::is_commit_expired(netuid, *cb) { - // if let Some(i) = uid_of(who) { - // commit_blocks[i] = commit_blocks[i].min(*cb); - // } - // } - // } - // } - - // weights = vec_mask_sparse_matrix( - // &weights, - // &commit_blocks, - // &block_at_registration, - // &|cb, reg| cb < reg, - // ); - - // log::trace!( - // "Commit-reveal column mask applied ({} masked rows)", - // commit_blocks.iter().filter(|&&cb| cb != u64::MAX).count() - // ); - // } - - // // Normalize remaining weights. - // inplace_row_normalize_sparse(&mut weights); - // log::trace!("Weights (mask+norm): {:?}", &weights); - - // // ================================ - // // == Consensus, Validator Trust == - // // ================================ - - // // Compute preranks: r_j = SUM(i) w_ij * s_i - // let preranks: Vec = matmul_sparse(&weights, &active_stake, n); - // log::trace!("Ranks (before): {:?}", &preranks); - - // // Consensus majority ratio, e.g. 51%. - // let kappa: I32F32 = Self::get_float_kappa(netuid); - // // Calculate consensus as stake-weighted median of weights. - // let consensus: Vec = weighted_median_col_sparse(&active_stake, &weights, n, kappa); - // log::trace!("Consensus: {:?}", &consensus); - - // // Clip weights at majority consensus. - // let clipped_weights: Vec> = col_clip_sparse(&weights, &consensus); - // log::trace!("Clipped Weights: {:?}", &clipped_weights); - - // // Calculate validator trust as sum of clipped weights set by validator. - // let validator_trust: Vec = row_sum_sparse(&clipped_weights); - // log::trace!("Validator Trust: {:?}", &validator_trust); - - // // ============================= - // // == Ranks, Trust, Incentive == - // // ============================= - - // // Compute ranks: r_j = SUM(i) w_ij * s_i. - // let mut ranks: Vec = matmul_sparse(&clipped_weights, &active_stake, n); - // log::trace!("Ranks (after): {:?}", &ranks); - - // // Compute server trust: ratio of rank after vs. rank before. 
- // let trust: Vec = vecdiv(&ranks, &preranks); // range: I32F32(0, 1) - // log::trace!("Trust: {:?}", &trust); - - // inplace_normalize(&mut ranks); // range: I32F32(0, 1) - // let incentive: Vec = ranks.clone(); - // log::trace!("Incentive (=Rank): {:?}", &incentive); - - // // ========================= - // // == Bonds and Dividends == - // // ========================= - - // // Get validator bonds penalty in [0, 1]. - // let bonds_penalty: I32F32 = Self::get_float_bonds_penalty(netuid); - // // Calculate weights for bonds, apply bonds penalty to weights. - // // bonds_penalty = 0: weights_for_bonds = weights.clone() - // // bonds_penalty = 1: weights_for_bonds = clipped_weights.clone() - // let weights_for_bonds: Vec> = - // interpolate_sparse(&weights, &clipped_weights, n, bonds_penalty); - - // let mut dividends: Vec; - // let mut ema_bonds: Vec>; - // if Yuma3On::::get(netuid) { - // // Access network bonds. - // let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid); - // log::trace!("Bonds: {:?}", &bonds); - - // // Remove bonds referring to neurons that have registered since last tempo. - // // Mask if: the last tempo block happened *before* the registration block - // // ==> last_tempo <= registered - // let last_tempo: u64 = current_block.saturating_sub(tempo); - // bonds = scalar_vec_mask_sparse_matrix( - // &bonds, - // last_tempo, - // &block_at_registration, - // &|last_tempo, registered| last_tempo <= registered, - // ); - // log::trace!("Bonds: (mask) {:?}", &bonds); - - // // Compute the Exponential Moving Average (EMA) of bonds. - // log::trace!("weights_for_bonds: {:?}", &weights_for_bonds); - // ema_bonds = Self::compute_bonds_sparse(netuid, &weights_for_bonds, &bonds, &consensus); - // log::trace!("emaB: {:?}", &ema_bonds); - - // // Normalize EMA bonds. 
- // let mut ema_bonds_norm = ema_bonds.clone(); - // inplace_col_normalize_sparse(&mut ema_bonds_norm, n); // sum_i b_ij = 1 - // log::trace!("emaB norm: {:?}", &ema_bonds_norm); - - // // # === Dividend Calculation=== - // let total_bonds_per_validator: Vec = - // row_sum_sparse(&mat_vec_mul_sparse(&ema_bonds_norm, &incentive)); - // log::trace!( - // "total_bonds_per_validator: {:?}", - // &total_bonds_per_validator - // ); - - // dividends = vec_mul(&total_bonds_per_validator, &active_stake); - // inplace_normalize(&mut dividends); - // log::trace!("Dividends: {:?}", ÷nds); - // } else { - // // original Yuma - liquid alpha disabled - // // Access network bonds. - // let mut bonds: Vec> = Self::get_bonds_sparse(netuid); - // log::trace!("B: {:?}", &bonds); - - // // Remove bonds referring to neurons that have registered since last tempo. - // // Mask if: the last tempo block happened *before* the registration block - // // ==> last_tempo <= registered - // let last_tempo: u64 = current_block.saturating_sub(tempo); - // bonds = scalar_vec_mask_sparse_matrix( - // &bonds, - // last_tempo, - // &block_at_registration, - // &|last_tempo, registered| last_tempo <= registered, - // ); - // log::trace!("B (outdatedmask): {:?}", &bonds); - - // // Normalize remaining bonds: sum_i b_ij = 1. - // inplace_col_normalize_sparse(&mut bonds, n); - // log::trace!("B (mask+norm): {:?}", &bonds); - - // // Compute bonds delta column normalized. - // let mut bonds_delta: Vec> = - // row_hadamard_sparse(&weights_for_bonds, &active_stake); // ΔB = W◦S (outdated W masked) - // log::trace!("ΔB: {:?}", &bonds_delta); - - // // Normalize bonds delta. - // inplace_col_normalize_sparse(&mut bonds_delta, n); // sum_i b_ij = 1 - // log::trace!("ΔB (norm): {:?}", &bonds_delta); - - // // Compute the Exponential Moving Average (EMA) of bonds. - // ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid); - // // Normalize EMA bonds. 
- // inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 - // log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); - - // // Compute dividends: d_i = SUM(j) b_ij * inc_j. - // // range: I32F32(0, 1) - // dividends = matmul_transpose_sparse(&ema_bonds, &incentive); - // inplace_normalize(&mut dividends); - // log::trace!("Dividends: {:?}", ÷nds); - - // // Column max-upscale EMA bonds for storage: max_i w_ij = 1. - // inplace_col_max_upscale_sparse(&mut ema_bonds, n); - // } - - // // ================================= - // // == Emission and Pruning scores == - // // ================================= - - // // Compute normalized emission scores. range: I32F32(0, 1) - // let combined_emission: Vec = incentive - // .iter() - // .zip(dividends.clone()) - // .map(|(ii, di)| ii.saturating_add(di)) - // .collect(); - // let emission_sum: I32F32 = combined_emission.iter().sum(); - - // let mut normalized_server_emission: Vec = incentive.clone(); // Servers get incentive. - // let mut normalized_validator_emission: Vec = dividends.clone(); // Validators get dividends. - // let mut normalized_combined_emission: Vec = combined_emission.clone(); - // // Normalize on the sum of incentive + dividends. - // inplace_normalize_using_sum(&mut normalized_server_emission, emission_sum); - // inplace_normalize_using_sum(&mut normalized_validator_emission, emission_sum); - // inplace_normalize(&mut normalized_combined_emission); - - // // If emission is zero, replace emission with normalized stake. 
- // if emission_sum == I32F32::from(0) { - // // no weights set | outdated weights | self_weights - // if is_zero(&active_stake) { - // // no active stake - // normalized_validator_emission.clone_from(&stake); // do not mask inactive, assumes stake is normalized - // normalized_combined_emission.clone_from(&stake); - // } else { - // normalized_validator_emission.clone_from(&active_stake); // emission proportional to inactive-masked normalized stake - // normalized_combined_emission.clone_from(&active_stake); - // } - // } - - // // Compute rao based emission scores. range: I96F32(0, rao_emission) - // let float_rao_emission: I96F32 = I96F32::saturating_from_num(rao_emission); - - // let server_emission: Vec = normalized_server_emission - // .iter() - // .map(|se: &I32F32| I96F32::saturating_from_num(*se).saturating_mul(float_rao_emission)) - // .collect(); - // let server_emission: Vec = server_emission - // .iter() - // .map(|e: &I96F32| e.saturating_to_num::().into()) - // .collect(); - - // let validator_emission: Vec = normalized_validator_emission - // .iter() - // .map(|ve: &I32F32| I96F32::saturating_from_num(*ve).saturating_mul(float_rao_emission)) - // .collect(); - // let validator_emission: Vec = validator_emission - // .iter() - // .map(|e: &I96F32| e.saturating_to_num::().into()) - // .collect(); - - // // Only used to track emission in storage. 
- // let combined_emission: Vec = normalized_combined_emission - // .iter() - // .map(|ce: &I32F32| I96F32::saturating_from_num(*ce).saturating_mul(float_rao_emission)) - // .collect(); - // let combined_emission: Vec = combined_emission - // .iter() - // .map(|e: &I96F32| AlphaCurrency::from(e.saturating_to_num::())) - // .collect(); - - // log::trace!( - // "Normalized Server Emission: {:?}", - // &normalized_server_emission - // ); - // log::trace!("Server Emission: {:?}", &server_emission); - // log::trace!( - // "Normalized Validator Emission: {:?}", - // &normalized_validator_emission - // ); - // log::trace!("Validator Emission: {:?}", &validator_emission); - // log::trace!( - // "Normalized Combined Emission: {:?}", - // &normalized_combined_emission - // ); - // log::trace!("Combined Emission: {:?}", &combined_emission); - - // // Set pruning scores using combined emission scores. - // let pruning_scores: Vec = normalized_combined_emission.clone(); - // log::trace!("Pruning Scores: {:?}", &pruning_scores); - - // // =================== - // // == Value storage == - // // =================== - // let cloned_stake_weight: Vec = stake - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_emission = combined_emission.clone(); - // let cloned_ranks: Vec = ranks - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_trust: Vec = trust - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_consensus: Vec = consensus - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_incentive: Vec = incentive - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_dividends: Vec = dividends - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_pruning_scores: Vec = vec_max_upscale_to_u16(&pruning_scores); - // let cloned_validator_trust: Vec = 
validator_trust - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // StakeWeight::::insert(netuid, cloned_stake_weight.clone()); - // Active::::insert(netuid, active.clone()); - // Emission::::insert(netuid, cloned_emission); - // Rank::::insert(netuid, cloned_ranks); - // Trust::::insert(netuid, cloned_trust); - // Consensus::::insert(netuid, cloned_consensus); - // Incentive::::insert(netuid, cloned_incentive); - // Dividends::::insert(netuid, cloned_dividends); - // PruningScores::::insert(netuid, cloned_pruning_scores); - // ValidatorTrust::::insert(netuid, cloned_validator_trust); - // ValidatorPermit::::insert(netuid, new_validator_permits.clone()); - - // new_validator_permits - // .iter() - // .zip(validator_permits) - // .zip(ema_bonds) - // .enumerate() - // .for_each(|(i, ((new_permit, validator_permit), ema_bond))| { - // // Set bonds only if uid retains validator permit, otherwise clear bonds. - // if *new_permit { - // let new_bonds_row: Vec<(u16, u16)> = ema_bond - // .iter() - // .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) - // .collect(); - // Bonds::::insert(netuid, i as u16, new_bonds_row); - // } else if validator_permit { - // // Only overwrite the intersection. 
- // let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - // Bonds::::insert(netuid, i as u16, new_empty_bonds_row); - // } - // }); - - // // Emission tuples ( hotkeys, server_emission, validator_emission ) - // hotkeys - // .into_iter() - // .map(|(uid_i, hotkey)| { - // ( - // hotkey, - // server_emission[uid_i as usize], - // validator_emission[uid_i as usize], - // ) - // }) - // .collect() - // } - pub fn get_float_rho(netuid: NetUid) -> I32F32 { I32F32::saturating_from_num(Self::get_rho(netuid)) } diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 12f80a96e0..9452cbf09f 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -124,19 +124,25 @@ impl Pallet { let new_count = desired_count.max(min_possible_count); if old_count > new_count { - todo!(); - // Cleanup weights - // Cleanup StakeWeight - // Cleanup Active - // Cleanup Emission - // Cleanup Rank - // Cleanup Trust - // Cleanup Consensus - // Cleanup Incentive - // Cleanup Dividends - // Cleanup PruningScores - // Cleanup ValidatorTrust - // Cleanup ValidatorPermit + for subid in new_count..old_count { + let netuid_index = + Self::get_subsubnet_storage_index(*netuid, SubId::from(subid)); + + // Cleanup Weights + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup Incentive + Incentive::::remove(netuid_index); + + // Cleanup LastUpdate + LastUpdate::::remove(netuid_index); + + // Cleanup Bonds + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup WeightCommits + let _ = WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + } } SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); From fff6f0b7423463e1c1c91010b3f1fa127c59a61e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 27 Aug 2025 21:40:14 +0000 Subject: [PATCH 101/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 34 
+++++++++++----------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index ad9fc8571c..fef807ff4d 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -78,7 +78,7 @@ mod dispatches { /// - Attempting to set weights with max value exceeding limit. #[pallet::call_index(0)] #[pallet::weight((Weight::from_parts(15_540_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4111)) + .saturating_add(T::DbWeight::get().reads(4112_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn set_weights( origin: OriginFor, @@ -197,9 +197,9 @@ mod dispatches { /// - On failure for each failed item in the batch. /// #[pallet::call_index(80)] - #[pallet::weight((Weight::from_parts(95_160_000, 0) - .saturating_add(T::DbWeight::get().reads(14)) - .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(19_180_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(0_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, netuids: Vec>, @@ -229,8 +229,8 @@ mod dispatches { /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. /// #[pallet::call_index(96)] - #[pallet::weight((Weight::from_parts(55_130_000, 0) - .saturating_add(T::DbWeight::get().reads(7)) + #[pallet::weight((Weight::from_parts(67_770_000, 0) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_weights( origin: T::RuntimeOrigin, @@ -295,8 +295,8 @@ mod dispatches { /// - On failure for each failed item in the batch. 
/// #[pallet::call_index(100)] - #[pallet::weight((Weight::from_parts(82_010_000, 0) - .saturating_add(T::DbWeight::get().reads(8)) + #[pallet::weight((Weight::from_parts(106_600_000, 0) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn batch_commit_weights( origin: OriginFor, @@ -345,7 +345,7 @@ mod dispatches { /// #[pallet::call_index(97)] #[pallet::weight((Weight::from_parts(122_000_000, 0) - .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn reveal_weights( origin: T::RuntimeOrigin, @@ -442,7 +442,7 @@ mod dispatches { /// #[pallet::call_index(99)] #[pallet::weight((Weight::from_parts(77_750_000, 0) - .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_crv3_weights( origin: T::RuntimeOrigin, @@ -537,7 +537,7 @@ mod dispatches { /// - The input vectors are of mismatched lengths. 
#[pallet::call_index(98)] #[pallet::weight((Weight::from_parts(412_000_000, 0) - .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_reveal_weights( origin: T::RuntimeOrigin, @@ -1095,7 +1095,7 @@ mod dispatches { /// #[pallet::call_index(6)] #[pallet::weight((Weight::from_parts(197_900_000, 0) - .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().reads(27_u64)) .saturating_add(T::DbWeight::get().writes(23)), DispatchClass::Normal, Pays::Yes))] pub fn register( origin: OriginFor, @@ -1112,7 +1112,7 @@ mod dispatches { /// Register the hotkey to root network #[pallet::call_index(62)] #[pallet::weight((Weight::from_parts(111_700_000, 0) - .saturating_add(T::DbWeight::get().reads(23)) + .saturating_add(T::DbWeight::get().reads(24_u64)) .saturating_add(T::DbWeight::get().writes(20)), DispatchClass::Normal, Pays::No))] pub fn root_register(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_root_register(origin, hotkey) @@ -1130,7 +1130,7 @@ mod dispatches { /// User register a new subnetwork via burning token #[pallet::call_index(7)] #[pallet::weight((Weight::from_parts(354_200_000, 0) - .saturating_add(T::DbWeight::get().reads(49)) + .saturating_add(T::DbWeight::get().reads(50_u64)) .saturating_add(T::DbWeight::get().writes(43)), DispatchClass::Normal, Pays::Yes))] pub fn burned_register( origin: OriginFor, @@ -1399,7 +1399,7 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) - .saturating_add(T::DbWeight::get().reads(36)) + .saturating_add(T::DbWeight::get().reads(37_u64)) .saturating_add(T::DbWeight::get().writes(52)), DispatchClass::Normal, Pays::No))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) @@ 
-1744,7 +1744,7 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) - .saturating_add(T::DbWeight::get().reads(35)) + .saturating_add(T::DbWeight::get().reads(36_u64)) .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Normal, Pays::No))] pub fn register_network_with_identity( origin: OriginFor, @@ -2407,7 +2407,7 @@ mod dispatches { /// - The client (bittensor-drand) version #[pallet::call_index(113)] #[pallet::weight((Weight::from_parts(64_530_000, 0) - .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( origin: T::RuntimeOrigin, From f251b010c6d79938e05da3464bc4614e4aca77e6 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 27 Aug 2025 19:02:19 -0400 Subject: [PATCH 102/379] Add test plan --- pallets/subtensor/src/subnets/subsubnet.rs | 2 +- pallets/subtensor/src/tests/subsubnet.rs | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 9452cbf09f..f2de8b347d 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -25,7 +25,7 @@ pub type BalanceOf = /// /// Changing this value will require a migration of all epoch maps. 
/// -pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 1024; +pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 4096; impl Pallet { pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUidStorageIndex { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 4a88fa0d09..73bc323829 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -4,8 +4,28 @@ clippy::unwrap_used )] +/// Test plan: +/// - [ ] Netuid index math (with SubsubnetCountCurrent limiting) +/// - [ ] Emissions are split proportionally +/// - [ ] Sum of split emissions is equal to rao_emission passed to epoch +/// - [ ] Weights can be set/commited/revealed by subsubnet +/// - [ ] Rate limiting is enforced by subsubnet +/// - [ ] Bonds are applied per subsubnet +/// - [ ] Incentives are per subsubnet +/// - [ ] Subsubnet limit can be set up to 8 (with admin pallet) +/// - [ ] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block +/// - [ ] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared +/// - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms +/// - [ ] Subnet epoch terms persist in state +/// - [ ] Subsubnet epoch terms persist in state + use super::mock::*; +#[test] +fn test_index_from_netuid_and_subnet() { + new_test_ext(1).execute_with(|| {}); +} + #[test] fn test_subsubnet_emission_proportions() { new_test_ext(1).execute_with(|| {}); From df6e32b59c56acb3de55f6ece544770b917c1997 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 28 Aug 2025 12:55:21 -0400 Subject: [PATCH 103/379] Convert TimelockedWeightCommits to be per-subnet and use NetUidStorageIndex --- .../subtensor/src/coinbase/reveal_commits.rs | 277 +++++++++--------- pallets/subtensor/src/epoch/run_epoch.rs | 2 +- pallets/subtensor/src/lib.rs | 6 +- pallets/subtensor/src/macros/events.rs | 4 +- 
.../migrate_crv3_commits_add_block.rs | 5 +- pallets/subtensor/src/subnets/subsubnet.rs | 7 + pallets/subtensor/src/subnets/weights.rs | 56 ++-- pallets/subtensor/src/tests/migration.rs | 32 +- pallets/subtensor/src/tests/subsubnet.rs | 1 - pallets/subtensor/src/tests/weights.rs | 42 ++- 10 files changed, 238 insertions(+), 194 deletions(-) diff --git a/pallets/subtensor/src/coinbase/reveal_commits.rs b/pallets/subtensor/src/coinbase/reveal_commits.rs index e7bc6dc008..d0c068303b 100644 --- a/pallets/subtensor/src/coinbase/reveal_commits.rs +++ b/pallets/subtensor/src/coinbase/reveal_commits.rs @@ -3,7 +3,7 @@ use ark_serialize::CanonicalDeserialize; use codec::Decode; use frame_support::{dispatch, traits::OriginTrait}; use scale_info::prelude::collections::VecDeque; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, SubId}; use tle::{ curves::drand::TinyBLS381, stream_ciphers::AESGCMStreamCipherProvider, @@ -44,152 +44,159 @@ impl Pallet { // Weights revealed must have been committed during epoch `cur_epoch - reveal_period`. let reveal_epoch = cur_epoch.saturating_sub(reveal_period); - // Clean expired commits - for (epoch, _) in TimelockedWeightCommits::::iter_prefix(netuid) { - if epoch < reveal_epoch { - TimelockedWeightCommits::::remove(netuid, epoch); - } - } + // All subsubnets share the same epoch, so the reveal_period/reveal_epoch are also the same + // Reveal for all subsubnets + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); - // No commits to reveal until at least epoch reveal_period. - if cur_epoch < reveal_period { - log::trace!("Failed to reveal commit for subnet {netuid} Too early"); - return Ok(()); - } - - let mut entries = TimelockedWeightCommits::::take(netuid, reveal_epoch); - let mut unrevealed = VecDeque::new(); - - // Keep popping items off the front of the queue until we successfully reveal a commit. 
- while let Some((who, commit_block, serialized_compresssed_commit, round_number)) = - entries.pop_front() - { - // Try to get the round number from pallet_drand. - let pulse = match pallet_drand::Pulses::::get(round_number) { - Some(p) => p, - None => { - // Round number used was not found on the chain. Skip this commit. - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} on block {commit_block} due to missing round number {round_number}; will retry every block in reveal epoch." - ); - unrevealed.push_back(( - who, - commit_block, - serialized_compresssed_commit, - round_number, - )); - continue; + // Clean expired commits + for (epoch, _) in TimelockedWeightCommits::::iter_prefix(netuid_index) { + if epoch < reveal_epoch { + TimelockedWeightCommits::::remove(netuid_index, epoch); } - }; + } - let reader = &mut &serialized_compresssed_commit[..]; - let commit = match TLECiphertext::::deserialize_compressed(reader) { - Ok(c) => c, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing the commit: {e:?}" - ); - continue; - } - }; - - let signature_bytes = pulse - .signature - .strip_prefix(b"0x") - .unwrap_or(&pulse.signature); - - let sig_reader = &mut &signature_bytes[..]; - let sig = match ::SignatureGroup::deserialize_compressed( - sig_reader, - ) { - Ok(s) => s, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing signature from drand pallet: {e:?}" - ); - continue; - } - }; + // No commits to reveal until at least epoch reveal_period. 
+ if cur_epoch < reveal_period { + log::trace!("Failed to reveal commit for subsubnet {netuid_index} Too early"); + return Ok(()); + } - let decrypted_bytes: Vec = match tld::( - commit, sig, - ) { - Ok(d) => d, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error decrypting the commit: {e:?}" - ); - continue; - } - }; - - // ------------------------------------------------------------------ - // Try to decode payload with the new and legacy formats. - // ------------------------------------------------------------------ - let (uids, values, version_key) = { - let mut reader_new = &decrypted_bytes[..]; - if let Ok(payload) = WeightsTlockPayload::decode(&mut reader_new) { - // Verify hotkey matches committer - let mut hk_reader = &payload.hotkey[..]; - match T::AccountId::decode(&mut hk_reader) { - Ok(decoded_hotkey) if decoded_hotkey == who => { - (payload.uids, payload.values, payload.version_key) - } - Ok(_) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to hotkey mismatch in payload" - ); - continue; - } - Err(e) => { - let mut reader_legacy = &decrypted_bytes[..]; - match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { - Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), - Err(_) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing hotkey: {e:?}" - ); - continue; + let mut entries = TimelockedWeightCommits::::take(netuid_index, reveal_epoch); + let mut unrevealed = VecDeque::new(); + + // Keep popping items off the front of the queue until we successfully reveal a commit. + while let Some((who, commit_block, serialized_compresssed_commit, round_number)) = + entries.pop_front() + { + // Try to get the round number from pallet_drand. + let pulse = match pallet_drand::Pulses::::get(round_number) { + Some(p) => p, + None => { + // Round number used was not found on the chain. 
Skip this commit. + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} on block {commit_block} due to missing round number {round_number}; will retry every block in reveal epoch." + ); + unrevealed.push_back(( + who, + commit_block, + serialized_compresssed_commit, + round_number, + )); + continue; + } + }; + + let reader = &mut &serialized_compresssed_commit[..]; + let commit = match TLECiphertext::::deserialize_compressed(reader) { + Ok(c) => c, + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing the commit: {e:?}" + ); + continue; + } + }; + + let signature_bytes = pulse + .signature + .strip_prefix(b"0x") + .unwrap_or(&pulse.signature); + + let sig_reader = &mut &signature_bytes[..]; + let sig = match ::SignatureGroup::deserialize_compressed( + sig_reader, + ) { + Ok(s) => s, + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing signature from drand pallet: {e:?}" + ); + continue; + } + }; + + let decrypted_bytes: Vec = match tld::( + commit, sig, + ) { + Ok(d) => d, + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error decrypting the commit: {e:?}" + ); + continue; + } + }; + + // ------------------------------------------------------------------ + // Try to decode payload with the new and legacy formats. 
+ // ------------------------------------------------------------------ + let (uids, values, version_key) = { + let mut reader_new = &decrypted_bytes[..]; + if let Ok(payload) = WeightsTlockPayload::decode(&mut reader_new) { + // Verify hotkey matches committer + let mut hk_reader = &payload.hotkey[..]; + match T::AccountId::decode(&mut hk_reader) { + Ok(decoded_hotkey) if decoded_hotkey == who => { + (payload.uids, payload.values, payload.version_key) + } + Ok(_) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to hotkey mismatch in payload" + ); + continue; + } + Err(e) => { + let mut reader_legacy = &decrypted_bytes[..]; + match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { + Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), + Err(_) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing hotkey: {e:?}" + ); + continue; + } } } } - } - } else { - // Fallback to legacy payload - let mut reader_legacy = &decrypted_bytes[..]; - match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { - Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing both payload formats: {e:?}" - ); - continue; + } else { + // Fallback to legacy payload + let mut reader_legacy = &decrypted_bytes[..]; + match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { + Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing both payload formats: {e:?}" + ); + continue; + } } } + }; + + // ------------------------------------------------------------------ + // Apply weights + // ------------------------------------------------------------------ + if let Err(e) = 
Self::do_set_sub_weights( + T::RuntimeOrigin::signed(who.clone()), + netuid, + SubId::from(subid), + uids, + values, + version_key, + ) { + log::trace!( + "Failed to `do_set_sub_weights` for subsubnet {netuid_index} submitted by {who:?}: {e:?}" + ); + continue; } - }; - - // ------------------------------------------------------------------ - // Apply weights - // ------------------------------------------------------------------ - if let Err(e) = Self::do_set_weights( - T::RuntimeOrigin::signed(who.clone()), - netuid, - uids, - values, - version_key, - ) { - log::trace!( - "Failed to `do_set_weights` for subnet {netuid} submitted by {who:?}: {e:?}" - ); - continue; - } - Self::deposit_event(Event::TimelockedWeightsRevealed(netuid, who)); - } + Self::deposit_event(Event::TimelockedWeightsRevealed(netuid_index, who)); + } - if !unrevealed.is_empty() { - TimelockedWeightCommits::::insert(netuid, reveal_epoch, unrevealed); + if !unrevealed.is_empty() { + TimelockedWeightCommits::::insert(netuid_index, reveal_epoch, unrevealed); + } } Ok(()) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 9c6377601e..e44ff5b7ef 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -750,7 +750,7 @@ impl Pallet { } // ---------- v3 ------------------------------------------------------ - for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid) { + for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid_index) { for (who, cb, ..) 
in q.iter() { if !Self::is_commit_expired(netuid, *cb) { if let Some(cell) = uid_of(who).and_then(|i| commit_blocks.get_mut(i)) { diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 3052105a9e..961109c200 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1689,7 +1689,7 @@ pub mod pallet { pub type TimelockedWeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( @@ -1706,7 +1706,7 @@ pub mod pallet { pub type CRV3WeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( @@ -1722,7 +1722,7 @@ pub mod pallet { pub type CRV3WeightCommitsV2 = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 2fc9517daf..0259863cd8 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -406,12 +406,12 @@ mod events { /// - **netuid**: The network identifier. /// - **commit_hash**: The hash representing the committed weights. /// - **reveal_round**: The round at which weights can be revealed. - TimelockedWeightsCommitted(T::AccountId, NetUid, H256, u64), + TimelockedWeightsCommitted(T::AccountId, NetUidStorageIndex, H256, u64), /// Timelocked Weights have been successfully revealed. /// /// - **netuid**: The network identifier. /// - **who**: The account ID of the user revealing the weights. 
- TimelockedWeightsRevealed(NetUid, T::AccountId), + TimelockedWeightsRevealed(NetUidStorageIndex, T::AccountId), } } diff --git a/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs b/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs index 27f2fe6d65..bf5a0bb2b5 100644 --- a/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs +++ b/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs @@ -22,9 +22,10 @@ pub fn migrate_crv3_commits_add_block() -> Weight { log::info!("Running migration '{}'", String::from_utf8_lossy(&mig_name)); // iterate over *all* (netuid, epoch, queue) triples - for (netuid, epoch, old_q) in CRV3WeightCommits::::drain() { + for (netuid_index, epoch, old_q) in CRV3WeightCommits::::drain() { total_weight = total_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + let (netuid, _) = Pallet::::get_netuid_and_subid(netuid_index).unwrap_or_default(); let commit_block = Pallet::::get_first_block_of_epoch(netuid, epoch); // convert VecDeque<(who,cipher,rnd)> → VecDeque<(who,cb,cipher,rnd)> @@ -34,7 +35,7 @@ pub fn migrate_crv3_commits_add_block() -> Weight { .collect(); // write back under *new* storage definition - CRV3WeightCommitsV2::::insert(netuid, epoch, new_q); + CRV3WeightCommitsV2::::insert(netuid_index, epoch, new_q); } // mark as done diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index f2de8b347d..eaeadd38bd 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -142,6 +142,13 @@ impl Pallet { // Cleanup WeightCommits let _ = WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup TimelockedWeightCommits + let _ = TimelockedWeightCommits::::clear_prefix( + netuid_index, + u32::MAX, + None, + ); } } diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index f60d59f376..f9393cd6bd 100644 --- 
a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -334,37 +334,41 @@ impl Pallet { false => Self::get_epoch_index(netuid, cur_block), }; - TimelockedWeightCommits::::try_mutate(netuid, cur_epoch, |commits| -> DispatchResult { - // 7. Verify that the number of unrevealed commits is within the allowed limit. + TimelockedWeightCommits::::try_mutate( + netuid_index, + cur_epoch, + |commits| -> DispatchResult { + // 7. Verify that the number of unrevealed commits is within the allowed limit. - let unrevealed_commits_for_who = commits - .iter() - .filter(|(account, _, _, _)| account == &who) - .count(); - ensure!( - unrevealed_commits_for_who < 10, - Error::::TooManyUnrevealedCommits - ); + let unrevealed_commits_for_who = commits + .iter() + .filter(|(account, _, _, _)| account == &who) + .count(); + ensure!( + unrevealed_commits_for_who < 10, + Error::::TooManyUnrevealedCommits + ); - // 8. Append the new commit with calculated reveal blocks. - // Hash the commit before it is moved, for the event - let commit_hash = BlakeTwo256::hash(&commit); - commits.push_back((who.clone(), cur_block, commit, reveal_round)); + // 8. Append the new commit with calculated reveal blocks. + // Hash the commit before it is moved, for the event + let commit_hash = BlakeTwo256::hash(&commit); + commits.push_back((who.clone(), cur_block, commit, reveal_round)); - // 9. Emit the WeightsCommitted event - Self::deposit_event(Event::TimelockedWeightsCommitted( - who.clone(), - netuid, - commit_hash, - reveal_round, - )); + // 9. Emit the WeightsCommitted event + Self::deposit_event(Event::TimelockedWeightsCommitted( + who.clone(), + netuid_index, + commit_hash, + reveal_round, + )); - // 10. Update the last commit block for the hotkey's UID. - Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); + // 10. Update the last commit block for the hotkey's UID. + Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); - // 11. 
Return success. - Ok(()) - }) + // 11. Return success. + Ok(()) + }, + ) } /// ---- The implementation for revealing committed weights. diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index e93aab7669..f19d8dec4d 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -22,7 +22,7 @@ use sp_io::hashing::twox_128; use sp_runtime::traits::Zero; use substrate_fixed::types::I96F32; use substrate_fixed::types::extra::U2; -use subtensor_runtime_common::TaoCurrency; +use subtensor_runtime_common::{NetUidStorageIndex, TaoCurrency}; #[allow(clippy::arithmetic_side_effects)] fn close(value: u64, target: u64, eps: u64) { @@ -1063,10 +1063,17 @@ fn test_migrate_crv3_commits_add_block() { let old_queue: VecDeque<_> = VecDeque::from(vec![(who, ciphertext.clone(), round)]); - CRV3WeightCommits::::insert(netuid, epoch, old_queue.clone()); + CRV3WeightCommits::::insert( + NetUidStorageIndex::from(netuid), + epoch, + old_queue.clone(), + ); // Sanity: entry decodes under old alias - assert_eq!(CRV3WeightCommits::::get(netuid, epoch), old_queue); + assert_eq!( + CRV3WeightCommits::::get(NetUidStorageIndex::from(netuid), epoch), + old_queue + ); assert!( !HasMigrationRun::::get(MIG_NAME.to_vec()), @@ -1091,11 +1098,11 @@ fn test_migrate_crv3_commits_add_block() { // Old storage must be empty (drained) assert!( - CRV3WeightCommits::::get(netuid, epoch).is_empty(), + CRV3WeightCommits::::get(NetUidStorageIndex::from(netuid), epoch).is_empty(), "old queue should have been drained" ); - let new_q = CRV3WeightCommitsV2::::get(netuid, epoch); + let new_q = CRV3WeightCommitsV2::::get(NetUidStorageIndex::from(netuid), epoch); assert_eq!(new_q.len(), 1, "exactly one migrated element expected"); let (who2, commit_block, cipher2, round2) = new_q.front().cloned().unwrap(); @@ -1318,18 +1325,23 @@ fn test_migrate_crv3_v2_to_timelocked() { VecDeque::from(vec![(who, commit_block, ciphertext.clone(), 
round)]); // Insert under the deprecated alias - CRV3WeightCommitsV2::::insert(netuid, epoch, old_queue.clone()); + CRV3WeightCommitsV2::::insert( + NetUidStorageIndex::from(netuid), + epoch, + old_queue.clone(), + ); // Sanity: entry decodes under old alias assert_eq!( - CRV3WeightCommitsV2::::get(netuid, epoch), + CRV3WeightCommitsV2::::get(NetUidStorageIndex::from(netuid), epoch), old_queue, "pre-migration: old queue should be present" ); // Destination should be empty pre-migration assert!( - TimelockedWeightCommits::::get(netuid, epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), epoch) + .is_empty(), "pre-migration: destination should be empty" ); @@ -1356,12 +1368,12 @@ fn test_migrate_crv3_v2_to_timelocked() { // Old storage must be empty (drained) assert!( - CRV3WeightCommitsV2::::get(netuid, epoch).is_empty(), + CRV3WeightCommitsV2::::get(NetUidStorageIndex::from(netuid), epoch).is_empty(), "old queue should have been drained" ); // New storage must match exactly - let new_q = TimelockedWeightCommits::::get(netuid, epoch); + let new_q = TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), epoch); assert_eq!( new_q, old_queue, "migrated queue must exactly match the old queue" diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 73bc323829..7730dfcb0f 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -18,7 +18,6 @@ /// - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms /// - [ ] Subnet epoch terms persist in state /// - [ ] Subsubnet epoch terms persist in state - use super::mock::*; #[test] diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 3b3873b7d3..a52ca67607 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -5299,7 +5299,8 @@ fn test_do_commit_crv3_weights_success() { 
let cur_epoch = SubtensorModule::get_epoch_index(netuid, SubtensorModule::get_current_block_as_u64()); - let commits = TimelockedWeightCommits::::get(netuid, cur_epoch); + let commits = + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), cur_epoch); assert_eq!(commits.len(), 1); assert_eq!(commits[0].0, hotkey); assert_eq!(commits[0].2, commit_data); @@ -6154,7 +6155,8 @@ fn test_multiple_commits_by_same_hotkey_within_limit() { let cur_epoch = SubtensorModule::get_epoch_index(netuid, SubtensorModule::get_current_block_as_u64()); - let commits = TimelockedWeightCommits::::get(netuid, cur_epoch); + let commits = + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), cur_epoch); assert_eq!( commits.len(), 10, @@ -6189,7 +6191,7 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { let bounded_commit = vec![epoch as u8; 5].try_into().expect("bounded vec"); assert_ok!(TimelockedWeightCommits::::try_mutate( - netuid, + NetUidStorageIndex::from(netuid), epoch, |q| -> DispatchResult { q.push_back((hotkey, cur_block, bounded_commit, reveal_round)); @@ -6199,8 +6201,14 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { } // Sanity – both epochs presently hold a commit. - assert!(!TimelockedWeightCommits::::get(netuid, past_epoch).is_empty()); - assert!(!TimelockedWeightCommits::::get(netuid, reveal_epoch).is_empty()); + assert!( + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), past_epoch) + .is_empty() + ); + assert!( + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), reveal_epoch) + .is_empty() + ); // --------------------------------------------------------------------- // Run the reveal pass WITHOUT a pulse – only expiry housekeeping runs. 
@@ -6209,13 +6217,15 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { // past_epoch (< reveal_epoch) must be gone assert!( - TimelockedWeightCommits::::get(netuid, past_epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), past_epoch) + .is_empty(), "expired epoch {past_epoch} should be cleared" ); // reveal_epoch queue is *kept* because its commit could still be revealed later. assert!( - !TimelockedWeightCommits::::get(netuid, reveal_epoch).is_empty(), + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), reveal_epoch) + .is_empty(), "reveal-epoch {reveal_epoch} must be retained until commit can be revealed" ); }); @@ -6891,10 +6901,11 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { )); // epoch in which commit was stored - let stored_epoch = TimelockedWeightCommits::::iter_prefix(netuid) - .next() - .map(|(e, _)| e) - .expect("commit stored"); + let stored_epoch = + TimelockedWeightCommits::::iter_prefix(NetUidStorageIndex::from(netuid)) + .next() + .map(|(e, _)| e) + .expect("commit stored"); // first block of reveal epoch (commit_epoch + RP) let first_reveal_epoch = stored_epoch + SubtensorModule::get_reveal_period(netuid); @@ -6905,7 +6916,8 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { // run *one* block inside reveal epoch without pulse → commit should stay queued step_block(1); assert!( - !TimelockedWeightCommits::::get(netuid, stored_epoch).is_empty(), + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), stored_epoch) + .is_empty(), "commit must remain queued when pulse is missing" ); @@ -6933,7 +6945,8 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { assert!(!weights.is_empty(), "weights must be set after pulse"); assert!( - TimelockedWeightCommits::::get(netuid, stored_epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), stored_epoch) + .is_empty(), "queue should be empty after successful reveal" ); }); @@ -7076,7 
+7089,8 @@ fn test_reveal_crv3_commits_legacy_payload_success() { // commit should be gone assert!( - TimelockedWeightCommits::::get(netuid, commit_epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), commit_epoch) + .is_empty(), "commit storage should be cleaned after reveal" ); }); From 5dec4269c9b8a1df37b904fb9a70e890e7427348 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 28 Aug 2025 19:57:46 -0400 Subject: [PATCH 104/379] Add netuid index math tests --- pallets/subtensor/src/subnets/subsubnet.rs | 16 +++- pallets/subtensor/src/tests/subsubnet.rs | 104 ++++++++++++++++++--- 2 files changed, 102 insertions(+), 18 deletions(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index eaeadd38bd..cca8df95db 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -27,6 +27,10 @@ pub type BalanceOf = /// pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 4096; +// Theoretical maximum number of subsubnets per subnet +// GLOBAL_MAX_SUBNET_COUNT * MAX_SUBSUBNET_COUNT_PER_SUBNET should be 0x10000 +pub const MAX_SUBSUBNET_COUNT_PER_SUBNET: u8 = 16; + impl Pallet { pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUidStorageIndex { u16::from(sub_id) @@ -36,9 +40,9 @@ impl Pallet { } pub fn get_netuid_and_subid( - sub_or_netid: NetUidStorageIndex, + netuid_index: NetUidStorageIndex, ) -> Result<(NetUid, SubId), Error> { - let maybe_netuid = u16::from(sub_or_netid).checked_rem(GLOBAL_MAX_SUBNET_COUNT); + let maybe_netuid = u16::from(netuid_index).checked_rem(GLOBAL_MAX_SUBNET_COUNT); if let Some(netuid_u16) = maybe_netuid { let netuid = NetUid::from(netuid_u16); @@ -49,7 +53,7 @@ impl Pallet { ); // Extract sub_id - let sub_id_u8 = u8::try_from(u16::from(sub_or_netid).safe_div(GLOBAL_MAX_SUBNET_COUNT)) + let sub_id_u8 = u8::try_from(u16::from(netuid_index).safe_div(GLOBAL_MAX_SUBNET_COUNT)) .map_err(|_| 
Error::::SubNetworkDoesNotExist)?; let sub_id = SubId::from(sub_id_u8); @@ -99,6 +103,12 @@ impl Pallet { Error::::InvalidValue ); + // Make sure we are not allowing numbers that will break the math + ensure!( + subsubnet_count <= SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET), + Error::::InvalidValue + ); + SubsubnetCountDesired::::insert(netuid, subsubnet_count); Ok(()) } diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 7730dfcb0f..41c66f0da1 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -4,25 +4,99 @@ clippy::unwrap_used )] -/// Test plan: -/// - [ ] Netuid index math (with SubsubnetCountCurrent limiting) -/// - [ ] Emissions are split proportionally -/// - [ ] Sum of split emissions is equal to rao_emission passed to epoch -/// - [ ] Weights can be set/commited/revealed by subsubnet -/// - [ ] Rate limiting is enforced by subsubnet -/// - [ ] Bonds are applied per subsubnet -/// - [ ] Incentives are per subsubnet -/// - [ ] Subsubnet limit can be set up to 8 (with admin pallet) -/// - [ ] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block -/// - [ ] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared -/// - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms -/// - [ ] Subnet epoch terms persist in state -/// - [ ] Subsubnet epoch terms persist in state +// Run all tests +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::subsubnet --show-output + +// Test plan: +// - [x] Netuid index math (with SubsubnetCountCurrent limiting) +// - [ ] Emissions are split proportionally +// - [ ] Sum of split emissions is equal to rao_emission passed to epoch +// - [ ] Weights can be set/commited/revealed by subsubnet +// - [ ] Rate limiting is enforced by subsubnet +// - [ ] Bonds are 
applied per subsubnet +// - [ ] Incentives are per subsubnet +// - [ ] Subsubnet limit can be set up to 8 (with admin pallet) +// - [ ] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block +// - [ ] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared +// - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms +// - [ ] Subnet epoch terms persist in state +// - [ ] Subsubnet epoch terms persist in state + use super::mock::*; +use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; +use crate::*; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; #[test] fn test_index_from_netuid_and_subnet() { - new_test_ext(1).execute_with(|| {}); + new_test_ext(1).execute_with(|| { + [ + (0_u16, 0_u8), + (GLOBAL_MAX_SUBNET_COUNT / 2, 1), + (GLOBAL_MAX_SUBNET_COUNT / 2, 7), + (GLOBAL_MAX_SUBNET_COUNT / 2, 14), + (GLOBAL_MAX_SUBNET_COUNT / 2, 15), + (GLOBAL_MAX_SUBNET_COUNT - 1, 1), + (GLOBAL_MAX_SUBNET_COUNT - 1, 7), + (GLOBAL_MAX_SUBNET_COUNT - 1, 14), + (GLOBAL_MAX_SUBNET_COUNT - 1, 15), + ] + .iter() + .for_each(|(netuid, sub_id)| { + let idx = SubtensorModule::get_subsubnet_storage_index( + NetUid::from(*netuid), + SubId::from(*sub_id), + ); + let expected = *sub_id as u64 * GLOBAL_MAX_SUBNET_COUNT as u64 + *netuid as u64; + assert_eq!(idx, NetUidStorageIndex::from(expected as u16)); + }); + }); +} + +#[test] +fn test_netuid_and_subnet_from_index() { + new_test_ext(1).execute_with(|| { + [ + 0_u16, + 1, + 14, + 15, + 16, + 17, + GLOBAL_MAX_SUBNET_COUNT - 1, + GLOBAL_MAX_SUBNET_COUNT, + GLOBAL_MAX_SUBNET_COUNT + 1, + 0xFFFE / 2, + 0xFFFE, + 0xFFFF, + ] + .iter() + .for_each(|netuid_index| { + let expected_netuid = (*netuid_index as u64 % GLOBAL_MAX_SUBNET_COUNT as u64) as u16; + let expected_subid = (*netuid_index as u64 / GLOBAL_MAX_SUBNET_COUNT as u64) as u8; + + // Allow subnet ID + 
NetworksAdded::::insert(NetUid::from(expected_netuid), true); + SubsubnetCountCurrent::::insert( + NetUid::from(expected_netuid), + SubId::from(expected_subid + 1), + ); + + let (netuid, subid) = + SubtensorModule::get_netuid_and_subid(NetUidStorageIndex::from(*netuid_index)) + .unwrap(); + assert_eq!(netuid, NetUid::from(expected_netuid as u16)); + assert_eq!(subid, SubId::from(expected_subid as u8)); + }); + }); +} + +#[test] +fn test_netuid_index_math_constants() { + assert_eq!( + GLOBAL_MAX_SUBNET_COUNT as u64 * MAX_SUBSUBNET_COUNT_PER_SUBNET as u64, + 0x10000 + ); } #[test] From 9ddf6dc85f24cf504ab3993531fe9002ebdd3697 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 28 Aug 2025 20:50:45 -0400 Subject: [PATCH 105/379] Add tests for subsubnets --- pallets/subtensor/src/tests/subsubnet.rs | 266 +++++++++++++++++++++++ 1 file changed, 266 insertions(+) diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 41c66f0da1..afb6006dac 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -9,6 +9,8 @@ // Test plan: // - [x] Netuid index math (with SubsubnetCountCurrent limiting) +// - [x] Sub-subnet validity tests +// - [x] do_set_desired tests // - [ ] Emissions are split proportionally // - [ ] Sum of split emissions is equal to rao_emission passed to epoch // - [ ] Weights can be set/commited/revealed by subsubnet @@ -25,6 +27,9 @@ use super::mock::*; use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; use crate::*; +use frame_support::{assert_noop, assert_ok}; +use sp_core::U256; +use sp_std::collections::vec_deque::VecDeque; use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; #[test] @@ -99,6 +104,267 @@ fn test_netuid_index_math_constants() { ); } +#[test] +fn ensure_subsubnet_exists_ok() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 3u16.into(); + let sub_id = SubId::from(1u8); + + // ensure 
base subnet exists + NetworksAdded::::insert(NetUid::from(netuid), true); + + // Allow at least 2 sub-subnets (so sub_id = 1 is valid) + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + assert_ok!(SubtensorModule::ensure_subsubnet_exists(netuid, sub_id)); + }); +} + +#[test] +fn ensure_subsubnet_fails_when_base_subnet_missing() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 7u16.into(); + let sub_id = SubId::from(0u8); + + // Intentionally DO NOT create the base subnet + + assert_noop!( + SubtensorModule::ensure_subsubnet_exists(netuid, sub_id), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn ensure_subsubnet_fails_when_subid_out_of_range() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 9u16.into(); + NetworksAdded::::insert(NetUid::from(netuid), true); + + // Current allowed sub-subnet count is 2 => valid sub_ids: {0, 1} + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // sub_id == 2 is out of range (must be < 2) + let sub_id_eq = SubId::from(2u8); + assert_noop!( + SubtensorModule::ensure_subsubnet_exists(netuid, sub_id_eq), + Error::::SubNetworkDoesNotExist + ); + + // sub_id > 2 is also out of range + let sub_id_gt = SubId::from(3u8); + assert_noop!( + SubtensorModule::ensure_subsubnet_exists(netuid, sub_id_gt), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn do_set_desired_subsubnet_count_ok_minimal() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(3u16); + NetworksAdded::::insert(NetUid::from(3u16), true); // base subnet exists + + assert_ok!(SubtensorModule::do_set_desired_subsubnet_count( + netuid, + SubId::from(1u8) + )); + + assert_eq!(SubsubnetCountDesired::::get(netuid), SubId::from(1u8)); + }); +} + +#[test] +fn do_set_desired_subsubnet_count_ok_at_effective_cap() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(4u16); + NetworksAdded::::insert(NetUid::from(4u16), true); // base subnet exists + + // Effective bound is min(runtime 
cap, compile-time cap) + let runtime_cap = MaxSubsubnetCount::::get(); // e.g., SubId::from(8) + let compile_cap = SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET); + let bound = if runtime_cap <= compile_cap { + runtime_cap + } else { + compile_cap + }; + + assert_ok!(SubtensorModule::do_set_desired_subsubnet_count( + netuid, bound + )); + assert_eq!(SubsubnetCountDesired::::get(netuid), bound); + }); +} + +#[test] +fn do_set_desired_fails_when_base_subnet_missing() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(7u16); + // No NetworksAdded insert => base subnet absent + + assert_noop!( + SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(1u8)), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn do_set_desired_fails_for_zero() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(9u16); + NetworksAdded::::insert(NetUid::from(9u16), true); // base subnet exists + + assert_noop!( + SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(0u8)), + Error::::InvalidValue + ); + }); +} + +#[test] +fn do_set_desired_fails_when_over_runtime_cap() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(11u16); + NetworksAdded::::insert(NetUid::from(11u16), true); // base subnet exists + + // Runtime cap is 8 (per function), so 9 must fail + assert_noop!( + SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(9u8)), + Error::::InvalidValue + ); + }); +} + +#[test] +fn do_set_desired_fails_when_over_compile_time_cap() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(12u16); + NetworksAdded::::insert(NetUid::from(12u16), true); // base subnet exists + + let too_big = SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET + 1); + assert_noop!( + SubtensorModule::do_set_desired_subsubnet_count(netuid, too_big), + Error::::InvalidValue + ); + }); +} + +#[test] +fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { + new_test_ext(1).execute_with(|| { + let hotkey = 
U256::from(1); + + // Base subnet exists + let netuid = NetUid::from(42u16); + NetworksAdded::::insert(NetUid::from(42u16), true); + + // super_block = SuperBlockTempos() * Tempo(netuid) + Tempo::::insert(netuid, 1u16); + let super_block = + u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)); + + // Choose counts so result is deterministic for ANY decrease-per-superblock. + // Let dec = GlobalSubsubnetDecreasePerSuperblock(); set old = dec + 3. + let dec: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); + let old = SubId::from(dec.saturating_add(3)); // ≥3 + let desired = SubId::from(1u8); + // min_possible = max(old - dec, 1) = 3 → new_count = 3 + SubsubnetCountCurrent::::insert(netuid, old); + SubsubnetCountDesired::::insert(netuid, desired); + + // Seed data at a kept subid (2) and a removed subid (3) + let idx_keep = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); + let idx_rm3 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(3u8)); + + Weights::::insert(idx_keep, 0u16, vec![(1u16, 1u16)]); + Incentive::::insert(idx_keep, vec![1u16]); + LastUpdate::::insert(idx_keep, vec![123u64]); + Bonds::::insert(idx_keep, 0u16, vec![(1u16, 2u16)]); + WeightCommits::::insert( + idx_keep, + hotkey, + VecDeque::from([(sp_core::H256::zero(), 1u64, 2u64, 3u64)]), + ); + TimelockedWeightCommits::::insert( + idx_keep, + 1u64, + VecDeque::from([(hotkey, 1u64, Default::default(), Default::default())]), + ); + + Weights::::insert(idx_rm3, 0u16, vec![(9u16, 9u16)]); + Incentive::::insert(idx_rm3, vec![9u16]); + LastUpdate::::insert(idx_rm3, vec![999u64]); + Bonds::::insert(idx_rm3, 0u16, vec![(9u16, 9u16)]); + WeightCommits::::insert( + idx_rm3, + hotkey, + VecDeque::from([(sp_core::H256::zero(), 1u64, 2u64, 3u64)]), + ); + TimelockedWeightCommits::::insert( + idx_rm3, + 1u64, + VecDeque::from([(hotkey, 1u64, Default::default(), Default::default())]), + ); + + // Act exactly on a super-block boundary + 
SubtensorModule::update_subsubnet_counts_if_needed(2 * super_block); + + // New count is 3 + assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(3u8)); + + // Kept prefix intact + assert_eq!(Incentive::::get(idx_keep), vec![1u16]); + assert!(Weights::::iter_prefix(idx_keep).next().is_some()); + assert!(LastUpdate::::contains_key(idx_keep)); + assert!(Bonds::::iter_prefix(idx_keep).next().is_some()); + assert!(WeightCommits::::contains_key(idx_keep, hotkey)); + assert!(TimelockedWeightCommits::::contains_key( + idx_keep, 1u64 + )); + + // Removed prefix (subid 3) cleared + assert!(Weights::::iter_prefix(idx_rm3).next().is_none()); + assert_eq!(Incentive::::get(idx_rm3), Vec::::new()); + assert!(!LastUpdate::::contains_key(idx_rm3)); + assert!(Bonds::::iter_prefix(idx_rm3).next().is_none()); + assert!(!WeightCommits::::contains_key(idx_rm3, hotkey)); + assert!(!TimelockedWeightCommits::::contains_key( + idx_rm3, 1u64 + )); + }); +} + +#[test] +fn update_subsubnet_counts_no_change_when_not_superblock() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(100u16); + NetworksAdded::::insert(NetUid::from(100u16), true); + + Tempo::::insert(netuid, 1u16); + let super_block = + u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)); + + // Setup counts as in the previous test + let dec: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); + let old = SubId::from(dec.saturating_add(3)); + let desired = SubId::from(1u8); + SubsubnetCountCurrent::::insert(netuid, old); + SubsubnetCountDesired::::insert(netuid, desired); + + // Marker value at a subid that would be kept if a change happened + let idx_mark = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); + Incentive::::insert(idx_mark, vec![77u16]); + + // Act on a non-boundary + SubtensorModule::update_subsubnet_counts_if_needed(super_block - 1); + + // Nothing changes + assert_eq!(SubsubnetCountCurrent::::get(netuid), old); + 
assert_eq!(Incentive::::get(idx_mark), vec![77u16]); + }); +} + #[test] fn test_subsubnet_emission_proportions() { new_test_ext(1).execute_with(|| {}); From 163c3f755f32aaeae36cc5f773bbb3ca7176f78f Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 28 Aug 2025 21:04:29 -0400 Subject: [PATCH 106/379] Fix Bonds cleanup on subnet removal --- pallets/subtensor/src/coinbase/root.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 6d2824aec9..74c75f5624 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -430,7 +430,10 @@ impl Pallet { let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let keys = Keys::::iter_prefix(netuid).collect::>(); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + } // --- 8. Removes the weights for this subnet (do not remove). for subid in 0..subsubnets { From db6614ec07aaaeb3c35d422621f749000577eca7 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 28 Aug 2025 19:15:20 -0700 Subject: [PATCH 107/379] don't dissolve root --- pallets/subtensor/src/coinbase/root.rs | 4 ++-- pallets/subtensor/src/tests/networks.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 8f6d3c3530..aaa85522f5 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -367,7 +367,7 @@ impl Pallet { pub fn do_dissolve_network(netuid: NetUid) -> dispatch::DispatchResult { // 1. --- The network exists? 
ensure!( - Self::if_subnet_exist(netuid), + Self::if_subnet_exist(netuid) && netuid != NetUid::ROOT, Error::::SubNetworkDoesNotExist ); @@ -418,7 +418,7 @@ impl Pallet { for (uid_i, weights_i) in as frame_support::storage::IterableStorageDoubleMap< NetUid, u16, - sp_std::vec::Vec<(u16, u16)>, + Vec<(u16, u16)>, >>::iter_prefix(NetUid::ROOT) { let mut modified_weights = weights_i.clone(); diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 4c00266587..d62bc4fd2f 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -1114,7 +1114,7 @@ fn prune_selection_complex_state_exhaustive() { // Remove n5; now n6 (price=0) should be selected. // This validates robustness to holes / non-contiguous netuids. // --------------------------------------------------------------------- - SubtensorModule::remove_network(n5); + SubtensorModule::do_dissolve_network(n5).expect("Expected not to panic"); assert_eq!( SubtensorModule::get_network_to_prune(), Some(n6), From 9760e009ace511755bd1378866c9c0000cc27fac Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 29 Aug 2025 10:53:32 +0000 Subject: [PATCH 108/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index aed0c150de..d9277d2c8e 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -120,9 +120,9 @@ mod dispatches { /// - On failure for each failed item in the batch. 
/// #[pallet::call_index(80)] - #[pallet::weight((Weight::from_parts(95_160_000, 0) - .saturating_add(T::DbWeight::get().reads(14)) - .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(18_930_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(0_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, netuids: Vec>, @@ -2201,7 +2201,7 @@ mod dispatches { /// * commit_reveal_version (`u16`): /// - The client (bittensor-drand) version #[pallet::call_index(113)] - #[pallet::weight((Weight::from_parts(64_530_000, 0) + #[pallet::weight((Weight::from_parts(80_110_000, 0) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( From 485b95fdd0cda81617224b3dc46b59239d8379bc Mon Sep 17 00:00:00 2001 From: open-junius Date: Fri, 29 Aug 2025 22:29:04 +0800 Subject: [PATCH 109/379] bump versoin --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 00cc24b411..679e5e55b2 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 307, + spec_version: 308, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From d84daecbbf3eaafdff40ddfef37b98e69aed1cd5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 29 Aug 2025 16:39:28 +0000 Subject: [PATCH 110/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index aed0c150de..60f407c33f 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -120,9 +120,9 @@ mod dispatches { /// - On failure for each failed item in the batch. /// #[pallet::call_index(80)] - #[pallet::weight((Weight::from_parts(95_160_000, 0) - .saturating_add(T::DbWeight::get().reads(14)) - .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(18_910_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(0_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, netuids: Vec>, From c42de52ca00ac396a503ca68f9a7d3ef4f7b7fb4 Mon Sep 17 00:00:00 2001 From: unconst Date: Fri, 29 Aug 2025 13:23:30 -0500 Subject: [PATCH 111/379] commit Cargo.lock --- pallets/admin-utils/src/lib.rs | 38 ++++- pallets/subtensor/src/subnets/uids.rs | 143 +++++++++++++++++-- pallets/subtensor/src/utils/rate_limiting.rs | 7 +- 3 files changed, 168 insertions(+), 20 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 0b1498fd44..74357ea675 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -677,12 +677,12 @@ pub mod pallet { ensure!( min_burn < TaoCurrency::from(1_000_000_000), Error::::ValueNotInBounds - ) + ); // Min burn must be less than max burn ensure!( - min_burn > 
pallet_subtensor::Pallet::::MaxBurn(netuid), + min_burn > pallet_subtensor::Pallet::::get_max_burn(netuid), Error::::ValueNotInBounds - ) + ); pallet_subtensor::Pallet::::set_min_burn(netuid, min_burn); log::debug!("MinBurnSet( netuid: {netuid:?} min_burn: {min_burn:?} ) "); Ok(()) @@ -709,12 +709,12 @@ pub mod pallet { ensure!( max_burn > TaoCurrency::from(100_000_000), Error::::ValueNotInBounds - ) + ); // Max burn must be greater than min burn ensure!( - max_burn > pallet_subtensor::Pallet::::MinBurn(netuid), + max_burn > pallet_subtensor::Pallet::::get_min_burn(netuid), Error::::ValueNotInBounds - ) + ); pallet_subtensor::Pallet::::set_max_burn(netuid, max_burn); log::debug!("MaxBurnSet( netuid: {netuid:?} max_burn: {max_burn:?} ) "); Ok(()) @@ -1703,6 +1703,32 @@ pub mod pallet { pallet_subtensor::Pallet::::set_owner_immune_neuron_limit(netuid, immune_neurons)?; Ok(()) } + + /// Sets the number of immune owner neurons + #[pallet::call_index(74)] + #[pallet::weight(Weight::from_parts(15_000_000, 0) + .saturating_add(::DbWeight::get().reads(1_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_trim_to_max_allowed_uids( + origin: OriginFor, + netuid: NetUid, + max_n: u16, + ) -> DispatchResult { + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; + if let Ok(RawOrigin::Signed(who)) = origin.into() { + ensure!( + pallet_subtensor::Pallet::::passes_rate_limit_on_subnet( + &TransactionType::SetMaxAllowedUIDS, + &who, + netuid, + ), + pallet_subtensor::Error::::TxRateLimitExceeded + ); + } + pallet_subtensor::Pallet::::trim_to_max_allowed_uids(netuid, max_n)?; + Ok(()) + } + } } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index f5a14c490b..e31701c9cc 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -16,17 +16,6 @@ impl Pallet { } } - /// Resets the trust, emission, consensus, incentive, dividends of the neuron to default 
- pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { - let neuron_index: usize = neuron_uid.into(); - Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); - Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. - } - /// Replace the neuron under this uid. pub fn replace_neuron( netuid: NetUid, @@ -107,6 +96,138 @@ impl Pallet { IsNetworkMember::::insert(new_hotkey.clone(), netuid, true); // Fill network is member. } + /// Appends the uid to the network. + pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { + let neuron_index: usize = neuron_uid.into(); + Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); + Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. + } + + pub fn trim_to_max_allowed_uids(netuid: NetUid, max_n: u16) -> DispatchResult { + + // Reasonable limits + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + ensure!( max_n > 16, Error::::InvalidValue ); + ensure!( max_n <= Self::get_max_allowed_uids( netuid ), Error::::InvalidValue ); + + // Set the value. + MaxAllowedUids::::insert(netuid, max_n); + + // Check if we need to trim. + let current_n: u16 = Self::get_subnetwork_n(netuid); + + // We need to trim, get rid of values between max_n and current_n. 
+ if current_n > max_n { + + let ranks: Vec = Rank::::get(netuid); + let trimmed_ranks: Vec = ranks.into_iter().take(max_n as usize).collect(); + Rank::::insert(netuid, trimmed_ranks); + + let trust: Vec = Trust::::get(netuid); + let trimmed_trust: Vec = trust.into_iter().take(max_n as usize).collect(); + Trust::::insert(netuid, trimmed_trust); + + let active: Vec = Active::::get(netuid); + let trimmed_active: Vec = active.into_iter().take(max_n as usize).collect(); + Active::::insert(netuid, trimmed_active); + + let emission: Vec = Emission::::get(netuid); + let trimmed_emission: Vec = emission.into_iter().take(max_n as usize).collect(); + Emission::::insert(netuid, trimmed_emission); + + let consensus: Vec = Consensus::::get(netuid); + let trimmed_consensus: Vec = consensus.into_iter().take(max_n as usize).collect(); + Consensus::::insert(netuid, trimmed_consensus); + + let incentive: Vec = Incentive::::get(netuid); + let trimmed_incentive: Vec = incentive.into_iter().take(max_n as usize).collect(); + Incentive::::insert(netuid, trimmed_incentive); + + let dividends: Vec = Dividends::::get(netuid); + let trimmed_dividends: Vec = dividends.into_iter().take(max_n as usize).collect(); + Dividends::::insert(netuid, trimmed_dividends); + + let lastupdate: Vec = LastUpdate::::get(netuid); + let trimmed_lastupdate: Vec = lastupdate.into_iter().take(max_n as usize).collect(); + LastUpdate::::insert(netuid, trimmed_lastupdate); + + let pruning_scores: Vec = PruningScores::::get(netuid); + let trimmed_pruning_scores: Vec = pruning_scores.into_iter().take(max_n as usize).collect(); + PruningScores::::insert(netuid, trimmed_pruning_scores); + + let vtrust: Vec = ValidatorTrust::::get(netuid); + let trimmed_vtrust: Vec = vtrust.into_iter().take(max_n as usize).collect(); + ValidatorTrust::::insert(netuid, trimmed_vtrust); + + let vpermit: Vec = ValidatorPermit::::get(netuid); + let trimmed_vpermit: Vec = vpermit.into_iter().take(max_n as usize).collect(); + 
ValidatorPermit::::insert(netuid, trimmed_vpermit); + + let stake_weight: Vec = StakeWeight::::get(netuid); + let trimmed_stake_weight: Vec = stake_weight.into_iter().take(max_n as usize).collect(); + StakeWeight::::insert(netuid, trimmed_stake_weight); + + // Trim UIDs and Keys by removing entries with UID >= max_n (since UIDs are 0-indexed) + // UIDs range from 0 to current_n-1, so we remove UIDs from max_n to current_n-1 + for uid in max_n..current_n { + if let Some(hotkey) = Keys::::try_get(netuid, uid).ok() { + Uids::::remove(netuid, &hotkey); + // Remove IsNetworkMember association for the hotkey + IsNetworkMember::::remove(&hotkey, netuid); + // Remove last hotkey emission for the hotkey + LastHotkeyEmissionOnNetuid::::remove(&hotkey, netuid); + // Remove alpha dividends for the hotkey + AlphaDividendsPerSubnet::::remove(netuid, &hotkey); + // Remove tao dividends for the hotkey + TaoDividendsPerSubnet::::remove(netuid, &hotkey); + } + Keys::::remove(netuid, uid); + // Remove block at registration for the uid + BlockAtRegistration::::remove(netuid, uid); + } + + // Trim weights and bonds for removed UIDs + for uid in max_n..current_n { + Weights::::remove(netuid, uid); + Bonds::::remove(netuid, uid); + } + + // Trim axons, certificates, and prometheus info for removed hotkeys + for uid in max_n..current_n { + if let Some(hotkey) = Keys::::try_get(netuid, uid).ok() { + Axons::::remove(netuid, &hotkey); + NeuronCertificates::::remove(netuid, &hotkey); + Prometheus::::remove(netuid, &hotkey); + } + } + + // Trim weight and bond connections to removed UIDs for remaining neurons + // UIDs 0 to max_n-1 are kept, so we iterate through these valid UIDs + for uid in 0..max_n { + Weights::::mutate(netuid, uid, |weights| { + weights.retain(|(target_uid, _)| *target_uid < max_n); + }); + Bonds::::mutate(netuid, uid, |bonds| { + bonds.retain(|(target_uid, _)| *target_uid < max_n); + }); + } + + // Update the subnetwork size + SubnetworkN::::insert(netuid, max_n); + } + + 
// --- Ok and done. + Ok(()) + } + + /// Returns true if the uid is set on the network. /// pub fn is_uid_exist_on_network(netuid: NetUid, uid: u16) -> bool { diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index eeb5b96ddb..9cb2d4ffb3 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -11,6 +11,7 @@ pub enum TransactionType { RegisterNetwork, SetWeightsVersionKey, SetSNOwnerHotkey, + SetMaxAllowedUIDS, } /// Implement conversion from TransactionType to u16 @@ -23,6 +24,7 @@ impl From for u16 { TransactionType::RegisterNetwork => 3, TransactionType::SetWeightsVersionKey => 4, TransactionType::SetSNOwnerHotkey => 5, + TransactionType::SetMaxAllowedUIDS => 6, } } } @@ -36,6 +38,7 @@ impl From for TransactionType { 3 => TransactionType::RegisterNetwork, 4 => TransactionType::SetWeightsVersionKey, 5 => TransactionType::SetSNOwnerHotkey, + 6 => TransactionType::SetMaxAllowedUIDS, _ => TransactionType::Unknown, } } @@ -50,7 +53,7 @@ impl Pallet { TransactionType::SetChildren => 150, // 30 minutes TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), TransactionType::RegisterNetwork => NetworkRateLimit::::get(), - + TransactionType::SetMaxAllowedUIDS => 7200 * 30, TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, } @@ -62,7 +65,6 @@ impl Pallet { TransactionType::SetWeightsVersionKey => (Tempo::::get(netuid) as u64) .saturating_mul(WeightsVersionKeyRateLimit::::get()), TransactionType::SetSNOwnerHotkey => DefaultSetSNOwnerHotkeyRateLimit::::get(), - _ => Self::get_rate_limit(tx_type), } } @@ -89,7 +91,6 @@ impl Pallet { let block: u64 = Self::get_current_block_as_u64(); let limit: u64 = Self::get_rate_limit_on_subnet(tx_type, netuid); let last_block: u64 = Self::get_last_transaction_block_on_subnet(hotkey, netuid, tx_type); - Self::check_passes_rate_limit(limit, block, last_block) } From 
bf7478ea122b6a500bb25b30d27a9603f583691e Mon Sep 17 00:00:00 2001 From: unconst Date: Fri, 29 Aug 2025 13:24:47 -0500 Subject: [PATCH 112/379] cargo clippy --- pallets/subtensor/src/subnets/uids.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index e31701c9cc..fe0380e583 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -177,7 +177,7 @@ impl Pallet { // Trim UIDs and Keys by removing entries with UID >= max_n (since UIDs are 0-indexed) // UIDs range from 0 to current_n-1, so we remove UIDs from max_n to current_n-1 for uid in max_n..current_n { - if let Some(hotkey) = Keys::::try_get(netuid, uid).ok() { + if let Ok(hotkey) = Keys::::try_get(netuid, uid) { Uids::::remove(netuid, &hotkey); // Remove IsNetworkMember association for the hotkey IsNetworkMember::::remove(&hotkey, netuid); @@ -201,7 +201,7 @@ impl Pallet { // Trim axons, certificates, and prometheus info for removed hotkeys for uid in max_n..current_n { - if let Some(hotkey) = Keys::::try_get(netuid, uid).ok() { + if let Ok(hotkey) = Keys::::try_get(netuid, uid) { Axons::::remove(netuid, &hotkey); NeuronCertificates::::remove(netuid, &hotkey); Prometheus::::remove(netuid, &hotkey); From 7e11d96fc75f64c9530f590f12b14833d5877d55 Mon Sep 17 00:00:00 2001 From: unconst Date: Fri, 29 Aug 2025 13:25:49 -0500 Subject: [PATCH 113/379] cargo fmt --- pallets/admin-utils/src/lib.rs | 7 +++---- pallets/subtensor/src/subnets/uids.rs | 28 +++++++++++++++------------ 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 74357ea675..38654e0cae 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1710,9 +1710,9 @@ pub mod pallet { .saturating_add(::DbWeight::get().reads(1_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn 
sudo_trim_to_max_allowed_uids( - origin: OriginFor, - netuid: NetUid, - max_n: u16, + origin: OriginFor, + netuid: NetUid, + max_n: u16, ) -> DispatchResult { pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; if let Ok(RawOrigin::Signed(who)) = origin.into() { @@ -1728,7 +1728,6 @@ pub mod pallet { pallet_subtensor::Pallet::::trim_to_max_allowed_uids(netuid, max_n)?; Ok(()) } - } } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index fe0380e583..69c64b3817 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -106,26 +106,27 @@ impl Pallet { Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. } - - pub fn trim_to_max_allowed_uids(netuid: NetUid, max_n: u16) -> DispatchResult { + pub fn trim_to_max_allowed_uids(netuid: NetUid, max_n: u16) -> DispatchResult { // Reasonable limits ensure!( Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist ); - ensure!( max_n > 16, Error::::InvalidValue ); - ensure!( max_n <= Self::get_max_allowed_uids( netuid ), Error::::InvalidValue ); + ensure!(max_n > 16, Error::::InvalidValue); + ensure!( + max_n <= Self::get_max_allowed_uids(netuid), + Error::::InvalidValue + ); // Set the value. MaxAllowedUids::::insert(netuid, max_n); // Check if we need to trim. let current_n: u16 = Self::get_subnetwork_n(netuid); - + // We need to trim, get rid of values between max_n and current_n. 
if current_n > max_n { - let ranks: Vec = Rank::::get(netuid); let trimmed_ranks: Vec = ranks.into_iter().take(max_n as usize).collect(); Rank::::insert(netuid, trimmed_ranks); @@ -139,7 +140,8 @@ impl Pallet { Active::::insert(netuid, trimmed_active); let emission: Vec = Emission::::get(netuid); - let trimmed_emission: Vec = emission.into_iter().take(max_n as usize).collect(); + let trimmed_emission: Vec = + emission.into_iter().take(max_n as usize).collect(); Emission::::insert(netuid, trimmed_emission); let consensus: Vec = Consensus::::get(netuid); @@ -155,11 +157,13 @@ impl Pallet { Dividends::::insert(netuid, trimmed_dividends); let lastupdate: Vec = LastUpdate::::get(netuid); - let trimmed_lastupdate: Vec = lastupdate.into_iter().take(max_n as usize).collect(); + let trimmed_lastupdate: Vec = + lastupdate.into_iter().take(max_n as usize).collect(); LastUpdate::::insert(netuid, trimmed_lastupdate); let pruning_scores: Vec = PruningScores::::get(netuid); - let trimmed_pruning_scores: Vec = pruning_scores.into_iter().take(max_n as usize).collect(); + let trimmed_pruning_scores: Vec = + pruning_scores.into_iter().take(max_n as usize).collect(); PruningScores::::insert(netuid, trimmed_pruning_scores); let vtrust: Vec = ValidatorTrust::::get(netuid); @@ -171,9 +175,10 @@ impl Pallet { ValidatorPermit::::insert(netuid, trimmed_vpermit); let stake_weight: Vec = StakeWeight::::get(netuid); - let trimmed_stake_weight: Vec = stake_weight.into_iter().take(max_n as usize).collect(); + let trimmed_stake_weight: Vec = + stake_weight.into_iter().take(max_n as usize).collect(); StakeWeight::::insert(netuid, trimmed_stake_weight); - + // Trim UIDs and Keys by removing entries with UID >= max_n (since UIDs are 0-indexed) // UIDs range from 0 to current_n-1, so we remove UIDs from max_n to current_n-1 for uid in max_n..current_n { @@ -226,7 +231,6 @@ impl Pallet { // --- Ok and done. Ok(()) } - /// Returns true if the uid is set on the network. 
/// From db2aa8c577ddf08fcc39d853d6e0116769866b27 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Fri, 29 Aug 2025 17:51:05 -0400 Subject: [PATCH 114/379] Add more tests for subsubnets. Merge test plan with bit --- pallets/subtensor/src/tests/subsubnet.rs | 230 +++++++++++++++++++++-- 1 file changed, 219 insertions(+), 11 deletions(-) diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index afb6006dac..09e43b6f13 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -11,22 +11,29 @@ // - [x] Netuid index math (with SubsubnetCountCurrent limiting) // - [x] Sub-subnet validity tests // - [x] do_set_desired tests -// - [ ] Emissions are split proportionally -// - [ ] Sum of split emissions is equal to rao_emission passed to epoch +// - [x] Emissions are split proportionally +// - [x] Sum of split emissions is equal to rao_emission passed to epoch +// - [ ] Only subnet owner or root can set desired subsubnet count // - [ ] Weights can be set/commited/revealed by subsubnet -// - [ ] Rate limiting is enforced by subsubnet -// - [ ] Bonds are applied per subsubnet -// - [ ] Incentives are per subsubnet -// - [ ] Subsubnet limit can be set up to 8 (with admin pallet) -// - [ ] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block -// - [ ] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared +// - [ ] Prevent weight setting/commitment/revealing above subsubnet_limit_in_force +// - [ ] When a miner is deregistered, their weights are cleaned across all subsubnets +// - [ ] Weight setting rate limiting is enforced by subsubnet +// - [x] Bonds are applied per subsubnet +// - [x] Incentives are per subsubnet +// - [x] Per-subsubnet incentives are distributed proportionally to miner weights +// - [x] Subsubnet limit can be set up to 8 (with admin pallet) +// - [x] When subsubnet 
limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block +// - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared // - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms // - [ ] Subnet epoch terms persist in state -// - [ ] Subsubnet epoch terms persist in state +// - [x] Subsubnet epoch terms persist in state +// - [ ] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake +// - [ ] Miner with no weights on any subsubnet receives no reward use super::mock::*; use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; use crate::*; +use approx::assert_abs_diff_eq; use frame_support::{assert_noop, assert_ok}; use sp_core::U256; use sp_std::collections::vec_deque::VecDeque; @@ -366,6 +373,207 @@ fn update_subsubnet_counts_no_change_when_not_superblock() { } #[test] -fn test_subsubnet_emission_proportions() { - new_test_ext(1).execute_with(|| {}); +fn split_emissions_even_division() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(5u16); + SubsubnetCountCurrent::::insert(netuid, SubId::from(5u8)); // 5 sub-subnets + let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(25u64)); + assert_eq!(out, vec![AlphaCurrency::from(5u64); 5]); + }); +} + +#[test] +fn split_emissions_rounding_to_first() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(6u16); + SubsubnetCountCurrent::::insert(netuid, SubId::from(4u8)); // 4 sub-subnets + let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(10u64)); // 10 / 4 = 2, rem=2 + assert_eq!( + out, + vec![ + AlphaCurrency::from(4u64), // 2 + remainder(2) + AlphaCurrency::from(2u64), + AlphaCurrency::from(2u64), + AlphaCurrency::from(2u64), + ] + ); + }); +} + +/// Seeds a 2-neuron and 2-subsubnet subnet so `epoch_subsubnet` produces non-zero +/// incentives & dividends. 
+/// Returns the sub-subnet storage index. +pub fn mock_epoch_state(netuid: NetUid, ck0: U256, hk0: U256, ck1: U256, hk1: U256) { + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + + // Base subnet exists; 2 neurons. + NetworksAdded::::insert(NetUid::from(u16::from(netuid)), true); + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + SubnetworkN::::insert(netuid, 2); + + // Register two neurons (UID 0,1) → keys drive `get_subnetwork_n`. + Keys::::insert(netuid, 0u16, hk0.clone()); + Keys::::insert(netuid, 1u16, hk1.clone()); + + // Make both ACTIVE: recent updates & old registrations. + Tempo::::insert(netuid, 1u16); + ActivityCutoff::::insert(netuid, u16::MAX); // large cutoff keeps them active + LastUpdate::::insert(idx0, vec![2, 2]); + LastUpdate::::insert(idx1, vec![2, 2]); + BlockAtRegistration::::insert(netuid, 0, 1u64); // registered long ago + BlockAtRegistration::::insert(netuid, 1, 1u64); + + // Add stake + let stake_amount = AlphaCurrency::from(1_000_000_000); // 1 Alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk0, + &ck0, + netuid, + stake_amount, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + stake_amount, + ); + + // Non-zero stake above threshold; permit both as validators. + StakeThreshold::::put(0u64); + ValidatorPermit::::insert(netuid, vec![true, true]); + + // Simple weights, setting for each other on both subsubnets + Weights::::insert(idx0, 0, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + Weights::::insert(idx0, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + Weights::::insert(idx1, 0, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + Weights::::insert(idx1, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + + // Keep weight masking off for simplicity. 
+ CommitRevealWeightsEnabled::::insert(netuid, false); + Yuma3On::::insert(netuid, false); +} + +pub fn mock_3_neurons(netuid: NetUid, hk: U256) { + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + + SubnetworkN::::insert(netuid, 3); + Keys::::insert(netuid, 2u16, hk.clone()); + LastUpdate::::insert(idx0, vec![2, 2, 2]); + LastUpdate::::insert(idx1, vec![2, 2, 2]); + BlockAtRegistration::::insert(netuid, 2, 1u64); +} + +#[test] +fn epoch_with_subsubnets_produces_per_subsubnet_incentive() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let actual_incentive_sub0 = Incentive::::get(idx0); + let actual_incentive_sub1 = Incentive::::get(idx1); + let expected_incentive = 0xFFFF / 2; + assert_eq!(actual_incentive_sub0[0], expected_incentive); + assert_eq!(actual_incentive_sub0[1], expected_incentive); + assert_eq!(actual_incentive_sub1[0], expected_incentive); + assert_eq!(actual_incentive_sub1[1], expected_incentive); + }); +} + +#[test] +fn epoch_with_subsubnets_updates_bonds() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, 
ck0, hk0, ck1, hk1); + + // Cause bonds to be asymmetric on diff subsubnets + Weights::::insert(idx1, 0, vec![(0u16, 0xFFFF), (1u16, 0)]); + Weights::::insert(idx1, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let bonds_uid0_sub0 = Bonds::::get(idx0, 0); + let bonds_uid1_sub0 = Bonds::::get(idx0, 1); + let bonds_uid0_sub1 = Bonds::::get(idx1, 0); + let bonds_uid1_sub1 = Bonds::::get(idx1, 1); + + // Subsubnet 0: UID0 fully bonds to UID1, UID1 fully bonds to UID0 + assert_eq!(bonds_uid0_sub0, vec![(1, 65535)]); + assert_eq!(bonds_uid1_sub0, vec![(0, 65535)]); + + // Subsubnet 1: UID0 no bond to UID1, UID1 fully bonds to UID0 + assert_eq!(bonds_uid0_sub1, vec![]); + assert_eq!(bonds_uid1_sub1, vec![(0, 65535)]); + }); +} + +#[test] +fn epoch_with_subsubnets_incentives_proportional_to_weights() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let hk2 = U256::from(6); + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + mock_3_neurons(netuid, hk2); + + // Need 3 neurons for this: One validator that will be setting weights to 2 miners + ValidatorPermit::::insert(netuid, vec![true, false, false]); + + // Set greater weight to uid1 on sub-subnet 0 and to uid2 on subsubnet 1 + Weights::::insert(idx0, 0, vec![(1u16, 0xFFFF / 5 * 4), (2u16, 0xFFFF / 5)]); + Weights::::insert(idx1, 0, vec![(1u16, 0xFFFF / 5), (2u16, 0xFFFF / 5 * 4)]); + + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let actual_incentive_sub0 = Incentive::::get(idx0); + let actual_incentive_sub1 = Incentive::::get(idx1); + + let expected_incentive_high = 0xFFFF / 5 * 4; + let 
expected_incentive_low = 0xFFFF / 5; + assert_abs_diff_eq!( + actual_incentive_sub0[1], + expected_incentive_high, + epsilon = 1 + ); + assert_abs_diff_eq!( + actual_incentive_sub0[2], + expected_incentive_low, + epsilon = 1 + ); + assert_abs_diff_eq!( + actual_incentive_sub1[1], + expected_incentive_low, + epsilon = 1 + ); + assert_abs_diff_eq!( + actual_incentive_sub1[2], + expected_incentive_high, + epsilon = 1 + ); + }); } From 07a2f7059e54fdfcbb1570c0f0710c7c95903c9a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 30 Aug 2025 15:29:50 +0000 Subject: [PATCH 115/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 3ca50fac5d..fff6c14f0e 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -197,9 +197,9 @@ mod dispatches { /// - On failure for each failed item in the batch. /// #[pallet::call_index(80)] - #[pallet::weight((Weight::from_parts(19_330_000, 0) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(0_u64)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(100_500_000, 0) + .saturating_add(T::DbWeight::get().reads(15_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, netuids: Vec>, @@ -790,7 +790,7 @@ mod dispatches { /// - Errors stemming from transaction pallet. 
/// #[pallet::call_index(2)] - #[pallet::weight((Weight::from_parts(340_400_000, 0) + #[pallet::weight((Weight::from_parts(439_200_000, 0) .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(15)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake( @@ -2406,7 +2406,7 @@ mod dispatches { /// * commit_reveal_version (`u16`): /// - The client (bittensor-drand) version #[pallet::call_index(113)] - #[pallet::weight((Weight::from_parts(64_530_000, 0) + #[pallet::weight((Weight::from_parts(84_020_000, 0) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( From aa08d50c086785e9eae79f19bca957338375394f Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 31 Aug 2025 19:39:25 +0000 Subject: [PATCH 116/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index b6e01acb4f..eb2706f416 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -120,9 +120,9 @@ mod dispatches { /// - On failure for each failed item in the batch. /// #[pallet::call_index(80)] - #[pallet::weight((Weight::from_parts(19_330_000, 0) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(0_u64)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(95_140_000, 0) + .saturating_add(T::DbWeight::get().reads(14_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, netuids: Vec>, @@ -777,7 +777,7 @@ mod dispatches { /// - Attempting to set prometheus information withing the rate limit min. 
/// #[pallet::call_index(40)] - #[pallet::weight((Weight::from_parts(41_320_000, 0) + #[pallet::weight((Weight::from_parts(31_440_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_axon_tls( From 7ae3dc862f9154d47d9414cd70507c24d7db82d3 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Mon, 1 Sep 2025 16:46:46 +0300 Subject: [PATCH 117/379] Disable Keys::remove check for trim_to_max_allowed_uids --- pallets/subtensor/src/subnets/uids.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 69c64b3817..d2ebd938bf 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -193,6 +193,7 @@ impl Pallet { // Remove tao dividends for the hotkey TaoDividendsPerSubnet::::remove(netuid, &hotkey); } + #[allow(unknown_lints)] Keys::::remove(netuid, uid); // Remove block at registration for the uid BlockAtRegistration::::remove(netuid, uid); From d00cfc1d8f0b1b28e01aa960fa6e4a6f2588c945 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Mon, 1 Sep 2025 17:08:34 +0300 Subject: [PATCH 118/379] Add benchmark for trim_to_max_uids --- pallets/admin-utils/src/benchmarking.rs | 11 +++++++++++ pallets/subtensor/src/subnets/uids.rs | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 61df5d55f8..e3397cfbfc 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -346,5 +346,16 @@ mod benchmarks { _(RawOrigin::Root, 5u16/*version*/)/*sudo_set_commit_reveal_version()*/; } + #[benchmark] + fn sudo_trim_to_max_allowed_uids() { + pallet_subtensor::Pallet::::init_new_network( + 1u16.into(), /*netuid*/ + 1u16, /*sudo_tempo*/ + ); + + #[extrinsic_call] + _(RawOrigin::Root, 1u16.into()/*netuid*/, 
4097u16/*max_allowed_uids*/)/*sudo_trim_to_max_allowed_uids()*/; + } + //impl_benchmark_test_suite!(AdminUtils, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index d2ebd938bf..0eb3be2ddb 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -193,7 +193,7 @@ impl Pallet { // Remove tao dividends for the hotkey TaoDividendsPerSubnet::::remove(netuid, &hotkey); } - #[allow(unknown_lints)] + #[allow(unknown_lints)] Keys::::remove(netuid, uid); // Remove block at registration for the uid BlockAtRegistration::::remove(netuid, uid); From 3c0ffb1088182bd22e67669258f796ff2a5201bb Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 2 Sep 2025 10:52:48 -0400 Subject: [PATCH 119/379] Fix wrong way ensure for min burn value, fix outdated comments --- pallets/admin-utils/src/lib.rs | 6 +++--- pallets/subtensor/src/subnets/uids.rs | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 38654e0cae..a511266a7f 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -680,7 +680,7 @@ pub mod pallet { ); // Min burn must be less than max burn ensure!( - min_burn > pallet_subtensor::Pallet::::get_max_burn(netuid), + min_burn <= pallet_subtensor::Pallet::::get_max_burn(netuid), Error::::ValueNotInBounds ); pallet_subtensor::Pallet::::set_min_burn(netuid, min_burn); @@ -707,7 +707,7 @@ pub mod pallet { ); // Max burn must be greater than 0.1 TAO. 
ensure!( - max_burn > TaoCurrency::from(100_000_000), + max_burn >= TaoCurrency::from(100_000_000), Error::::ValueNotInBounds ); // Max burn must be greater than min burn @@ -1704,7 +1704,7 @@ pub mod pallet { Ok(()) } - /// Sets the number of immune owner neurons + /// Sets the maximum allowed UIDs for a subnet #[pallet::call_index(74)] #[pallet::weight(Weight::from_parts(15_000_000, 0) .saturating_add(::DbWeight::get().reads(1_u64)) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 69c64b3817..0a1121aced 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -96,7 +96,8 @@ impl Pallet { IsNetworkMember::::insert(new_hotkey.clone(), netuid, true); // Fill network is member. } - /// Appends the uid to the network. + /// Clears (sets to default) the neuron map values fot a neuron when it is + /// removed from the subnet pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { let neuron_index: usize = neuron_uid.into(); Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); From 241c33c3097d1ba850279f867273a5162ce78b64 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 2 Sep 2025 18:06:36 +0000 Subject: [PATCH 120/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 311aecf667..4d31a5829d 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -198,7 +198,7 @@ mod dispatches { /// #[pallet::call_index(80)] #[pallet::weight((Weight::from_parts(95_460_000, 0) - .saturating_add(T::DbWeight::get().reads(14_u64)) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, @@ -678,7 
+678,7 @@ mod dispatches { /// - Errors stemming from transaction pallet. /// #[pallet::call_index(2)] - #[pallet::weight((Weight::from_parts(439_200_000, 0) + #[pallet::weight((Weight::from_parts(340_800_000, 0) .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(15)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake( @@ -999,7 +999,7 @@ mod dispatches { /// Register the hotkey to root network #[pallet::call_index(62)] - #[pallet::weight((Weight::from_parts(111_700_000, 0) + #[pallet::weight((Weight::from_parts(135_900_000, 0) .saturating_add(T::DbWeight::get().reads(24_u64)) .saturating_add(T::DbWeight::get().writes(20)), DispatchClass::Normal, Pays::No))] pub fn root_register(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { From 24db024171f4d02e7b8ab6f28b9fc82d08b5a57b Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Wed, 3 Sep 2025 15:00:53 +0300 Subject: [PATCH 121/379] Add rate-limited origin checks --- pallets/subtensor/src/lib.rs | 24 ++++++ pallets/subtensor/src/macros/errors.rs | 2 + pallets/subtensor/src/utils/misc.rs | 106 ++++++++++++++++++++++++- 3 files changed, 131 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 4c2eaf0cc7..6efd938608 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -867,10 +867,32 @@ pub mod pallet { 50400 } + #[pallet::type_value] + /// Default value for subnet owner hyperparameter update rate limit (in blocks) + pub fn DefaultOwnerHyperparamRateLimit() -> u64 { + 0 + } + + #[pallet::type_value] + /// Default number of terminal blocks in a tempo during which admin operations are prohibited + pub fn DefaultAdminFreezeWindow() -> u16 { + 10 + } + #[pallet::storage] pub type MinActivityCutoff = StorageValue<_, u16, ValueQuery, DefaultMinActivityCutoff>; + #[pallet::storage] + /// Global window (in blocks) at the end of each tempo where admin ops are disallowed + pub type 
AdminFreezeWindow = + StorageValue<_, u16, ValueQuery, DefaultAdminFreezeWindow>; + + #[pallet::storage] + /// Global rate limit (in blocks) for subnet owner hyperparameter updates + pub type OwnerHyperparamRateLimit = + StorageValue<_, u64, ValueQuery, DefaultOwnerHyperparamRateLimit>; + #[pallet::storage] pub type ColdkeySwapScheduleDuration = StorageValue<_, BlockNumberFor, ValueQuery, DefaultColdkeySwapScheduleDuration>; @@ -2138,6 +2160,8 @@ impl> pub enum RateLimitKey { // The setting sn owner hotkey operation is rate limited per netuid SetSNOwnerHotkey(NetUid), + // Generic rate limit for subnet-owner hyperparameter updates (per netuid) + OwnerHyperparamUpdate(NetUid), } pub trait ProxyInterface { diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index e6d9c231d1..ed6ca3c002 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -238,6 +238,8 @@ mod errors { BeneficiaryDoesNotOwnHotkey, /// Expected beneficiary origin. ExpectedBeneficiaryOrigin, + /// Admin operation is prohibited during the protected weights window + AdminActionProhibitedDuringWeightsWindow, /// Symbol does not exist. SymbolDoesNotExist, /// Symbol already in use. diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index f64962f094..b7f3e1288e 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -1,6 +1,6 @@ use super::*; use crate::{ - Error, + Error, RateLimitKey, system::{ensure_root, ensure_signed, ensure_signed_or_root, pallet_prelude::BlockNumberFor}, }; use safe_math::*; @@ -33,6 +33,110 @@ impl Pallet { } } + /// Like `ensure_root` but also prohibits calls during the last N blocks of the tempo. 
+ pub fn ensure_root_with_rate_limit( + o: T::RuntimeOrigin, + netuid: NetUid, + ) -> Result<(), DispatchError> { + ensure_root(o)?; + let now = Self::get_current_block_as_u64(); + Self::ensure_not_in_admin_freeze_window(netuid, now)?; + Ok(()) + } + + /// Like `ensure_subnet_owner` but also checks transaction rate limits. + pub fn ensure_sn_owner_with_rate_limit( + o: T::RuntimeOrigin, + netuid: NetUid, + ) -> Result<(), DispatchError> { + Self::ensure_subnet_owner(o, netuid)?; + let now = Self::get_current_block_as_u64(); + // Disallow inside freeze window and enforce owner hyperparam rate limit + Self::ensure_not_in_admin_freeze_window(netuid, now)?; + Self::ensure_owner_hparam_rate_limit(netuid, now)?; + Ok(()) + } + + /// Like `ensure_subnet_owner_or_root` but also checks transaction rate limits. + /// Root is not rate-limited outside the freeze window, but is also prohibited inside it. + pub fn ensure_sn_owner_or_root_with_rate_limit( + o: T::RuntimeOrigin, + netuid: NetUid, + ) -> Result<(), DispatchError> { + let now = Self::get_current_block_as_u64(); + + // If root, only enforce freeze window. + if ensure_root(o.clone()).is_ok() { + Self::ensure_not_in_admin_freeze_window(netuid, now)?; + return Ok(()); + } + + // Otherwise ensure subnet owner and apply both checks. + Self::ensure_subnet_owner(o, netuid)?; + Self::ensure_not_in_admin_freeze_window(netuid, now)?; + Self::ensure_owner_hparam_rate_limit(netuid, now)?; + + Ok(()) + } + + /// Returns true if the current block is within the terminal freeze window of the tempo for the + /// given subnet. During this window, admin ops are prohibited to avoid interference with + /// validator weight submissions. 
+ pub fn is_in_admin_freeze_window(netuid: NetUid, current_block: u64) -> bool { + let tempo = Self::get_tempo(netuid); + if tempo == 0 { + return false; + } + let remaining = Self::blocks_until_next_epoch(netuid, tempo, current_block); + let window = AdminFreezeWindow::::get() as u64; + remaining < window + } + + fn ensure_not_in_admin_freeze_window(netuid: NetUid, now: u64) -> Result<(), DispatchError> { + ensure!( + !Self::is_in_admin_freeze_window(netuid, now), + Error::::AdminActionProhibitedDuringWeightsWindow + ); + Ok(()) + } + + fn ensure_owner_hparam_rate_limit(netuid: NetUid, now: u64) -> Result<(), DispatchError> { + let limit = OwnerHyperparamRateLimit::::get(); + if limit > 0 { + let last = + Self::get_rate_limited_last_block(&RateLimitKey::OwnerHyperparamUpdate(netuid)); + ensure!( + now.saturating_sub(last) >= limit || last == 0, + Error::::TxRateLimitExceeded + ); + } + Ok(()) + } + + // === Admin freeze window accessors === + pub fn get_admin_freeze_window() -> u16 { + AdminFreezeWindow::::get() + } + + pub fn set_admin_freeze_window(window: u16) { + AdminFreezeWindow::::set(window); + } + + /// Helper to be called after a successful owner hyperparameter update. + /// Records the current block against the OwnerHyperparamUpdate rate limit key. 
+ pub fn mark_owner_hyperparam_update(netuid: NetUid) { + let now = Self::get_current_block_as_u64(); + Self::set_rate_limited_last_block(&RateLimitKey::OwnerHyperparamUpdate(netuid), now); + } + + pub fn get_owner_hyperparam_rate_limit() -> u64 { + OwnerHyperparamRateLimit::::get() + } + + pub fn set_owner_hyperparam_rate_limit(limit: u64) { + OwnerHyperparamRateLimit::::set(limit); + } + // ======================== // ==== Global Setters ==== // ======================== From 86cce6f473bc63a9afcf2fa3e9fd877665e9a973 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Wed, 3 Sep 2025 11:49:22 -0300 Subject: [PATCH 122/379] add test + small refacto --- pallets/admin-utils/src/lib.rs | 29 +------ pallets/admin-utils/src/tests/mod.rs | 108 +++++++++++++++++++++++++- pallets/subtensor/src/lib.rs | 2 +- pallets/subtensor/src/subnets/uids.rs | 24 ++---- 4 files changed, 120 insertions(+), 43 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index a511266a7f..da81f2e208 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -107,8 +107,6 @@ pub mod pallet { BondsMovingAverageMaxReached, /// Only root can set negative sigmoid steepness values NegativeSigmoidSteepness, - /// Value not in allowed bounds. - ValueNotInBounds, } /// Enum for specifying the type of precompile operation. #[derive( @@ -667,22 +665,12 @@ pub mod pallet { netuid: NetUid, min_burn: TaoCurrency, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; - // Allow set min_burn but only up to 1 TAO for spamming. + ensure_root(origin)?; + ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - // Min burn must be less than 1 TAO. 
- ensure!( - min_burn < TaoCurrency::from(1_000_000_000), - Error::::ValueNotInBounds - ); - // Min burn must be less than max burn - ensure!( - min_burn <= pallet_subtensor::Pallet::::get_max_burn(netuid), - Error::::ValueNotInBounds - ); pallet_subtensor::Pallet::::set_min_burn(netuid, min_burn); log::debug!("MinBurnSet( netuid: {netuid:?} min_burn: {min_burn:?} ) "); Ok(()) @@ -700,21 +688,12 @@ pub mod pallet { netuid: NetUid, max_burn: TaoCurrency, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; + ensure_root(origin)?; + ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - // Max burn must be greater than 0.1 TAO. - ensure!( - max_burn >= TaoCurrency::from(100_000_000), - Error::::ValueNotInBounds - ); - // Max burn must be greater than min burn - ensure!( - max_burn > pallet_subtensor::Pallet::::get_min_burn(netuid), - Error::::ValueNotInBounds - ); pallet_subtensor::Pallet::::set_max_burn(netuid, max_burn); log::debug!("MaxBurnSet( netuid: {netuid:?} max_burn: {max_burn:?} ) "); Ok(()) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 5290d3ddfc..01bd0e68f1 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -5,7 +5,10 @@ use frame_support::{ traits::Hooks, }; use frame_system::Config; -use pallet_subtensor::{Error as SubtensorError, SubnetOwner, Tempo, WeightsVersionKeyRateLimit}; +use pallet_subtensor::{ + Error as SubtensorError, MaxRegistrationsPerBlock, Rank, SubnetOwner, + TargetRegistrationsPerInterval, Tempo, WeightsVersionKeyRateLimit, *, +}; // use pallet_subtensor::{migrations, Event}; use pallet_subtensor::Event; use sp_consensus_grandpa::AuthorityId as GrandpaId; @@ -1951,3 +1954,106 @@ fn test_sudo_set_commit_reveal_version() { ); }); } + +#[test] +fn test_trim_to_max_allowed_uids() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + 
add_network(netuid, 10); + MaxRegistrationsPerBlock::::insert(netuid, 256); + TargetRegistrationsPerInterval::::insert(netuid, 256); + + // Add some neurons + let max_n = 32; + for i in 1..=max_n { + let n = i * 1000; + register_ok_neuron(netuid, U256::from(n), U256::from(n + i), 0); + } + + // Run some block to ensure stake weights are set + run_to_block(20); + + // Normal case + let new_max_n = 20; + assert_ok!(AdminUtils::sudo_trim_to_max_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + new_max_n + )); + + // Ensure storage has been trimmed + assert_eq!(MaxAllowedUids::::get(netuid), new_max_n); + assert_eq!(Rank::::get(netuid).len(), new_max_n as usize); + assert_eq!(Trust::::get(netuid).len(), new_max_n as usize); + assert_eq!(Active::::get(netuid).len(), new_max_n as usize); + assert_eq!(Emission::::get(netuid).len(), new_max_n as usize); + assert_eq!(Consensus::::get(netuid).len(), new_max_n as usize); + assert_eq!(Incentive::::get(netuid).len(), new_max_n as usize); + assert_eq!(Dividends::::get(netuid).len(), new_max_n as usize); + assert_eq!(LastUpdate::::get(netuid).len(), new_max_n as usize); + assert_eq!(PruningScores::::get(netuid).len(), new_max_n as usize); + assert_eq!( + ValidatorTrust::::get(netuid).len(), + new_max_n as usize + ); + assert_eq!( + ValidatorPermit::::get(netuid).len(), + new_max_n as usize + ); + assert_eq!(StakeWeight::::get(netuid).len(), new_max_n as usize); + + for uid in max_n..new_max_n { + assert!(!Keys::::contains_key(netuid, uid)); + assert!(!BlockAtRegistration::::contains_key(netuid, uid)); + assert!(!Weights::::contains_key(netuid, uid)); + assert!(!Bonds::::contains_key(netuid, uid)); + } + + for uid in 0..max_n { + assert!( + Weights::::get(netuid, uid) + .iter() + .all(|(target_uid, _)| *target_uid < new_max_n), + "Found a weight with target_uid >= new_max_n" + ); + assert!( + Bonds::::get(netuid, uid) + .iter() + .all(|(target_uid, _)| *target_uid < new_max_n), + "Found a bond with target_uid >= new_max_n" + 
); + } + + assert_eq!(SubnetworkN::::get(netuid), new_max_n); + + // Non existent subnet + assert_err!( + AdminUtils::sudo_trim_to_max_allowed_uids( + <::RuntimeOrigin>::root(), + NetUid::from(42), + new_max_n + ), + pallet_subtensor::Error::::SubNetworkDoesNotExist + ); + + // New max n less than lower bound + assert_err!( + AdminUtils::sudo_trim_to_max_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + 15 + ), + pallet_subtensor::Error::::InvalidValue + ); + + // New max n greater than upper bound + assert_err!( + AdminUtils::sudo_trim_to_max_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + SubtensorModule::get_max_allowed_uids(netuid) + 1 + ), + pallet_subtensor::Error::::InvalidValue + ); + }); +} diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index ef41b07c78..e6d2b658aa 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1486,7 +1486,7 @@ pub mod pallet { /// ==== Subnetwork Consensus Storage ==== /// ======================================= #[pallet::storage] // --- DMAP ( netuid ) --> stake_weight | weight for stake used in YC. - pub(super) type StakeWeight = + pub type StakeWeight = StorageMap<_, Identity, NetUid, Vec, ValueQuery, EmptyU16Vec>; #[pallet::storage] /// --- DMAP ( netuid, hotkey ) --> uid diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index ef7ed1b6ac..cdec803598 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -96,7 +96,7 @@ impl Pallet { IsNetworkMember::::insert(new_hotkey.clone(), netuid, true); // Fill network is member. 
} - /// Clears (sets to default) the neuron map values fot a neuron when it is + /// Clears (sets to default) the neuron map values fot a neuron when it is /// removed from the subnet pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { let neuron_index: usize = neuron_uid.into(); @@ -180,9 +180,9 @@ impl Pallet { stake_weight.into_iter().take(max_n as usize).collect(); StakeWeight::::insert(netuid, trimmed_stake_weight); - // Trim UIDs and Keys by removing entries with UID >= max_n (since UIDs are 0-indexed) - // UIDs range from 0 to current_n-1, so we remove UIDs from max_n to current_n-1 for uid in max_n..current_n { + // Trim UIDs and Keys by removing entries with UID >= max_n (since UIDs are 0-indexed) + // UIDs range from 0 to current_n-1, so we remove UIDs from max_n to current_n-1 if let Ok(hotkey) = Keys::::try_get(netuid, uid) { Uids::::remove(netuid, &hotkey); // Remove IsNetworkMember association for the hotkey @@ -193,28 +193,20 @@ impl Pallet { AlphaDividendsPerSubnet::::remove(netuid, &hotkey); // Remove tao dividends for the hotkey TaoDividendsPerSubnet::::remove(netuid, &hotkey); + // Trim axons, certificates, and prometheus info for removed hotkeys + Axons::::remove(netuid, &hotkey); + NeuronCertificates::::remove(netuid, &hotkey); + Prometheus::::remove(netuid, &hotkey); } #[allow(unknown_lints)] Keys::::remove(netuid, uid); // Remove block at registration for the uid BlockAtRegistration::::remove(netuid, uid); - } - - // Trim weights and bonds for removed UIDs - for uid in max_n..current_n { + // Trim weights and bonds for removed UIDs Weights::::remove(netuid, uid); Bonds::::remove(netuid, uid); } - // Trim axons, certificates, and prometheus info for removed hotkeys - for uid in max_n..current_n { - if let Ok(hotkey) = Keys::::try_get(netuid, uid) { - Axons::::remove(netuid, &hotkey); - NeuronCertificates::::remove(netuid, &hotkey); - Prometheus::::remove(netuid, &hotkey); - } - } - // Trim weight and bond connections to removed UIDs for 
remaining neurons // UIDs 0 to max_n-1 are kept, so we iterate through these valid UIDs for uid in 0..max_n { From 3c8ff8e29689e0f9c7f5d5376d7ab6e225d60675 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Wed, 3 Sep 2025 12:42:22 -0300 Subject: [PATCH 123/379] cargo fmt --- pallets/admin-utils/src/tests/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 331d194b15..a131c7e0f3 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2175,4 +2175,3 @@ fn test_trim_to_max_allowed_uids() { ); }); } - From 0d8234f48d6857d42a017a8817c1315697740025 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 3 Sep 2025 12:31:27 -0400 Subject: [PATCH 124/379] Add weight setting/removing tests --- pallets/subtensor/src/macros/dispatches.rs | 49 ++ pallets/subtensor/src/macros/events.rs | 6 +- pallets/subtensor/src/subnets/uids.rs | 18 +- pallets/subtensor/src/subnets/weights.rs | 42 +- pallets/subtensor/src/tests/epoch.rs | 1 + pallets/subtensor/src/tests/subsubnet.rs | 568 +++++++++++++++++- pallets/subtensor/src/tests/weights.rs | 2 +- .../subtensor/src/transaction_extension.rs | 6 +- 8 files changed, 663 insertions(+), 29 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 311aecf667..759bcc06a8 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -2254,5 +2254,54 @@ mod dispatches { commit_reveal_version, ) } + + /// ---- Used to commit timelock encrypted commit-reveal weight values to later be revealed. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `subid` (`u8`): + /// - The u8 subsubnet identifier. + /// + /// * `commit` (`Vec`): + /// - The encrypted compressed commit. 
+ /// The steps for this are: + /// 1. Instantiate [`WeightsTlockPayload`] + /// 2. Serialize it using the `parity_scale_codec::Encode` trait + /// 3. Encrypt it following the steps (here)[https://github.com/ideal-lab5/tle/blob/f8e6019f0fb02c380ebfa6b30efb61786dede07b/timelock/src/tlock.rs#L283-L336] + /// to produce a [`TLECiphertext`] type. + /// 4. Serialize and compress using the `ark-serialize` `CanonicalSerialize` trait. + /// + /// * reveal_round (`u64`): + /// - The drand reveal round which will be avaliable during epoch `n+1` from the current + /// epoch. + /// + /// * commit_reveal_version (`u16`): + /// - The client (bittensor-drand) version + #[pallet::call_index(118)] + #[pallet::weight((Weight::from_parts(84_020_000, 0) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_timelocked_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + Self::do_commit_timelocked_sub_weights( + origin, + netuid, + subid, + commit, + reveal_round, + commit_reveal_version, + ) + } } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 0259863cd8..57bde5a374 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -242,20 +242,20 @@ mod events { /// - **who**: The account ID of the user committing the weights. /// - **netuid**: The network identifier. /// - **commit_hash**: The hash representing the committed weights. - CRV3WeightsCommitted(T::AccountId, NetUid, H256), + CRV3WeightsCommitted(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully committed. /// /// - **who**: The account ID of the user committing the weights. /// - **netuid**: The network identifier. /// - **commit_hash**: The hash representing the committed weights. 
- WeightsCommitted(T::AccountId, NetUid, H256), + WeightsCommitted(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully revealed. /// /// - **who**: The account ID of the user revealing the weights. /// - **netuid**: The network identifier. /// - **commit_hash**: The hash of the revealed weights. - WeightsRevealed(T::AccountId, NetUid, H256), + WeightsRevealed(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully batch revealed. /// diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 4c029862e4..ce0b14cc1c 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -16,7 +16,8 @@ impl Pallet { } } - /// Resets the trust, emission, consensus, incentive, dividends of the neuron to default + /// Resets the trust, emission, consensus, incentive, dividends, bonds, and weights of + /// the neuron to default pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { let neuron_index: usize = neuron_uid.into(); Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); @@ -26,6 +27,21 @@ impl Pallet { let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); Incentive::::mutate(netuid_index, |v| Self::set_element_at(v, neuron_index, 0)); Bonds::::remove(netuid_index, neuron_uid); // Remove bonds for Validator. 
+ + // Clear weights set BY the neuron_uid + Weights::::remove(netuid_index, neuron_uid); + + // Set weights FOR the neuron_uid to 0 + let all_uids: Vec = Weights::::iter_key_prefix(netuid_index).collect(); + for uid in all_uids { + Weights::::mutate(netuid_index, uid, |weight_vec: &mut Vec<(u16, u16)>| { + for (weight_uid, w) in weight_vec.iter_mut() { + if *weight_uid == neuron_uid { + *w = 0; + } + } + }); + } } Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); } diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index f9393cd6bd..0c1ad9efd7 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -64,6 +64,9 @@ impl Pallet { subid: SubId, commit_hash: H256, ) -> DispatchResult { + // Ensure netuid and subid exist + Self::ensure_subsubnet_exists(netuid, subid)?; + // Calculate subnet storage index let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); @@ -125,7 +128,7 @@ impl Pallet { *maybe_commits = Some(commits); // 11. Emit the WeightsCommitted event - Self::deposit_event(Event::WeightsCommitted(who.clone(), netuid, commit_hash)); + Self::deposit_event(Event::WeightsCommitted(who.clone(), netuid_index, commit_hash)); // 12. Update the last commit block for the hotkey's UID. Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); @@ -293,6 +296,9 @@ impl Pallet { reveal_round: u64, commit_reveal_version: u16, ) -> DispatchResult { + // Ensure netuid and subid exist + Self::ensure_subsubnet_exists(netuid, subid)?; + // Calculate netuid storage index let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); @@ -476,7 +482,7 @@ impl Pallet { // --- 5. Hash the provided data. let provided_hash: H256 = - Self::get_commit_hash(&who, netuid, &uids, &values, &salt, version_key); + Self::get_commit_hash(&who, netuid_index, &uids, &values, &salt, version_key); // --- 6. 
After removing expired commits, check if any commits are left. if commits.is_empty() { @@ -515,16 +521,17 @@ impl Pallet { } // --- 12. Proceed to set the revealed weights. - Self::do_set_weights( + Self::do_set_sub_weights( origin, netuid, + subid, uids.clone(), values.clone(), version_key, )?; // --- 13. Emit the WeightsRevealed event. - Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid, provided_hash)); + Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid_index, provided_hash)); // --- 14. Return ok. Ok(()) @@ -1085,17 +1092,20 @@ impl Pallet { neuron_uid: u16, current_block: u64, ) -> bool { - let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); - if Self::is_uid_exist_on_network(netuid, neuron_uid) { - // --- 1. Ensure that the diff between current and last_set weights is greater than limit. - let last_set_weights: u64 = Self::get_last_update_for_uid(netuid_index, neuron_uid); - if last_set_weights == 0 { - return true; - } // (Storage default) Never set weights. - return current_block.saturating_sub(last_set_weights) - >= Self::get_weights_set_rate_limit(netuid); + let maybe_netuid_and_subid = Self::get_netuid_and_subid(netuid_index); + if let Ok((netuid, _)) = maybe_netuid_and_subid { + if Self::is_uid_exist_on_network(netuid, neuron_uid) { + // --- 1. Ensure that the diff between current and last_set weights is greater than limit. + let last_set_weights: u64 = Self::get_last_update_for_uid(netuid_index, neuron_uid); + if last_set_weights == 0 { + return true; + } // (Storage default) Never set weights. + return current_block.saturating_sub(last_set_weights) + >= Self::get_weights_set_rate_limit(netuid); + } } - // --- 3. Non registered peers cant pass. + + // --- 3. Non registered peers cant pass. 
Neither can non-existing subid false } @@ -1291,13 +1301,13 @@ impl Pallet { pub fn get_commit_hash( who: &T::AccountId, - netuid: NetUid, + netuid_index: NetUidStorageIndex, uids: &[u16], values: &[u16], salt: &[u16], version_key: u64, ) -> H256 { - BlakeTwo256::hash_of(&(who.clone(), netuid, uids, values, salt, version_key)) + BlakeTwo256::hash_of(&(who.clone(), netuid_index, uids, values, salt, version_key)) } pub fn find_commit_block_via_hash(hash: H256) -> Option { diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index af72ac6924..fa628013ac 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -1570,6 +1570,7 @@ fn test_active_stake() { } // Test that epoch masks out outdated weights and bonds of validators on deregistered servers. +// cargo test --package pallet-subtensor --lib -- tests::epoch::test_outdated_weights --exact --show-output #[test] fn test_outdated_weights() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 09e43b6f13..1761a9d595 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -14,9 +14,13 @@ // - [x] Emissions are split proportionally // - [x] Sum of split emissions is equal to rao_emission passed to epoch // - [ ] Only subnet owner or root can set desired subsubnet count -// - [ ] Weights can be set/commited/revealed by subsubnet -// - [ ] Prevent weight setting/commitment/revealing above subsubnet_limit_in_force -// - [ ] When a miner is deregistered, their weights are cleaned across all subsubnets +// - [x] Weights can be set by subsubnet +// - [x] Weights can be commited/revealed by subsubnet +// - [x] Weights can be commited/revealed in crv3 by subsubnet +// - [x] Prevent weight setting/commitment/revealing above subsubnet_limit_in_force +// - [x] Prevent weight commitment/revealing above subsubnet_limit_in_force 
+// - [x] Prevent weight commitment/revealing in crv3 above subsubnet_limit_in_force +// - [x] When a miner is deregistered, their weights are cleaned across all subsubnets // - [ ] Weight setting rate limiting is enforced by subsubnet // - [x] Bonds are applied per subsubnet // - [x] Incentives are per subsubnet @@ -28,16 +32,32 @@ // - [ ] Subnet epoch terms persist in state // - [x] Subsubnet epoch terms persist in state // - [ ] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake -// - [ ] Miner with no weights on any subsubnet receives no reward +// - [x] Miner with no weights on any subsubnet receives no reward use super::mock::*; +use crate::coinbase::reveal_commits::WeightsTlockPayload; use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; use crate::*; use approx::assert_abs_diff_eq; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use codec::Encode; use frame_support::{assert_noop, assert_ok}; -use sp_core::U256; +use frame_system::RawOrigin; +use pallet_drand::types::Pulse; +use rand_chacha::{ChaCha20Rng, rand_core::SeedableRng}; +use sha2::Digest; +use sp_core::{H256, U256}; +use sp_runtime::traits::{BlakeTwo256, Hash}; use sp_std::collections::vec_deque::VecDeque; use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; +use substrate_fixed::types::I32F32; +use tle::{ + curves::drand::TinyBLS381, + ibe::fullident::Identity, + stream_ciphers::AESGCMStreamCipherProvider, + tlock::tle, +}; +use w3f_bls::EngineBLS; #[test] fn test_index_from_netuid_and_subnet() { @@ -577,3 +597,541 @@ fn epoch_with_subsubnets_incentives_proportional_to_weights() { ); }); } + +#[test] +fn epoch_with_subsubnets_no_weight_no_incentive() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let 
ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let hk2 = U256::from(5); // No weight miner + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + mock_3_neurons(netuid, hk2); + + // Need 3 neurons for this: One validator that will be setting weights to 2 miners + ValidatorPermit::::insert(netuid, vec![true, false, false]); + + // Set no weight to uid2 on sub-subnet 0 and 1 + Weights::::insert(idx0, 0, vec![(1u16, 1), (2u16, 0)]); + Weights::::insert(idx1, 0, vec![(1u16, 1), (2u16, 0)]); + + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let actual_incentive_sub0 = Incentive::::get(idx0); + let actual_incentive_sub1 = Incentive::::get(idx1); + let expected_incentive = 0xFFFF; + assert_eq!(actual_incentive_sub0[0], 0); + assert_eq!(actual_incentive_sub0[1], expected_incentive); + assert_eq!(actual_incentive_sub0[2], 0); + assert_eq!(actual_incentive_sub1[0], 0); + assert_eq!(actual_incentive_sub1[1], expected_incentive); + assert_eq!(actual_incentive_sub1[2], 0); + assert_eq!(actual_incentive_sub0.len(), 3); + assert_eq!(actual_incentive_sub1.len(), 3); + }); +} + +#[test] +fn neuron_dereg_cleans_weights_across_subids() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(77u16); + let neuron_uid: u16 = 1; // we'll deregister UID=1 + // two sub-subnets + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // Setup initial map values + Emission::::insert(netuid, vec![AlphaCurrency::from(1u64), AlphaCurrency::from(9u64), AlphaCurrency::from(3u64)]); + Trust::::insert(netuid, vec![11u16, 99u16, 33u16]); + Consensus::::insert(netuid, vec![21u16, 88u16, 44u16]); + Dividends::::insert(netuid, vec![7u16, 77u16, 17u16]); + + // Clearing per-subid maps + for sub in [0u8, 1u8] { + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(sub)); + + // Incentive vector: position 1 should become 0 + 
Incentive::::insert(idx, vec![10u16, 20u16, 30u16]); + + // Row set BY neuron_uid (to be removed) + Weights::::insert(idx, neuron_uid, vec![(0u16, 5u16)]); + Bonds::::insert(idx, neuron_uid, vec![(0u16, 6u16)]); + + // Rows FOR neuron_uid inside other validators' vecs => value should be set to 0 (not removed) + Weights::::insert(idx, 0u16, vec![(neuron_uid, 7u16), (42u16, 3u16)]); + Bonds::::insert(idx, 0u16, vec![(neuron_uid, 8u16), (42u16, 4u16)]); + } + + // Act + SubtensorModule::clear_neuron(netuid, neuron_uid); + + // Top-level zeroed at index 1, others intact + let e = Emission::::get(netuid); + assert_eq!(e[0], 1u64.into()); + assert_eq!(e[1], 0u64.into()); + assert_eq!(e[2], 3u64.into()); + + let t = Trust::::get(netuid); + assert_eq!(t, vec![11, 0, 33]); + + let c = Consensus::::get(netuid); + assert_eq!(c, vec![21, 0, 44]); + + let d = Dividends::::get(netuid); + assert_eq!(d, vec![7, 0, 17]); + + // Per-subid cleanup + for sub in [0u8, 1u8] { + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(sub)); + + // Incentive element at index 1 set to 0 + let inc = Incentive::::get(idx); + assert_eq!(inc, vec![10, 0, 30]); + + // Rows BY neuron_uid removed + assert!(!Weights::::contains_key(idx, neuron_uid)); + assert!(!Bonds::::contains_key(idx, neuron_uid)); + + // In other rows, entries FOR neuron_uid are zeroed, others unchanged + let w0 = Weights::::get(idx, 0u16); + assert!(w0.iter().any(|&(u, w)| u == neuron_uid && w == 0)); + assert!(w0.iter().any(|&(u, w)| u == 42 && w == 3)); + } + }); +} + +#[test] +fn clear_neuron_handles_absent_rows_gracefully() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(55u16); + SubsubnetCountCurrent::::insert(netuid, SubId::from(1u8)); // single sub-subnet + + // Minimal vectors with non-zero at index 0 (we will clear UID=0) + Emission::::insert(netuid, vec![AlphaCurrency::from(5u64)]); + Trust::::insert(netuid, vec![5u16]); + Consensus::::insert(netuid, vec![6u16]); + 
Dividends::::insert(netuid, vec![7u16]); + + // No Weights/Bonds rows at all → function should not panic + let neuron_uid: u16 = 0; + SubtensorModule::clear_neuron(netuid, neuron_uid); + + // All zeroed at index 0 + assert_eq!(Emission::::get(netuid), vec![AlphaCurrency::from(0u64)]); + assert_eq!(Trust::::get(netuid), vec![0u16]); + assert_eq!(Consensus::::get(netuid), vec![0u16]); + assert_eq!(Dividends::::get(netuid), vec![0u16]); + }); +} + +#[test] +fn test_set_sub_weights_happy_path_sets_row_under_subid() { + new_test_ext(0).execute_with(|| { + let netuid = NetUid::from(1); + let tempo: u16 = 13; + add_network_disable_commit_reveal(netuid, tempo, 0); + + // Register validator (caller) and a destination neuron + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = U256::from(88); + let hk3 = U256::from(99); + let ck3 = U256::from(111); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + register_ok_neuron(netuid, hk3, ck3, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).expect("caller uid"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).expect("dest uid 1"); + let uid3 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk3).expect("dest uid 2"); + + // Make caller a permitted validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + + // Have at least two sub-subnets; write under subid = 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid = SubId::from(1u8); + + // Call extrinsic + let dests = vec![uid2, uid3]; + let weights = vec![88u16, 0xFFFF]; + assert_ok!(SubtensorModule::set_sub_weights( + RawOrigin::Signed(hk1).into(), + netuid, + subid, + dests.clone(), + 
weights.clone(), + 0, // version_key + )); + + // Verify row exists under the chosen subid and not under a different subid + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + assert_eq!(Weights::::get(idx1, uid1), vec![(uid2, 88u16), (uid3, 0xFFFF)]); + + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0u8)); + assert!(Weights::::get(idx0, uid1).is_empty()); + }); +} + +#[test] +fn test_set_sub_weights_above_subsubnet_count_fails() { + new_test_ext(0).execute_with(|| { + let netuid = NetUid::from(1); + let tempo: u16 = 13; + add_network_disable_commit_reveal(netuid, tempo, 0); + + // Register validator (caller) and a destination neuron + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = U256::from(88); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).expect("caller uid"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).expect("dest uid 1"); + + // Make caller a permitted validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + + // Have exactly two sub-subnets; write under subid = 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid_above = SubId::from(2u8); + + // Call extrinsic + let dests = vec![uid2]; + let weights = vec![88u16]; + assert_noop!( + SubtensorModule::set_sub_weights( + RawOrigin::Signed(hk1).into(), + netuid, + subid_above, + dests.clone(), + weights.clone(), + 0, // version_key + ), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn test_commit_reveal_sub_weights_ok() { + new_test_ext(1).execute_with(|| { + System::set_block_number(0); + + let netuid = 
NetUid::from(1); + let tempo: u16 = 13; + add_network(netuid, tempo, 0); + + // Three neurons: validator (caller) + two destinations + let hk1 = U256::from(55); let ck1 = U256::from(66); + let hk2 = U256::from(77); let ck2 = U256::from(88); + let hk3 = U256::from(99); let ck3 = U256::from(111); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + register_ok_neuron(netuid, hk3, ck3, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).unwrap(); // caller + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).unwrap(); + let uid3 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk3).unwrap(); + + // Enable commit-reveal path and make caller a validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + + // Ensure sub-subnet exists; write under subid = 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid = SubId::from(1u8); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0u8)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + + // Prepare payload and commit hash (include subid!) 
+ let dests = vec![uid2, uid3]; + let weights = vec![88u16, 0xFFFFu16]; + let salt: Vec = vec![1,2,3,4,5,6,7,8]; + let version_key: u64 = 0; + let commit_hash: H256 = BlakeTwo256::hash_of(&(hk1, idx1, dests.clone(), weights.clone(), salt.clone(), version_key)); + + // Commit in epoch 0 + assert_ok!(SubtensorModule::commit_sub_weights(RuntimeOrigin::signed(hk1), netuid, subid, commit_hash)); + + // Advance one epoch, then reveal + step_epochs(1, netuid); + assert_ok!(SubtensorModule::reveal_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid, + dests.clone(), + weights.clone(), + salt, + version_key + )); + + // Verify weights stored under the chosen subid (normalized keeps max=0xFFFF here) + assert_eq!(Weights::::get(idx1, uid1), vec![(uid2, 88u16), (uid3, 0xFFFFu16)]); + + // And not under a different subid + assert!(Weights::::get(idx0, uid1).is_empty()); + }); +} + +#[test] +fn test_commit_reveal_above_subsubnet_count_fails() { + new_test_ext(1).execute_with(|| { + System::set_block_number(0); + + let netuid = NetUid::from(1); + let tempo: u16 = 13; + add_network(netuid, tempo, 0); + + // Two neurons: validator (caller) + miner + let hk1 = U256::from(55); let ck1 = U256::from(66); + let hk2 = U256::from(77); let ck2 = U256::from(88); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).unwrap(); // caller + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).unwrap(); + + // Enable commit-reveal path and make caller a validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + + 
// Ensure there are two subsubnets: 0 and 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid_above = SubId::from(2u8); // non-existing sub-subnet + let idx2 = SubtensorModule::get_subsubnet_storage_index(netuid, subid_above); + + // Prepare payload and commit hash + let dests = vec![uid2]; + let weights = vec![88u16]; + let salt: Vec = vec![1,2,3,4,5,6,7,8]; + let version_key: u64 = 0; + let commit_hash: H256 = BlakeTwo256::hash_of(&(hk1, idx2, dests.clone(), weights.clone(), salt.clone(), version_key)); + + // Commit in epoch 0 + assert_noop!( + SubtensorModule::commit_sub_weights(RuntimeOrigin::signed(hk1), netuid, subid_above, commit_hash), + Error::::SubNetworkDoesNotExist + ); + + // Advance one epoch, then attempt to reveal + step_epochs(1, netuid); + assert_noop!( + SubtensorModule::reveal_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid_above, + dests.clone(), + weights.clone(), + salt, + version_key + ), + Error::::NoWeightsCommitFound + ); + + // Verify that weights didn't update + assert!(Weights::::get(idx2, uid1).is_empty()); + assert!(Weights::::get(idx2, uid2).is_empty()); + }); +} + +#[test] +fn test_reveal_crv3_commits_sub_success() { + new_test_ext(100).execute_with(|| { + System::set_block_number(0); + + let netuid = NetUid::from(1); + let subid = SubId::from(1u8); // write under sub-subnet #1 + let hotkey1: AccountId = U256::from(1); + let hotkey2: AccountId = U256::from(2); + let reveal_round: u64 = 1000; + + add_network(netuid, 5, 0); + // ensure we actually have subid=1 available + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // Register neurons and set up configs + register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000); + register_ok_neuron(netuid, hotkey2, U256::from(4), 100_000); + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + 
assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1).expect("uid1"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey2).expect("uid2"); + + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::set_validator_permit_for_uid(netuid, uid2, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(3), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(4), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey1, &U256::from(3), netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &U256::from(4), netuid, 1.into()); + + let version_key = SubtensorModule::get_weights_version_key(netuid); + + // Payload (same as legacy; subid is provided to the extrinsic) + let payload = WeightsTlockPayload { + hotkey: hotkey1.encode(), + values: vec![10, 20], + uids: vec![uid1, uid2], + version_key, + }; + let serialized_payload = payload.encode(); + + // Public key + encrypt + let esk = [2; 32]; + let rng = ChaCha20Rng::seed_from_u64(0); + let pk_bytes = hex::decode("83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a").unwrap(); + let pub_key = ::PublicKeyGroup::deserialize_compressed(&*pk_bytes).unwrap(); + + let message = { + let mut hasher = sha2::Sha256::new(); + hasher.update(reveal_round.to_be_bytes()); + hasher.finalize().to_vec() + }; + let identity = Identity::new(b"", vec![message]); + + let ct = tle::(pub_key, esk, &serialized_payload, identity, rng).expect("encrypt"); + let mut commit_bytes = Vec::new(); + ct.serialize_compressed(&mut commit_bytes).expect("serialize"); + + // Commit (sub variant) + assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey1), + netuid, + subid, + 
commit_bytes.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + + // Inject drand pulse for the reveal round + let sig_bytes = hex::decode("b44679b9a59af2ec876b1a6b1ad52ea9b1615fc3982b19576350f93447cb1125e342b73a8dd2bacbe47e4b6b63ed5e39").unwrap(); + pallet_drand::Pulses::::insert( + reveal_round, + Pulse { + round: reveal_round, + randomness: vec![0; 32].try_into().unwrap(), + signature: sig_bytes.try_into().unwrap(), + }, + ); + + // Run epochs so the commit is processed + step_epochs(3, netuid); + + // Verify weights applied under the selected subid index + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + let weights_sparse = SubtensorModule::get_weights_sparse(idx); + let row = weights_sparse.get(uid1 as usize).cloned().unwrap_or_default(); + assert!(!row.is_empty(), "expected weights set for validator uid1 under subid"); + + // Compare rounded normalized weights to expected proportions (like legacy test) + let expected: Vec<(u16, I32F32)> = payload.uids.iter().zip(payload.values.iter()).map(|(&u,&v)|(u, I32F32::from_num(v))).collect(); + let total: I32F32 = row.iter().map(|(_, w)| *w).sum(); + let normalized: Vec<(u16, I32F32)> = row.iter().map(|&(u,w)| (u, w * I32F32::from_num(30) / total)).collect(); + + for ((ua, wa), (ub, wb)) in normalized.iter().zip(expected.iter()) { + assert_eq!(ua, ub); + let actual = wa.to_num::().round() as i64; + let expect = wb.to_num::(); + assert_ne!(actual, 0, "actual weight for uid {} is zero", ua); + assert_eq!(actual, expect, "weight mismatch for uid {}", ua); + } + }); +} + +#[test] +fn test_crv3_above_subsubnet_count_fails() { + new_test_ext(100).execute_with(|| { + System::set_block_number(0); + + let netuid = NetUid::from(1); + let subid_above = SubId::from(2u8); // non-existing sub-subnet + let hotkey1: AccountId = U256::from(1); + let hotkey2: AccountId = U256::from(2); + let reveal_round: u64 = 1000; + + add_network(netuid, 5, 
0); + // ensure we actually have subid=1 available + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // Register neurons and set up configs + register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000); + register_ok_neuron(netuid, hotkey2, U256::from(4), 100_000); + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1).expect("uid1"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey2).expect("uid2"); + + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(3), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey1, &U256::from(3), netuid, 1.into()); + + let version_key = SubtensorModule::get_weights_version_key(netuid); + + // Payload (same as legacy; subid is provided to the extrinsic) + let payload = WeightsTlockPayload { + hotkey: hotkey1.encode(), + values: vec![10, 20], + uids: vec![uid1, uid2], + version_key, + }; + let serialized_payload = payload.encode(); + + // Public key + encrypt + let esk = [2; 32]; + let rng = ChaCha20Rng::seed_from_u64(0); + let pk_bytes = hex::decode("83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a").unwrap(); + let pub_key = ::PublicKeyGroup::deserialize_compressed(&*pk_bytes).unwrap(); + + let message = { + let mut hasher = sha2::Sha256::new(); + hasher.update(reveal_round.to_be_bytes()); + hasher.finalize().to_vec() + }; + let identity = Identity::new(b"", vec![message]); + + let ct = tle::(pub_key, esk, &serialized_payload, identity, rng).expect("encrypt"); + let mut commit_bytes = Vec::new(); + 
ct.serialize_compressed(&mut commit_bytes).expect("serialize"); + + // Commit (sub variant) + assert_noop!( + SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey1), + netuid, + subid_above, + commit_bytes.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + ), + Error::::SubNetworkDoesNotExist + ); + }); +} diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 191ad5ce47..5cc624b644 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -341,7 +341,7 @@ fn test_reveal_weights_validate() { }); let commit_hash: H256 = - SubtensorModule::get_commit_hash(&who, netuid, &dests, &weights, &salt, version_key); + SubtensorModule::get_commit_hash(&who, NetUidStorageIndex::from(netuid), &dests, &weights, &salt, version_key); let commit_block = SubtensorModule::get_current_block_as_u64(); let (first_reveal_block, last_reveal_block) = SubtensorModule::get_reveal_blocks(netuid, commit_block); diff --git a/pallets/subtensor/src/transaction_extension.rs b/pallets/subtensor/src/transaction_extension.rs index 537562fab7..ba65c3afe5 100644 --- a/pallets/subtensor/src/transaction_extension.rs +++ b/pallets/subtensor/src/transaction_extension.rs @@ -16,7 +16,7 @@ use sp_runtime::transaction_validity::{ use sp_std::marker::PhantomData; use sp_std::vec::Vec; use subtensor_macros::freeze_struct; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex}; #[freeze_struct("2e02eb32e5cb25d3")] #[derive(Default, Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)] @@ -148,7 +148,7 @@ where if Self::check_weights_min_stake(who, *netuid) { let provided_hash = Pallet::::get_commit_hash( who, - *netuid, + NetUidStorageIndex::from(*netuid), uids, values, salt, @@ -185,7 +185,7 @@ where .map(|i| { Pallet::::get_commit_hash( who, - *netuid, + 
NetUidStorageIndex::from(*netuid), uids_list.get(i).unwrap_or(&Vec::new()), values_list.get(i).unwrap_or(&Vec::new()), salts_list.get(i).unwrap_or(&Vec::new()), From 100f6e20173360c7303cf89fee0626282c83bc6b Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 3 Sep 2025 13:09:11 -0400 Subject: [PATCH 125/379] Add rate limit test for committing timelocked weights --- pallets/subtensor/src/tests/subsubnet.rs | 90 ++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 1761a9d595..e25edea950 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -1135,3 +1135,93 @@ fn test_crv3_above_subsubnet_count_fails() { ); }); } + +#[test] +fn test_do_commit_crv3_sub_weights_committing_too_fast() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + let subid = SubId::from(1u8); + let hotkey: AccountId = U256::from(1); + let commit_data_1: Vec = vec![1, 2, 3]; + let commit_data_2: Vec = vec![4, 5, 6]; + let reveal_round: u64 = 1000; + + add_network(netuid, 5, 0); + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); // allow subids {0,1} + + register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("uid"); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + SubtensorModule::set_last_update_for_uid(idx1, uid, 0); + + // make validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_validator_permit_for_uid(netuid, uid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(2), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &U256::from(2), netuid, 1.into()); + + // first commit OK on subid=1 + 
assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_1.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + + // immediate second commit on SAME subid blocked + assert_noop!( + SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_2.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + ), + Error::::CommittingWeightsTooFast + ); + + // BUT committing too soon on a DIFFERENT subid is allowed + let other_subid = SubId::from(0u8); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, other_subid); + SubtensorModule::set_last_update_for_uid(idx0, uid, 0); // baseline like above + assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + other_subid, + commit_data_2.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + + // still too fast on original subid after 2 blocks + step_block(2); + assert_noop!( + SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_2.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + ), + Error::::CommittingWeightsTooFast + ); + + // after enough blocks, OK again on original subid + step_block(3); + assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_2.try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + }); +} From eb001e35291d48fdceb5f28ae4f5ecca6fbd0afd Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Wed, 3 Sep 2025 14:36:31 -0300 Subject: [PATCH 126/379] fix misleading comment --- pallets/subtensor/src/subnets/uids.rs | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index e116e98ac4..cfa1770029 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -202,7 +202,7 @@ impl Pallet { Keys::::remove(netuid, uid); // Remove block at registration for the uid BlockAtRegistration::::remove(netuid, uid); - // Trim weights and bonds for removed UIDs + // Remove entire weights and bonds entries for removed UIDs Weights::::remove(netuid, uid); Bonds::::remove(netuid, uid); } From b96c60c418d298a1d0c64e32c31f82c899e0dff2 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Wed, 3 Sep 2025 14:45:12 -0300 Subject: [PATCH 127/379] fix merge issue --- pallets/admin-utils/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 64e4a54c8b..a4d8ff9061 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1648,6 +1648,7 @@ pub mod pallet { ); } pallet_subtensor::Pallet::::trim_to_max_allowed_uids(netuid, max_n)?; + Ok(()) } } } From 88dd86c5cb96dfd1aff40d615162cdd8f5f7e3a5 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 3 Sep 2025 13:50:51 -0400 Subject: [PATCH 128/379] Add admin-until call to set desired subsubnet count and test --- pallets/admin-utils/src/lib.rs | 17 ++++++++- pallets/admin-utils/src/tests/mod.rs | 45 +++++++++++++++++++++++- pallets/subtensor/src/tests/epoch.rs | 2 +- pallets/subtensor/src/tests/subsubnet.rs | 4 +-- 4 files changed, 63 insertions(+), 5 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index abc5e7a443..2df34f42df 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -31,7 +31,7 @@ pub mod pallet { use pallet_subtensor::utils::rate_limiting::TransactionType; use sp_runtime::BoundedVec; use substrate_fixed::types::I96F32; - use subtensor_runtime_common::{NetUid, 
TaoCurrency}; + use subtensor_runtime_common::{NetUid, SubId, TaoCurrency}; /// The main data structure of the module. #[pallet::pallet] @@ -1591,6 +1591,21 @@ pub mod pallet { pallet_subtensor::Pallet::::set_owner_immune_neuron_limit(netuid, immune_neurons)?; Ok(()) } + + /// Sets the desired number of subsubnets in a subnet + #[pallet::call_index(73)] + #[pallet::weight(Weight::from_parts(15_000_000, 0) + .saturating_add(::DbWeight::get().reads(1_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_set_desired_subsubnet_count( + origin: OriginFor, + netuid: NetUid, + subsub_count: SubId, + ) -> DispatchResult { + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::do_set_desired_subsubnet_count(netuid, subsub_count)?; + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index bf1b115dd6..e8cb71a9b9 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -11,7 +11,7 @@ use pallet_subtensor::Event; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{Get, Pair, U256, ed25519}; use substrate_fixed::types::I96F32; -use subtensor_runtime_common::{Currency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{Currency, NetUid, SubId, TaoCurrency}; use crate::Error; use crate::pallet::PrecompileEnable; @@ -1954,3 +1954,46 @@ fn test_sudo_set_commit_reveal_version() { ); }); } + +#[test] +fn test_sudo_set_desired_subsubnet_count() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let ss_count_ok = SubId::from(8); + let ss_count_bad = SubId::from(9); + + let sn_owner = U256::from(1324); + add_network(netuid, 10); + // Set the Subnet Owner + SubnetOwner::::insert(netuid, sn_owner); + + assert_eq!( + AdminUtils::sudo_set_desired_subsubnet_count( + <::RuntimeOrigin>::signed(U256::from(1)), + netuid, + ss_count_ok + ), + Err(DispatchError::BadOrigin) + ); + assert_noop!( + 
AdminUtils::sudo_set_desired_subsubnet_count( + RuntimeOrigin::root(), + netuid, + ss_count_bad + ), + pallet_subtensor::Error::::InvalidValue + ); + + assert_ok!(AdminUtils::sudo_set_desired_subsubnet_count( + <::RuntimeOrigin>::root(), + netuid, + ss_count_ok + )); + + assert_ok!(AdminUtils::sudo_set_desired_subsubnet_count( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + ss_count_ok + )); + }); +} \ No newline at end of file diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index fa628013ac..1b29fb7118 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -1570,7 +1570,7 @@ fn test_active_stake() { } // Test that epoch masks out outdated weights and bonds of validators on deregistered servers. -// cargo test --package pallet-subtensor --lib -- tests::epoch::test_outdated_weights --exact --show-output +// #[test] fn test_outdated_weights() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index e25edea950..c4c043b4f9 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -13,7 +13,7 @@ // - [x] do_set_desired tests // - [x] Emissions are split proportionally // - [x] Sum of split emissions is equal to rao_emission passed to epoch -// - [ ] Only subnet owner or root can set desired subsubnet count +// - [x] Only subnet owner or root can set desired subsubnet count (pallet admin test) // - [x] Weights can be set by subsubnet // - [x] Weights can be commited/revealed by subsubnet // - [x] Weights can be commited/revealed in crv3 by subsubnet @@ -21,7 +21,7 @@ // - [x] Prevent weight commitment/revealing above subsubnet_limit_in_force // - [x] Prevent weight commitment/revealing in crv3 above subsubnet_limit_in_force // - [x] When a miner is deregistered, their weights are cleaned across all subsubnets -// - [ ] Weight setting rate limiting is enforced by 
subsubnet +// - [x] Weight setting rate limiting is enforced by subsubnet // - [x] Bonds are applied per subsubnet // - [x] Incentives are per subsubnet // - [x] Per-subsubnet incentives are distributed proportionally to miner weights From 4b3d2d39d1bdcb0458d72cb231bfa1d1031b6094 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 3 Sep 2025 17:15:26 -0400 Subject: [PATCH 129/379] Add settable emission split between subsubnets --- pallets/admin-utils/src/lib.rs | 17 ++- pallets/admin-utils/src/tests/mod.rs | 4 +- pallets/subtensor/src/lib.rs | 4 + pallets/subtensor/src/macros/dispatches.rs | 2 +- pallets/subtensor/src/subnets/subsubnet.rs | 129 ++++++++++++----- pallets/subtensor/src/subnets/uids.rs | 2 +- pallets/subtensor/src/subnets/weights.rs | 12 +- pallets/subtensor/src/tests/epoch.rs | 2 +- pallets/subtensor/src/tests/subsubnet.rs | 155 ++++++++++++++++----- pallets/subtensor/src/tests/weights.rs | 10 +- 10 files changed, 258 insertions(+), 79 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 2df34f42df..ac01f5ae9a 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -9,7 +9,7 @@ pub use pallet::*; // - we could use a type parameter for `AuthorityId`, but there is // no sense for this as GRANDPA's `AuthorityId` is not a parameter -- it's always the same use sp_consensus_grandpa::AuthorityList; -use sp_runtime::{DispatchResult, RuntimeAppPublic, traits::Member}; +use sp_runtime::{DispatchResult, RuntimeAppPublic, Vec, traits::Member}; mod benchmarking; @@ -1606,6 +1606,21 @@ pub mod pallet { pallet_subtensor::Pallet::::do_set_desired_subsubnet_count(netuid, subsub_count)?; Ok(()) } + + /// Sets the emission split between subsubnets in a subnet + #[pallet::call_index(74)] + #[pallet::weight(Weight::from_parts(15_000_000, 0) + .saturating_add(::DbWeight::get().reads(1_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_set_subsubnet_emission_split( + origin: 
OriginFor, + netuid: NetUid, + maybe_split: Option>, + ) -> DispatchResult { + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::do_set_emission_split(netuid, maybe_split)?; + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index e8cb71a9b9..f5a6876ec3 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -1991,9 +1991,9 @@ fn test_sudo_set_desired_subsubnet_count() { )); assert_ok!(AdminUtils::sudo_set_desired_subsubnet_count( - <::RuntimeOrigin>::signed(sn_owner), + <::RuntimeOrigin>::signed(sn_owner), netuid, ss_count_ok )); }); -} \ No newline at end of file +} diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 961109c200..a08587934c 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1835,6 +1835,10 @@ pub mod pallet { /// --- MAP ( netuid ) --> Current number of sub-subnets pub type SubsubnetCountCurrent = StorageMap<_, Twox64Concat, NetUid, SubId, ValueQuery, DefaultSubsubnetCount>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Normalized vector of emission split proportion between subsubnets + pub type SubsubnetEmissionSplit = + StorageMap<_, Twox64Concat, NetUid, Vec, OptionQuery>; /// ================== /// ==== Genesis ===== diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 070c8ca366..9c423aa6fb 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -2263,7 +2263,7 @@ mod dispatches { /// /// * `netuid` (`u16`): /// - The u16 network identifier. - /// + /// /// * `subid` (`u8`): /// - The u8 subsubnet identifier. 
/// diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index cca8df95db..d72c696c8b 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -117,7 +117,8 @@ impl Pallet { /// /// - This function should be called in every block in run_counbase /// - Cleans up all sub-subnet maps if count is reduced - /// - Decreases current subsubnet count by no more than `GlobalSubsubnetDecreasePerSuperblock` + /// - Decreases or increases current subsubnet count by no more than + /// `GlobalSubsubnetDecreasePerSuperblock` /// pub fn update_subsubnet_counts_if_needed(current_block: u64) { // Run once per super-block @@ -128,59 +129,113 @@ impl Pallet { if rem == 0 { let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); - let min_possible_count = old_count + let min_capped_count = old_count .saturating_sub(u8::from(GlobalSubsubnetDecreasePerSuperblock::::get())) .max(1); - let new_count = desired_count.max(min_possible_count); - - if old_count > new_count { - for subid in new_count..old_count { - let netuid_index = - Self::get_subsubnet_storage_index(*netuid, SubId::from(subid)); + let max_capped_count = old_count + .saturating_add(u8::from(GlobalSubsubnetDecreasePerSuperblock::::get())); + let new_count = desired_count.max(min_capped_count).min(max_capped_count); + + if old_count != new_count { + if old_count > new_count { + for subid in new_count..old_count { + let netuid_index = + Self::get_subsubnet_storage_index(*netuid, SubId::from(subid)); + + // Cleanup Weights + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup Incentive + Incentive::::remove(netuid_index); + + // Cleanup LastUpdate + LastUpdate::::remove(netuid_index); + + // Cleanup Bonds + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup WeightCommits + let _ = + 
WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup TimelockedWeightCommits + let _ = TimelockedWeightCommits::::clear_prefix( + netuid_index, + u32::MAX, + None, + ); + } + } - // Cleanup Weights - let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); - // Cleanup Incentive - Incentive::::remove(netuid_index); + // Reset split back to even + SubsubnetEmissionSplit::::remove(netuid); + } + } + } + }); + } - // Cleanup LastUpdate - LastUpdate::::remove(netuid_index); + pub fn do_set_emission_split(netuid: NetUid, maybe_split: Option>) -> DispatchResult { + // Make sure the subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); - // Cleanup Bonds - let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + if let Some(split) = maybe_split { + // Check the length + ensure!(!split.is_empty(), Error::::InvalidValue); + ensure!( + split.len() <= u8::from(SubsubnetCountCurrent::::get(netuid)) as usize, + Error::::InvalidValue + ); - // Cleanup WeightCommits - let _ = WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + // Check that values add up to 65535 + let total: u64 = split.iter().map(|s| *s as u64).sum(); + ensure!(total <= u16::MAX as u64, Error::::InvalidValue); - // Cleanup TimelockedWeightCommits - let _ = TimelockedWeightCommits::::clear_prefix( - netuid_index, - u32::MAX, - None, - ); - } - } + SubsubnetEmissionSplit::::insert(netuid, split); + } else { + SubsubnetEmissionSplit::::remove(netuid); + } - SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); - } - } - }); + Ok(()) } /// Split alpha emission in sub-subnet proportions - /// Currently splits evenly between sub-subnets, but the implementation - /// may change in the future + /// stored in SubsubnetEmissionSplit /// pub fn split_emissions(netuid: NetUid, alpha: AlphaCurrency) -> Vec { let subsubnet_count = 
u64::from(SubsubnetCountCurrent::::get(netuid)); + let maybe_split = SubsubnetEmissionSplit::::get(netuid); + + // Unset split means even distribution + let mut result: Vec = if let Some(split) = maybe_split { + split + .iter() + .map(|s| { + AlphaCurrency::from( + (u64::from(alpha) as u128) + .saturating_mul(*s as u128) + .safe_div(u16::MAX as u128) as u64, + ) + }) + .collect() + } else { + let per_subsubnet = u64::from(alpha).safe_div(subsubnet_count); + vec![AlphaCurrency::from(per_subsubnet); subsubnet_count as usize] + }; - // If there's any rounding error, credit it to subsubnet 0 - let per_subsubnet = u64::from(alpha).safe_div(subsubnet_count); - let rounding_err = - u64::from(alpha).saturating_sub(per_subsubnet.saturating_mul(subsubnet_count)); + // Trim / extend and pad with zeroes if result is shorter than subsubnet_count + if result.len() != subsubnet_count as usize { + result.resize(subsubnet_count as usize, 0u64.into()); // pad with AlphaCurrency::from(0) + } - let mut result = vec![AlphaCurrency::from(per_subsubnet); subsubnet_count as usize]; + // If there's any rounding error or lost due to truncation emission, credit it to subsubnet 0 + let rounding_err = + u64::from(alpha).saturating_sub(result.iter().map(|s| u64::from(*s)).sum()); if let Some(cell) = result.first_mut() { *cell = cell.saturating_add(AlphaCurrency::from(rounding_err)); } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index ce0b14cc1c..2ec6869bad 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -16,7 +16,7 @@ impl Pallet { } } - /// Resets the trust, emission, consensus, incentive, dividends, bonds, and weights of + /// Resets the trust, emission, consensus, incentive, dividends, bonds, and weights of /// the neuron to default pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { let neuron_index: usize = neuron_uid.into(); diff --git a/pallets/subtensor/src/subnets/weights.rs 
b/pallets/subtensor/src/subnets/weights.rs index 0c1ad9efd7..b751630d85 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -128,7 +128,11 @@ impl Pallet { *maybe_commits = Some(commits); // 11. Emit the WeightsCommitted event - Self::deposit_event(Event::WeightsCommitted(who.clone(), netuid_index, commit_hash)); + Self::deposit_event(Event::WeightsCommitted( + who.clone(), + netuid_index, + commit_hash, + )); // 12. Update the last commit block for the hotkey's UID. Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); @@ -531,7 +535,11 @@ impl Pallet { )?; // --- 13. Emit the WeightsRevealed event. - Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid_index, provided_hash)); + Self::deposit_event(Event::WeightsRevealed( + who.clone(), + netuid_index, + provided_hash, + )); // --- 14. Return ok. Ok(()) diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 1b29fb7118..fec978a51d 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -1570,7 +1570,7 @@ fn test_active_stake() { } // Test that epoch masks out outdated weights and bonds of validators on deregistered servers. 
-// +// #[test] fn test_outdated_weights() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index c4c043b4f9..78fbefa681 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -27,12 +27,15 @@ // - [x] Per-subsubnet incentives are distributed proportionally to miner weights // - [x] Subsubnet limit can be set up to 8 (with admin pallet) // - [x] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block +// - [ ] When subsubnet limit is increased, increase is GlobalSubsubnetDecreasePerSuperblock per super-block // - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared // - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms // - [ ] Subnet epoch terms persist in state // - [x] Subsubnet epoch terms persist in state // - [ ] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake // - [x] Miner with no weights on any subsubnet receives no reward +// - [ ] SubsubnetEmissionSplit is reset on super-block on subsubnet count increase +// - [ ] SubsubnetEmissionSplit is reset on super-block on subsubnet count decrease use super::mock::*; use crate::coinbase::reveal_commits::WeightsTlockPayload; @@ -49,13 +52,11 @@ use sha2::Digest; use sp_core::{H256, U256}; use sp_runtime::traits::{BlakeTwo256, Hash}; use sp_std::collections::vec_deque::VecDeque; -use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; use substrate_fixed::types::I32F32; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; use tle::{ - curves::drand::TinyBLS381, - ibe::fullident::Identity, - stream_ciphers::AESGCMStreamCipherProvider, - tlock::tle, + curves::drand::TinyBLS381, ibe::fullident::Identity, + stream_ciphers::AESGCMStreamCipherProvider, tlock::tle, }; use 
w3f_bls::EngineBLS; @@ -117,8 +118,8 @@ fn test_netuid_and_subnet_from_index() { let (netuid, subid) = SubtensorModule::get_netuid_and_subid(NetUidStorageIndex::from(*netuid_index)) .unwrap(); - assert_eq!(netuid, NetUid::from(expected_netuid as u16)); - assert_eq!(subid, SubId::from(expected_subid as u8)); + assert_eq!(netuid, NetUid::from(expected_netuid)); + assert_eq!(subid, SubId::from(expected_subid)); }); }); } @@ -420,6 +421,26 @@ fn split_emissions_rounding_to_first() { }); } +#[test] +fn split_emissions_fibbonacci() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(5u16); + SubsubnetCountCurrent::::insert(netuid, SubId::from(5u8)); // 5 sub-subnets + SubsubnetEmissionSplit::::insert(netuid, vec![3450, 6899, 10348, 17247, 27594]); + let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(19u64)); + assert_eq!( + out, + vec![ + AlphaCurrency::from(1u64), + AlphaCurrency::from(2u64), + AlphaCurrency::from(3u64), + AlphaCurrency::from(5u64), + AlphaCurrency::from(8u64), + ] + ); + }); +} + /// Seeds a 2-neuron and 2-subsubnet subnet so `epoch_subsubnet` produces non-zero /// incentives & dividends. /// Returns the sub-subnet storage index. @@ -433,8 +454,8 @@ pub fn mock_epoch_state(netuid: NetUid, ck0: U256, hk0: U256, ck1: U256, hk1: U2 SubnetworkN::::insert(netuid, 2); // Register two neurons (UID 0,1) → keys drive `get_subnetwork_n`. - Keys::::insert(netuid, 0u16, hk0.clone()); - Keys::::insert(netuid, 1u16, hk1.clone()); + Keys::::insert(netuid, 0u16, hk0); + Keys::::insert(netuid, 1u16, hk1); // Make both ACTIVE: recent updates & old registrations. 
Tempo::::insert(netuid, 1u16); @@ -479,7 +500,7 @@ pub fn mock_3_neurons(netuid: NetUid, hk: U256) { let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); SubnetworkN::::insert(netuid, 3); - Keys::::insert(netuid, 2u16, hk.clone()); + Keys::::insert(netuid, 2u16, hk); LastUpdate::::insert(idx0, vec![2, 2, 2]); LastUpdate::::insert(idx1, vec![2, 2, 2]); BlockAtRegistration::::insert(netuid, 2, 1u64); @@ -646,7 +667,14 @@ fn neuron_dereg_cleans_weights_across_subids() { SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); // Setup initial map values - Emission::::insert(netuid, vec![AlphaCurrency::from(1u64), AlphaCurrency::from(9u64), AlphaCurrency::from(3u64)]); + Emission::::insert( + netuid, + vec![ + AlphaCurrency::from(1u64), + AlphaCurrency::from(9u64), + AlphaCurrency::from(3u64), + ], + ); Trust::::insert(netuid, vec![11u16, 99u16, 33u16]); Consensus::::insert(netuid, vec![21u16, 88u16, 44u16]); Dividends::::insert(netuid, vec![7u16, 77u16, 17u16]); @@ -722,7 +750,10 @@ fn clear_neuron_handles_absent_rows_gracefully() { SubtensorModule::clear_neuron(netuid, neuron_uid); // All zeroed at index 0 - assert_eq!(Emission::::get(netuid), vec![AlphaCurrency::from(0u64)]); + assert_eq!( + Emission::::get(netuid), + vec![AlphaCurrency::from(0u64)] + ); assert_eq!(Trust::::get(netuid), vec![0u16]); assert_eq!(Consensus::::get(netuid), vec![0u16]); assert_eq!(Dividends::::get(netuid), vec![0u16]); @@ -755,7 +786,12 @@ fn test_set_sub_weights_happy_path_sets_row_under_subid() { SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); // Have at least two sub-subnets; write under subid = 1 
SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); @@ -775,7 +811,10 @@ fn test_set_sub_weights_happy_path_sets_row_under_subid() { // Verify row exists under the chosen subid and not under a different subid let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); - assert_eq!(Weights::::get(idx1, uid1), vec![(uid2, 88u16), (uid3, 0xFFFF)]); + assert_eq!( + Weights::::get(idx1, uid1), + vec![(uid2, 88u16), (uid3, 0xFFFF)] + ); let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0u8)); assert!(Weights::::get(idx0, uid1).is_empty()); @@ -804,7 +843,12 @@ fn test_set_sub_weights_above_subsubnet_count_fails() { SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); // Have exactly two sub-subnets; write under subid = 1 SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); @@ -837,9 +881,12 @@ fn test_commit_reveal_sub_weights_ok() { add_network(netuid, tempo, 0); // Three neurons: validator (caller) + two destinations - let hk1 = U256::from(55); let ck1 = U256::from(66); - let hk2 = U256::from(77); let ck2 = U256::from(88); - let hk3 = U256::from(99); let ck3 = U256::from(111); + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = U256::from(88); + let hk3 = U256::from(99); + let ck3 = U256::from(111); register_ok_neuron(netuid, hk1, ck1, 0); register_ok_neuron(netuid, hk2, ck2, 0); register_ok_neuron(netuid, hk3, ck3, 0); @@ -854,7 +901,12 @@ fn test_commit_reveal_sub_weights_ok() { SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); 
SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); // Ensure sub-subnet exists; write under subid = 1 SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); @@ -865,12 +917,24 @@ fn test_commit_reveal_sub_weights_ok() { // Prepare payload and commit hash (include subid!) let dests = vec![uid2, uid3]; let weights = vec![88u16, 0xFFFFu16]; - let salt: Vec = vec![1,2,3,4,5,6,7,8]; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; let version_key: u64 = 0; - let commit_hash: H256 = BlakeTwo256::hash_of(&(hk1, idx1, dests.clone(), weights.clone(), salt.clone(), version_key)); + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hk1, + idx1, + dests.clone(), + weights.clone(), + salt.clone(), + version_key, + )); // Commit in epoch 0 - assert_ok!(SubtensorModule::commit_sub_weights(RuntimeOrigin::signed(hk1), netuid, subid, commit_hash)); + assert_ok!(SubtensorModule::commit_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid, + commit_hash + )); // Advance one epoch, then reveal step_epochs(1, netuid); @@ -885,7 +949,10 @@ fn test_commit_reveal_sub_weights_ok() { )); // Verify weights stored under the chosen subid (normalized keeps max=0xFFFF here) - assert_eq!(Weights::::get(idx1, uid1), vec![(uid2, 88u16), (uid3, 0xFFFFu16)]); + assert_eq!( + Weights::::get(idx1, uid1), + vec![(uid2, 88u16), (uid3, 0xFFFFu16)] + ); // And not under a different subid assert!(Weights::::get(idx0, uid1).is_empty()); @@ -902,8 +969,10 @@ fn test_commit_reveal_above_subsubnet_count_fails() { add_network(netuid, tempo, 0); // Two neurons: validator (caller) + miner - let hk1 = U256::from(55); let ck1 = U256::from(66); - let hk2 = U256::from(77); let ck2 = U256::from(88); + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = 
U256::from(88); register_ok_neuron(netuid, hk1, ck1, 0); register_ok_neuron(netuid, hk2, ck2, 0); @@ -916,7 +985,12 @@ fn test_commit_reveal_above_subsubnet_count_fails() { SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); // Ensure there are two subsubnets: 0 and 1 SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); @@ -926,13 +1000,25 @@ fn test_commit_reveal_above_subsubnet_count_fails() { // Prepare payload and commit hash let dests = vec![uid2]; let weights = vec![88u16]; - let salt: Vec = vec![1,2,3,4,5,6,7,8]; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; let version_key: u64 = 0; - let commit_hash: H256 = BlakeTwo256::hash_of(&(hk1, idx2, dests.clone(), weights.clone(), salt.clone(), version_key)); + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hk1, + idx2, + dests.clone(), + weights.clone(), + salt.clone(), + version_key, + )); // Commit in epoch 0 assert_noop!( - SubtensorModule::commit_sub_weights(RuntimeOrigin::signed(hk1), netuid, subid_above, commit_hash), + SubtensorModule::commit_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid_above, + commit_hash + ), Error::::SubNetworkDoesNotExist ); @@ -1057,8 +1143,8 @@ fn test_reveal_crv3_commits_sub_success() { assert_eq!(ua, ub); let actual = wa.to_num::().round() as i64; let expect = wb.to_num::(); - assert_ne!(actual, 0, "actual weight for uid {} is zero", ua); - assert_eq!(actual, expect, "weight mismatch for uid {}", ua); + assert_ne!(actual, 0, "actual weight for uid {ua} is zero"); + assert_eq!(actual, expect, "weight mismatch for uid {ua}"); } }); } @@ -1161,7 +1247,12 @@ fn test_do_commit_crv3_sub_weights_committing_too_fast() { 
SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, uid, true); SubtensorModule::add_balance_to_coldkey_account(&U256::from(2), 1); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &U256::from(2), netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &U256::from(2), + netuid, + 1.into(), + ); // first commit OK on subid=1 assert_ok!(SubtensorModule::commit_timelocked_sub_weights( diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 5cc624b644..bc9af5cf07 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -340,8 +340,14 @@ fn test_reveal_weights_validate() { version_key, }); - let commit_hash: H256 = - SubtensorModule::get_commit_hash(&who, NetUidStorageIndex::from(netuid), &dests, &weights, &salt, version_key); + let commit_hash: H256 = SubtensorModule::get_commit_hash( + &who, + NetUidStorageIndex::from(netuid), + &dests, + &weights, + &salt, + version_key, + ); let commit_block = SubtensorModule::get_current_block_as_u64(); let (first_reveal_block, last_reveal_block) = SubtensorModule::get_reveal_blocks(netuid, commit_block); From afa8129482f26054522ecdfb0d49925f56519a38 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 3 Sep 2025 17:59:43 -0400 Subject: [PATCH 130/379] Add per-subsubnet RPC for metagraph --- pallets/subtensor/rpc/src/lib.rs | 64 +++++++++++++- pallets/subtensor/runtime-api/src/lib.rs | 5 +- pallets/subtensor/src/rpc_info/metagraph.rs | 98 ++++++++++++++++++++- runtime/src/lib.rs | 11 +++ 4 files changed, 175 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/rpc/src/lib.rs b/pallets/subtensor/rpc/src/lib.rs index e3d5d8f1c1..ea46695142 100644 --- a/pallets/subtensor/rpc/src/lib.rs +++ b/pallets/subtensor/rpc/src/lib.rs @@ -9,7 +9,7 @@ use jsonrpsee::{ use sp_blockchain::HeaderBackend; use sp_runtime::{AccountId32, 
traits::Block as BlockT}; use std::sync::Arc; -use subtensor_runtime_common::{NetUid, TaoCurrency}; +use subtensor_runtime_common::{NetUid, SubId, TaoCurrency}; use sp_api::ProvideRuntimeApi; @@ -72,6 +72,15 @@ pub trait SubtensorCustomApi { fn get_all_metagraphs(&self, at: Option) -> RpcResult>; #[method(name = "subnetInfo_getMetagraph")] fn get_metagraph(&self, netuid: NetUid, at: Option) -> RpcResult>; + #[method(name = "subnetInfo_getAllSubMetagraphs")] + fn get_all_submetagraphs(&self, at: Option) -> RpcResult>; + #[method(name = "subnetInfo_getSubMetagraph")] + fn get_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + at: Option, + ) -> RpcResult>; #[method(name = "subnetInfo_getSubnetState")] fn get_subnet_state(&self, netuid: NetUid, at: Option) -> RpcResult>; #[method(name = "subnetInfo_getLockCost")] @@ -83,6 +92,14 @@ pub trait SubtensorCustomApi { metagraph_index: Vec, at: Option, ) -> RpcResult>; + #[method(name = "subnetInfo_getSelectiveSubMetagraph")] + fn get_selective_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + metagraph_index: Vec, + at: Option, + ) -> RpcResult>; } pub struct SubtensorCustom { @@ -319,6 +336,16 @@ where } } + fn get_all_submetagraphs(&self, at: Option<::Hash>) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + + match api.get_all_submetagraphs(at) { + Ok(result) => Ok(result.encode()), + Err(e) => Err(Error::RuntimeError(format!("Unable to get metagraps: {e:?}")).into()), + } + } + fn get_dynamic_info( &self, netuid: NetUid, @@ -352,6 +379,23 @@ where } } + fn get_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + match api.get_submetagraph(at, netuid, subid) { + Ok(result) => Ok(result.encode()), + Err(e) => Err(Error::RuntimeError(format!( + "Unable to get dynamic subnets info: {e:?}" + 
)) + .into()), + } + } + fn get_subnet_state( &self, netuid: NetUid, @@ -427,4 +471,22 @@ where } } } + + fn get_selective_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + metagraph_index: Vec, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + + match api.get_selective_submetagraph(at, netuid, subid, metagraph_index) { + Ok(result) => Ok(result.encode()), + Err(e) => { + Err(Error::RuntimeError(format!("Unable to get selective metagraph: {e:?}")).into()) + } + } + } } diff --git a/pallets/subtensor/runtime-api/src/lib.rs b/pallets/subtensor/runtime-api/src/lib.rs index 42d12eb686..3ec76df45f 100644 --- a/pallets/subtensor/runtime-api/src/lib.rs +++ b/pallets/subtensor/runtime-api/src/lib.rs @@ -12,7 +12,7 @@ use pallet_subtensor::rpc_info::{ subnet_info::{SubnetHyperparams, SubnetHyperparamsV2, SubnetInfo, SubnetInfov2}, }; use sp_runtime::AccountId32; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, SubId, TaoCurrency}; // Here we declare the runtime API. It is implemented it the `impl` block in // src/neuron_info.rs, src/subnet_info.rs, and src/delegate_info.rs @@ -40,9 +40,12 @@ sp_api::decl_runtime_apis! 
{ fn get_all_dynamic_info() -> Vec>>; fn get_all_metagraphs() -> Vec>>; fn get_metagraph(netuid: NetUid) -> Option>; + fn get_all_submetagraphs() -> Vec>>; + fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option>; fn get_dynamic_info(netuid: NetUid) -> Option>; fn get_subnet_state(netuid: NetUid) -> Option>; fn get_selective_metagraph(netuid: NetUid, metagraph_indexes: Vec) -> Option>; + fn get_selective_submetagraph(netuid: NetUid, subid: SubId, metagraph_indexes: Vec) -> Option>; } pub trait StakeInfoRuntimeApi { diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index e65ddf0696..0f6fa067c9 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -7,7 +7,7 @@ use frame_support::pallet_prelude::{Decode, Encode}; use substrate_fixed::types::I64F64; use substrate_fixed::types::I96F32; use subtensor_macros::freeze_struct; -use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId, TaoCurrency}; #[freeze_struct("6fc49d5a7dc0e339")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -796,6 +796,45 @@ impl Pallet { metagraphs } + pub fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option> { + if Self::ensure_subsubnet_exists(netuid, subid).is_err() { + return None; + } + + // Get netuid metagraph + let maybe_meta = Self::get_metagraph(netuid); + if let Some(mut meta) = maybe_meta { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + + // Update with subsubnet information + meta.netuid = NetUid::from(u16::from(netuid_index)).into(); + meta.last_update = LastUpdate::::get(netuid_index) + .into_iter() + .map(Compact::from) + .collect(); + meta.incentives = Incentive::::get(netuid_index) + .into_iter() + .map(Compact::from) + .collect(); + + Some(meta) + } else { + None + } + } + + pub fn get_all_submetagraphs() 
-> Vec>> { + let netuids = Self::get_all_subnet_netuids(); + let mut metagraphs = Vec::>>::new(); + for netuid in netuids.clone().iter() { + let subsub_count = u8::from(SubsubnetCountCurrent::::get(netuid)); + for subid in 0..subsub_count { + metagraphs.push(Self::get_submetagraph(*netuid, SubId::from(subid))); + } + } + metagraphs + } + pub fn get_selective_metagraph( netuid: NetUid, metagraph_indexes: Vec, @@ -812,6 +851,23 @@ impl Pallet { } } + pub fn get_selective_submetagraph( + netuid: NetUid, + subid: SubId, + metagraph_indexes: Vec, + ) -> Option> { + if !Self::if_subnet_exist(netuid) { + None + } else { + let mut result = SelectiveMetagraph::default(); + for index in metagraph_indexes.iter() { + let value = Self::get_single_selective_submetagraph(netuid, subid, *index); + result.merge_value(&value, *index as usize); + } + Some(result) + } + } + fn get_single_selective_metagraph( netuid: NetUid, metagraph_index: u16, @@ -1375,6 +1431,46 @@ impl Pallet { } } + fn get_single_selective_submetagraph( + netuid: NetUid, + subid: SubId, + metagraph_index: u16, + ) -> SelectiveMetagraph { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + + // Default to netuid, replace as needed for subid + match SelectiveMetagraphIndex::from_index(metagraph_index as usize) { + Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { + netuid: netuid.into(), + incentives: Some( + Incentive::::get(NetUidStorageIndex::from(netuid)) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::LastUpdate) => SelectiveMetagraph { + netuid: netuid.into(), + last_update: Some( + LastUpdate::::get(NetUidStorageIndex::from(netuid)) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + _ => { + let mut meta = Self::get_single_selective_metagraph(netuid, metagraph_index); + // Replace netuid with index + meta.netuid = NetUid::from(u16::from(netuid_index)).into(); + meta 
+ } + } + } + fn get_validators(netuid: NetUid) -> SelectiveMetagraph { let stake_threshold = Self::get_stake_threshold(); let hotkeys: Vec<(u16, T::AccountId)> = diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 7675c962c6..7d1b701171 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -2314,6 +2314,10 @@ impl_runtime_apis! { SubtensorModule::get_metagraph(netuid) } + fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option> { + SubtensorModule::get_submetagraph(netuid, subid) + } + fn get_subnet_state(netuid: NetUid) -> Option> { SubtensorModule::get_subnet_state(netuid) } @@ -2322,6 +2326,10 @@ impl_runtime_apis! { SubtensorModule::get_all_metagraphs() } + fn get_all_submetagraphs() -> Vec>> { + SubtensorModule::get_all_submetagraphs() + } + fn get_all_dynamic_info() -> Vec>> { SubtensorModule::get_all_dynamic_info() } @@ -2330,6 +2338,9 @@ impl_runtime_apis! { SubtensorModule::get_selective_metagraph(netuid, metagraph_indexes) } + fn get_selective_submetagraph(netuid: NetUid, subid: SubId, metagraph_indexes: Vec) -> Option> { + SubtensorModule::get_selective_submetagraph(netuid, subid, metagraph_indexes) + } } impl subtensor_custom_rpc_runtime_api::StakeInfoRuntimeApi for Runtime { From c8d5ebe58ef118e1d8ea372855325020318a86d1 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Wed, 3 Sep 2025 15:24:41 +0300 Subject: [PATCH 131/379] Apply rate limits for hyperparams set --- pallets/admin-utils/src/lib.rs | 351 +++++++++++++++---- pallets/subtensor/src/macros/events.rs | 4 + pallets/subtensor/src/utils/misc.rs | 91 +++-- pallets/subtensor/src/utils/rate_limiting.rs | 12 + 4 files changed, 348 insertions(+), 110 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index abc5e7a443..e8d3827966 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -22,10 +22,7 @@ pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_support::traits::tokens::Balance; - 
use frame_support::{ - dispatch::{DispatchResult, RawOrigin}, - pallet_prelude::StorageMap, - }; + use frame_support::{dispatch::DispatchResult, pallet_prelude::StorageMap}; use frame_system::pallet_prelude::*; use pallet_evm_chain_id::{self, ChainId}; use pallet_subtensor::utils::rate_limiting::TransactionType; @@ -214,10 +211,18 @@ pub mod pallet { netuid: NetUid, serving_rate_limit: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; - + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; pallet_subtensor::Pallet::::set_serving_rate_limit(netuid, serving_rate_limit); log::debug!("ServingRateLimitSet( serving_rate_limit: {serving_rate_limit:?} ) "); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -233,7 +238,7 @@ pub mod pallet { netuid: NetUid, min_difficulty: u64, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -258,7 +263,11 @@ pub mod pallet { netuid: NetUid, max_difficulty: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -268,6 +277,11 @@ pub mod pallet { log::debug!( "MaxDifficultySet( netuid: {netuid:?} max_difficulty: {max_difficulty:?} ) " ); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -283,34 +297,28 @@ pub mod pallet { netuid: NetUid, weights_version_key: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; + 
let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin.clone(), + netuid, + &[ + TransactionType::OwnerHyperparamUpdate, + TransactionType::SetWeightsVersionKey, + ], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); - if let Ok(RawOrigin::Signed(who)) = origin.into() { - // SN Owner - // Ensure the origin passes the rate limit. - ensure!( - pallet_subtensor::Pallet::::passes_rate_limit_on_subnet( - &TransactionType::SetWeightsVersionKey, - &who, - netuid, - ), - pallet_subtensor::Error::::TxRateLimitExceeded - ); - - // Set last transaction block - let current_block = pallet_subtensor::Pallet::::get_current_block_as_u64(); - pallet_subtensor::Pallet::::set_last_transaction_block_on_subnet( - &who, - netuid, - &TransactionType::SetWeightsVersionKey, - current_block, - ); - } + Self::record_owner_rl( + maybe_owner, + netuid, + &[ + TransactionType::OwnerHyperparamUpdate, + TransactionType::SetWeightsVersionKey, + ], + ); pallet_subtensor::Pallet::::set_weights_version_key(netuid, weights_version_key); log::debug!( @@ -388,13 +396,22 @@ pub mod pallet { netuid: NetUid, adjustment_alpha: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); pallet_subtensor::Pallet::::set_adjustment_alpha(netuid, adjustment_alpha); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); log::debug!("AdjustmentAlphaSet( adjustment_alpha: {adjustment_alpha:?} ) "); Ok(()) } @@ -411,13 +428,22 @@ pub mod pallet { netuid: NetUid, max_weight_limit: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = 
pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); pallet_subtensor::Pallet::::set_max_weight_limit(netuid, max_weight_limit); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); log::debug!( "MaxWeightLimitSet( netuid: {netuid:?} max_weight_limit: {max_weight_limit:?} ) " ); @@ -436,13 +462,22 @@ pub mod pallet { netuid: NetUid, immunity_period: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist ); pallet_subtensor::Pallet::::set_immunity_period(netuid, immunity_period); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); log::debug!( "ImmunityPeriodSet( netuid: {netuid:?} immunity_period: {immunity_period:?} ) " ); @@ -461,7 +496,11 @@ pub mod pallet { netuid: NetUid, min_allowed_weights: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -471,6 +510,11 @@ pub mod pallet { log::debug!( "MinAllowedWeightSet( netuid: {netuid:?} min_allowed_weights: {min_allowed_weights:?} ) " ); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -510,7 +554,11 @@ pub mod pallet { .saturating_add(::DbWeight::get().reads(1_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn 
sudo_set_kappa(origin: OriginFor, netuid: NetUid, kappa: u16) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -518,6 +566,11 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_kappa(netuid, kappa); log::debug!("KappaSet( netuid: {netuid:?} kappa: {kappa:?} ) "); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -529,7 +582,11 @@ pub mod pallet { .saturating_add(::DbWeight::get().reads(1_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_rho(origin: OriginFor, netuid: NetUid, rho: u16) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -537,6 +594,11 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_rho(netuid, rho); log::debug!("RhoSet( netuid: {netuid:?} rho: {rho:?} ) "); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -552,7 +614,11 @@ pub mod pallet { netuid: NetUid, activity_cutoff: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -568,6 +634,11 @@ pub mod pallet { log::debug!( "ActivityCutoffSet( netuid: {netuid:?} activity_cutoff: {activity_cutoff:?} ) " ); + Self::record_owner_rl( + maybe_owner, + netuid, + 
&[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -613,7 +684,11 @@ pub mod pallet { netuid: NetUid, registration_allowed: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; pallet_subtensor::Pallet::::set_network_pow_registration_allowed( netuid, @@ -622,6 +697,11 @@ pub mod pallet { log::debug!( "NetworkPowRegistrationAllowed( registration_allowed: {registration_allowed:?} ) " ); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -637,7 +717,7 @@ pub mod pallet { netuid: NetUid, target_registrations_per_interval: u16, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -665,7 +745,7 @@ pub mod pallet { netuid: NetUid, min_burn: TaoCurrency, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -688,7 +768,7 @@ pub mod pallet { netuid: NetUid, max_burn: TaoCurrency, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -711,7 +791,7 @@ pub mod pallet { netuid: NetUid, difficulty: u64, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist @@ -733,7 +813,7 @@ pub mod pallet { netuid: NetUid, max_allowed_validators: u16, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; 
ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist @@ -766,9 +846,12 @@ pub mod pallet { netuid: NetUid, bonds_moving_average: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; - - if pallet_subtensor::Pallet::::ensure_subnet_owner(origin, netuid).is_ok() { + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; + if maybe_owner.is_some() { ensure!( bonds_moving_average <= 975000, Error::::BondsMovingAverageMaxReached @@ -783,6 +866,11 @@ pub mod pallet { log::debug!( "BondsMovingAverageSet( netuid: {netuid:?} bonds_moving_average: {bonds_moving_average:?} ) " ); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -798,7 +886,11 @@ pub mod pallet { netuid: NetUid, bonds_penalty: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -806,6 +898,11 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_bonds_penalty(netuid, bonds_penalty); log::debug!("BondsPenalty( netuid: {netuid:?} bonds_penalty: {bonds_penalty:?} ) "); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -821,7 +918,7 @@ pub mod pallet { netuid: NetUid, max_registrations_per_block: u16, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -885,7 +982,7 @@ pub mod pallet { .saturating_add(::DbWeight::get().reads(1_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn 
sudo_set_tempo(origin: OriginFor, netuid: NetUid, tempo: u16) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist @@ -1073,7 +1170,11 @@ pub mod pallet { netuid: NetUid, enabled: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -1082,6 +1183,11 @@ pub mod pallet { pallet_subtensor::Pallet::::set_commit_reveal_weights_enabled(netuid, enabled); log::debug!("ToggleSetWeightsCommitReveal( netuid: {netuid:?} ) "); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -1101,9 +1207,18 @@ pub mod pallet { netuid: NetUid, enabled: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; pallet_subtensor::Pallet::::set_liquid_alpha_enabled(netuid, enabled); log::debug!("LiquidAlphaEnableToggled( netuid: {netuid:?}, Enabled: {enabled:?} ) "); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -1116,10 +1231,22 @@ pub mod pallet { alpha_low: u16, alpha_high: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; - pallet_subtensor::Pallet::::do_set_alpha_values( + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin.clone(), + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; + let res = 
pallet_subtensor::Pallet::::do_set_alpha_values( origin, netuid, alpha_low, alpha_high, - ) + ); + if res.is_ok() { + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); + } + res } /// Sets the duration of the coldkey swap schedule. @@ -1211,7 +1338,11 @@ pub mod pallet { netuid: NetUid, interval: u64, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -1221,6 +1352,11 @@ pub mod pallet { log::debug!("SetWeightCommitInterval( netuid: {netuid:?}, interval: {interval:?} ) "); pallet_subtensor::Pallet::::set_reveal_period(netuid, interval)?; + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -1294,8 +1430,20 @@ pub mod pallet { netuid: NetUid, toggle: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; - pallet_subtensor::Pallet::::toggle_transfer(netuid, toggle) + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; + let res = pallet_subtensor::Pallet::::toggle_transfer(netuid, toggle); + if res.is_ok() { + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); + } + res } /// Toggles the enablement of an EVM precompile. 
@@ -1424,7 +1572,11 @@ pub mod pallet { netuid: NetUid, steepness: i16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin.clone(), + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -1440,6 +1592,11 @@ pub mod pallet { pallet_subtensor::Pallet::::set_alpha_sigmoid_steepness(netuid, steepness); log::debug!("AlphaSigmoidSteepnessSet( netuid: {netuid:?}, steepness: {steepness:?} )"); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -1459,11 +1616,20 @@ pub mod pallet { netuid: NetUid, enabled: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; pallet_subtensor::Pallet::::set_yuma3_enabled(netuid, enabled); Self::deposit_event(Event::Yuma3EnableToggled { netuid, enabled }); log::debug!("Yuma3EnableToggled( netuid: {netuid:?}, Enabled: {enabled:?} ) "); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); Ok(()) } @@ -1483,11 +1649,20 @@ pub mod pallet { netuid: NetUid, enabled: bool, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; pallet_subtensor::Pallet::::set_bonds_reset(netuid, enabled); Self::deposit_event(Event::BondsResetToggled { netuid, enabled }); log::debug!("BondsResetToggled( netuid: {netuid:?} bonds_reset: {enabled:?} ) "); + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + 
); Ok(()) } @@ -1550,7 +1725,7 @@ pub mod pallet { netuid: NetUid, subtoken_enabled: bool, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; pallet_subtensor::SubtokenEnabled::::set(netuid, subtoken_enabled); log::debug!( @@ -1587,10 +1762,62 @@ pub mod pallet { netuid: NetUid, immune_neurons: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + )?; pallet_subtensor::Pallet::::set_owner_immune_neuron_limit(netuid, immune_neurons)?; + Self::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ); + Ok(()) + } + + /// Sets the admin freeze window length (in blocks) at the end of a tempo. + /// Only callable by root. + #[pallet::call_index(73)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_admin_freeze_window(origin: OriginFor, window: u16) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::Pallet::::set_admin_freeze_window(window); + log::debug!("AdminFreezeWindowSet( window: {window:?} ) "); Ok(()) } + + /// Sets the owner hyperparameter rate limit (in blocks). + /// Only callable by root. 
+ #[pallet::call_index(74)] + #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + pub fn sudo_set_owner_hparam_rate_limit( + origin: OriginFor, + limit: u64, + ) -> DispatchResult { + ensure_root(origin)?; + pallet_subtensor::Pallet::::set_owner_hyperparam_rate_limit(limit); + log::debug!("OwnerHyperparamRateLimitSet( limit: {limit:?} ) "); + Ok(()) + } + } + + impl Pallet { + // Helper: if owner path, record last-blocks for the provided TransactionTypes + fn record_owner_rl( + maybe_owner: Option<::AccountId>, + netuid: NetUid, + txs: &[TransactionType], + ) { + if let Some(who) = maybe_owner { + let now = pallet_subtensor::Pallet::::get_current_block_as_u64(); + for tx in txs { + pallet_subtensor::Pallet::::set_last_transaction_block_on_subnet( + &who, netuid, tx, now, + ); + } + } + } } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 2fab5ecdb4..4fdff241b2 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -114,6 +114,10 @@ mod events { TxDelegateTakeRateLimitSet(u64), /// setting the childkey take transaction rate limit. 
TxChildKeyTakeRateLimitSet(u64), + /// setting the admin freeze window length (last N blocks of tempo) + AdminFreezeWindowSet(u16), + /// setting the owner hyperparameter rate limit (in blocks) + OwnerHyperparamRateLimitSet(u64), /// minimum childkey take set MinChildKeyTakeSet(u16), /// maximum childkey take set diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index b7f3e1288e..14b617a0e0 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -1,6 +1,6 @@ use super::*; use crate::{ - Error, RateLimitKey, + Error, system::{ensure_root, ensure_signed, ensure_signed_or_root, pallet_prelude::BlockNumberFor}, }; use safe_math::*; @@ -14,20 +14,23 @@ impl Pallet { pub fn ensure_subnet_owner_or_root( o: T::RuntimeOrigin, netuid: NetUid, - ) -> Result<(), DispatchError> { + ) -> Result, DispatchError> { let coldkey = ensure_signed_or_root(o); match coldkey { - Ok(Some(who)) if SubnetOwner::::get(netuid) == who => Ok(()), + Ok(Some(who)) if SubnetOwner::::get(netuid) == who => Ok(Some(who)), Ok(Some(_)) => Err(DispatchError::BadOrigin), - Ok(None) => Ok(()), + Ok(None) => Ok(None), Err(x) => Err(x.into()), } } - pub fn ensure_subnet_owner(o: T::RuntimeOrigin, netuid: NetUid) -> Result<(), DispatchError> { + pub fn ensure_subnet_owner( + o: T::RuntimeOrigin, + netuid: NetUid, + ) -> Result { let coldkey = ensure_signed(o); match coldkey { - Ok(who) if SubnetOwner::::get(netuid) == who => Ok(()), + Ok(who) if SubnetOwner::::get(netuid) == who => Ok(who), Ok(_) => Err(DispatchError::BadOrigin), Err(x) => Err(x.into()), } @@ -44,39 +47,47 @@ impl Pallet { Ok(()) } - /// Like `ensure_subnet_owner` but also checks transaction rate limits. - pub fn ensure_sn_owner_with_rate_limit( + /// Ensure owner-or-root with a set of TransactionType rate checks (owner only). + /// - Root: only freeze window is enforced; no TransactionType checks. 
+ /// - Owner (Signed): freeze window plus all rate checks in `limits` using signer extracted from + /// origin. + pub fn ensure_sn_owner_or_root_with_limits( o: T::RuntimeOrigin, netuid: NetUid, - ) -> Result<(), DispatchError> { - Self::ensure_subnet_owner(o, netuid)?; + limits: &[crate::utils::rate_limiting::TransactionType], + ) -> Result, DispatchError> { + let maybe_who = Self::ensure_subnet_owner_or_root(o, netuid)?; let now = Self::get_current_block_as_u64(); - // Disallow inside freeze window and enforce owner hyperparam rate limit Self::ensure_not_in_admin_freeze_window(netuid, now)?; - Self::ensure_owner_hparam_rate_limit(netuid, now)?; - Ok(()) + if let Some(who) = maybe_who.as_ref() { + for tx in limits.iter() { + ensure!( + Self::passes_rate_limit_on_subnet(tx, who, netuid), + Error::::TxRateLimitExceeded + ); + } + } + Ok(maybe_who) } - /// Like `ensure_subnet_owner_or_root` but also checks transaction rate limits. - /// Root is not rate-limited outside the freeze window, but is also prohibited inside it. - pub fn ensure_sn_owner_or_root_with_rate_limit( + /// Ensure the caller is the subnet owner and passes all provided rate limits. + /// This does NOT allow root; it is strictly owner-only. + /// Returns the signer (owner) on success so callers may record last-blocks. + pub fn ensure_sn_owner_with_limits( o: T::RuntimeOrigin, netuid: NetUid, - ) -> Result<(), DispatchError> { + limits: &[crate::utils::rate_limiting::TransactionType], + ) -> Result { + let who = Self::ensure_subnet_owner(o, netuid)?; let now = Self::get_current_block_as_u64(); - - // If root, only enforce freeze window. - if ensure_root(o.clone()).is_ok() { - Self::ensure_not_in_admin_freeze_window(netuid, now)?; - return Ok(()); - } - - // Otherwise ensure subnet owner and apply both checks. 
- Self::ensure_subnet_owner(o, netuid)?; Self::ensure_not_in_admin_freeze_window(netuid, now)?; - Self::ensure_owner_hparam_rate_limit(netuid, now)?; - - Ok(()) + for tx in limits.iter() { + ensure!( + Self::passes_rate_limit_on_subnet(tx, &who, netuid), + Error::::TxRateLimitExceeded + ); + } + Ok(who) } /// Returns true if the current block is within the terminal freeze window of the tempo for the @@ -100,18 +111,7 @@ impl Pallet { Ok(()) } - fn ensure_owner_hparam_rate_limit(netuid: NetUid, now: u64) -> Result<(), DispatchError> { - let limit = OwnerHyperparamRateLimit::::get(); - if limit > 0 { - let last = - Self::get_rate_limited_last_block(&RateLimitKey::OwnerHyperparamUpdate(netuid)); - ensure!( - now.saturating_sub(last) >= limit || last == 0, - Error::::TxRateLimitExceeded - ); - } - Ok(()) - } + // (Removed dedicated ensure_owner_hparam_rate_limit; OwnerHyperparamUpdate is checked via TransactionType) // === Admin freeze window accessors === pub fn get_admin_freeze_window() -> u16 { @@ -120,13 +120,7 @@ impl Pallet { pub fn set_admin_freeze_window(window: u16) { AdminFreezeWindow::::set(window); - } - - /// Helper to be called after a successful owner hyperparameter update. - /// Records the current block against the OwnerHyperparamUpdate rate limit key. 
- pub fn mark_owner_hyperparam_update(netuid: NetUid) { - let now = Self::get_current_block_as_u64(); - Self::set_rate_limited_last_block(&RateLimitKey::OwnerHyperparamUpdate(netuid), now); + Self::deposit_event(Event::AdminFreezeWindowSet(window)); } pub fn get_owner_hyperparam_rate_limit() -> u64 { @@ -135,6 +129,7 @@ impl Pallet { pub fn set_owner_hyperparam_rate_limit(limit: u64) { OwnerHyperparamRateLimit::::set(limit); + Self::deposit_event(Event::OwnerHyperparamRateLimitSet(limit)); } // ======================== diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index eeb5b96ddb..bdf8bc5147 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -11,6 +11,7 @@ pub enum TransactionType { RegisterNetwork, SetWeightsVersionKey, SetSNOwnerHotkey, + OwnerHyperparamUpdate, } /// Implement conversion from TransactionType to u16 @@ -23,6 +24,7 @@ impl From for u16 { TransactionType::RegisterNetwork => 3, TransactionType::SetWeightsVersionKey => 4, TransactionType::SetSNOwnerHotkey => 5, + TransactionType::OwnerHyperparamUpdate => 6, } } } @@ -36,6 +38,7 @@ impl From for TransactionType { 3 => TransactionType::RegisterNetwork, 4 => TransactionType::SetWeightsVersionKey, 5 => TransactionType::SetSNOwnerHotkey, + 6 => TransactionType::OwnerHyperparamUpdate, _ => TransactionType::Unknown, } } @@ -50,6 +53,7 @@ impl Pallet { TransactionType::SetChildren => 150, // 30 minutes TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), TransactionType::RegisterNetwork => NetworkRateLimit::::get(), + TransactionType::OwnerHyperparamUpdate => OwnerHyperparamRateLimit::::get(), TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, @@ -62,6 +66,7 @@ impl Pallet { TransactionType::SetWeightsVersionKey => (Tempo::::get(netuid) as u64) .saturating_mul(WeightsVersionKeyRateLimit::::get()), TransactionType::SetSNOwnerHotkey => 
DefaultSetSNOwnerHotkeyRateLimit::::get(), + TransactionType::OwnerHyperparamUpdate => OwnerHyperparamRateLimit::::get(), _ => Self::get_rate_limit(tx_type), } @@ -112,6 +117,9 @@ impl Pallet { TransactionType::SetSNOwnerHotkey => { Self::get_rate_limited_last_block(&RateLimitKey::SetSNOwnerHotkey(netuid)) } + TransactionType::OwnerHyperparamUpdate => { + Self::get_rate_limited_last_block(&RateLimitKey::OwnerHyperparamUpdate(netuid)) + } _ => { let tx_as_u16: u16 = (*tx_type).into(); TransactionKeyLastBlock::::get((hotkey, netuid, tx_as_u16)) @@ -139,6 +147,10 @@ impl Pallet { TransactionType::SetSNOwnerHotkey => { Self::set_rate_limited_last_block(&RateLimitKey::SetSNOwnerHotkey(netuid), block) } + TransactionType::OwnerHyperparamUpdate => Self::set_rate_limited_last_block( + &RateLimitKey::OwnerHyperparamUpdate(netuid), + block, + ), _ => { let tx_as_u16: u16 = (*tx_type).into(); TransactionKeyLastBlock::::insert((key, netuid, tx_as_u16), block); From 1c30fed681b7a88705dcae90b5c87c1cf30a5547 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Thu, 4 Sep 2025 13:06:59 +0300 Subject: [PATCH 132/379] Add tests --- pallets/admin-utils/src/tests/mock.rs | 5 +- pallets/admin-utils/src/tests/mod.rs | 130 +++++++++++++++- pallets/subtensor/src/tests/ensure.rs | 156 +++++++++++++++++++ pallets/subtensor/src/tests/mod.rs | 1 + pallets/subtensor/src/utils/rate_limiting.rs | 4 +- 5 files changed, 291 insertions(+), 5 deletions(-) create mode 100644 pallets/subtensor/src/tests/ensure.rs diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 35934bc846..0b23d9285b 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -476,7 +476,10 @@ pub fn new_test_ext() -> sp_io::TestExternalities { .build_storage() .unwrap(); let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); + ext.execute_with(|| { + System::set_block_number(1); + 
SubtensorModule::set_admin_freeze_window(1); + }); ext } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 5290d3ddfc..9f6095b821 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -175,7 +175,7 @@ fn test_sudo_set_weights_version_key_rate_limit() { SubnetOwner::::insert(netuid, sn_owner); let rate_limit = WeightsVersionKeyRateLimit::::get(); - let tempo: u16 = Tempo::::get(netuid); + let tempo = Tempo::::get(netuid); let rate_limit_period = rate_limit * (tempo as u64); @@ -205,7 +205,7 @@ fn test_sudo_set_weights_version_key_rate_limit() { ); // Wait for rate limit to pass - run_to_block(rate_limit_period + 2); + run_to_block(rate_limit_period + 1); assert!(SubtensorModule::passes_rate_limit_on_subnet( &pallet_subtensor::utils::rate_limiting::TransactionType::SetWeightsVersionKey, &sn_owner, @@ -1951,3 +1951,129 @@ fn test_sudo_set_commit_reveal_version() { ); }); } + +#[test] +fn test_sudo_set_admin_freeze_window_and_rate() { + new_test_ext().execute_with(|| { + // Non-root fails + assert_eq!( + AdminUtils::sudo_set_admin_freeze_window( + <::RuntimeOrigin>::signed(U256::from(1)), + 7 + ), + Err(DispatchError::BadOrigin) + ); + // Root succeeds + assert_ok!(AdminUtils::sudo_set_admin_freeze_window( + <::RuntimeOrigin>::root(), + 7 + )); + assert_eq!(SubtensorModule::get_admin_freeze_window(), 7); + + // Owner hyperparam rate limit setter + assert_eq!( + AdminUtils::sudo_set_owner_hparam_rate_limit( + <::RuntimeOrigin>::signed(U256::from(1)), + 5 + ), + Err(DispatchError::BadOrigin) + ); + assert_ok!(AdminUtils::sudo_set_owner_hparam_rate_limit( + <::RuntimeOrigin>::root(), + 5 + )); + assert_eq!(SubtensorModule::get_owner_hyperparam_rate_limit(), 5); + }); +} + +#[test] +fn test_freeze_window_blocks_root_and_owner() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let tempo = 10; + // Create subnet with tempo 10 + add_network(netuid, tempo); + // Set 
freeze window to 3 blocks + assert_ok!(AdminUtils::sudo_set_admin_freeze_window( + <::RuntimeOrigin>::root(), + 3 + )); + // Advance to a block where remaining < 3 + run_to_block((tempo - 2).into()); + + // Root should be blocked during freeze window + assert_noop!( + AdminUtils::sudo_set_min_burn( + <::RuntimeOrigin>::root(), + netuid, + 123.into() + ), + SubtensorError::::AdminActionProhibitedDuringWeightsWindow + ); + + // Owner should be blocked during freeze window as well + // Set owner + let owner: U256 = U256::from(9); + SubnetOwner::::insert(netuid, owner); + assert_noop!( + AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 77 + ), + SubtensorError::::AdminActionProhibitedDuringWeightsWindow + ); + }); +} + +#[test] +fn test_owner_hyperparam_update_rate_limit_enforced() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + add_network(netuid, 10); + // Set owner + let owner: U256 = U256::from(5); + SubnetOwner::::insert(netuid, owner); + + // Configure owner hyperparam RL to 2 blocks + assert_ok!(AdminUtils::sudo_set_owner_hparam_rate_limit( + <::RuntimeOrigin>::root(), + 2 + )); + + // First update succeeds + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 11 + )); + // Immediate second update fails due to TxRateLimitExceeded + assert_noop!( + AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 12 + ), + SubtensorError::::TxRateLimitExceeded + ); + + // Advance less than limit still fails + run_to_block(SubtensorModule::get_current_block_as_u64() + 1); + assert_noop!( + AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 13 + ), + SubtensorError::::TxRateLimitExceeded + ); + + // Advance one more block to pass the limit; should succeed + run_to_block(SubtensorModule::get_current_block_as_u64() + 1); + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 14 + )); + }); +} diff --git 
a/pallets/subtensor/src/tests/ensure.rs b/pallets/subtensor/src/tests/ensure.rs new file mode 100644 index 0000000000..a8b3843fa3 --- /dev/null +++ b/pallets/subtensor/src/tests/ensure.rs @@ -0,0 +1,156 @@ +use frame_support::{assert_noop, assert_ok}; +use frame_system::Config; +use sp_core::U256; +use subtensor_runtime_common::NetUid; + +use super::mock::*; +use crate::utils::rate_limiting::TransactionType; +use crate::{RateLimitKey, SubnetOwner, SubtokenEnabled}; + +#[test] +fn ensure_subnet_owner_returns_who_and_checks_ownership() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + add_network(netuid, 10, 0); + + let owner: U256 = U256::from(42); + SubnetOwner::::insert(netuid, owner); + + // Non-owner signed should fail + assert!( + crate::Pallet::::ensure_subnet_owner( + <::RuntimeOrigin>::signed(U256::from(7)), + netuid + ) + .is_err() + ); + + // Owner signed returns who + let who = crate::Pallet::::ensure_subnet_owner( + <::RuntimeOrigin>::signed(owner), + netuid, + ) + .expect("owner must pass"); + assert_eq!(who, owner); + }); +} + +#[test] +fn ensure_subnet_owner_or_root_distinguishes_root_and_owner() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(2); + add_network(netuid, 10, 0); + let owner: U256 = U256::from(9); + SubnetOwner::::insert(netuid, owner); + + // Root path returns None + let root = crate::Pallet::::ensure_subnet_owner_or_root( + <::RuntimeOrigin>::root(), + netuid, + ) + .expect("root allowed"); + assert!(root.is_none()); + + // Owner path returns Some(owner) + let maybe_owner = crate::Pallet::::ensure_subnet_owner_or_root( + <::RuntimeOrigin>::signed(owner), + netuid, + ) + .expect("owner allowed"); + assert_eq!(maybe_owner, Some(owner)); + }); +} + +#[test] +fn ensure_root_with_rate_limit_blocks_in_freeze_window() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + let tempo = 10; + add_network(netuid, 10, 0); + + // Set freeze window to 3 + let freeze_window = 3; + 
crate::Pallet::::set_admin_freeze_window(freeze_window); + + run_to_block((tempo - freeze_window + 1).into()); + + // Root is blocked in freeze window + assert!( + crate::Pallet::::ensure_root_with_rate_limit( + <::RuntimeOrigin>::root(), + netuid + ) + .is_err() + ); + }); +} + +#[test] +fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + let tempo = 10; + add_network(netuid, 10, 0); + SubtokenEnabled::::insert(netuid, true); + let owner: U256 = U256::from(5); + SubnetOwner::::insert(netuid, owner); + // Set freeze window to 3 + let freeze_window = 3; + crate::Pallet::::set_admin_freeze_window(freeze_window); + + // Set owner RL to 2 blocks + crate::Pallet::::set_owner_hyperparam_rate_limit(2); + + // Outside freeze window initially; should pass and return Some(owner) + let res = crate::Pallet::::ensure_sn_owner_or_root_with_limits( + <::RuntimeOrigin>::signed(owner), + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ) + .expect("should pass"); + assert_eq!(res, Some(owner)); + + // Simulate previous update at current block -> next call should fail due to rate limit + let now = crate::Pallet::::get_current_block_as_u64(); + crate::Pallet::::set_rate_limited_last_block( + &RateLimitKey::OwnerHyperparamUpdate(netuid), + now, + ); + assert_noop!( + crate::Pallet::::ensure_sn_owner_or_root_with_limits( + <::RuntimeOrigin>::signed(owner), + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ), + crate::Error::::TxRateLimitExceeded + ); + + // Advance beyond RL and ensure passes again + run_to_block(now + 3); + assert_ok!(crate::Pallet::::ensure_sn_owner_or_root_with_limits( + <::RuntimeOrigin>::signed(owner), + netuid, + &[TransactionType::OwnerHyperparamUpdate] + )); + + // Now advance into the freeze window; ensure blocks + // (using loop for clarity, because epoch calculation function uses netuid) + let freeze_window = freeze_window as u64; + loop { + let cur = 
crate::Pallet::::get_current_block_as_u64(); + let rem = crate::Pallet::::blocks_until_next_epoch(netuid, tempo, cur); + if rem < freeze_window { + break; + } + run_to_block(cur + 1); + } + assert_noop!( + crate::Pallet::::ensure_sn_owner_or_root_with_limits( + <::RuntimeOrigin>::signed(owner), + netuid, + &[TransactionType::OwnerHyperparamUpdate], + ), + crate::Error::::AdminActionProhibitedDuringWeightsWindow + ); + }); +} diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index b743d7c1ff..1f4aa71363 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -5,6 +5,7 @@ mod consensus; mod delegate_info; mod difficulty; mod emission; +mod ensure; mod epoch; mod evm; mod leasing; diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index bdf8bc5147..d0a05766da 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -66,7 +66,6 @@ impl Pallet { TransactionType::SetWeightsVersionKey => (Tempo::::get(netuid) as u64) .saturating_mul(WeightsVersionKeyRateLimit::::get()), TransactionType::SetSNOwnerHotkey => DefaultSetSNOwnerHotkeyRateLimit::::get(), - TransactionType::OwnerHyperparamUpdate => OwnerHyperparamRateLimit::::get(), _ => Self::get_rate_limit(tx_type), } @@ -135,7 +134,8 @@ impl Pallet { } } - /// Set the block number of the last transaction for a specific hotkey, network, and transaction type + /// Set the block number of the last transaction for a specific hotkey, network, and transaction + /// type pub fn set_last_transaction_block_on_subnet( key: &T::AccountId, netuid: NetUid, From 452390d97667eca8da245e192f51a86ede84b913 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 08:25:00 -0400 Subject: [PATCH 133/379] Use epoch index for super-block calculation --- pallets/subtensor/src/subnets/subsubnet.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index d72c696c8b..9c2ba7086f 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -124,8 +124,8 @@ impl Pallet { // Run once per super-block let super_block_tempos = u64::from(SuperBlockTempos::::get()); Self::get_all_subnet_netuids().iter().for_each(|netuid| { - let super_block = super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); - if let Some(rem) = current_block.checked_rem(super_block) { + let epoch_index = Self::get_epoch_index(*netuid, current_block); + if let Some(rem) = epoch_index.checked_rem(super_block_tempos) { if rem == 0 { let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); From d5f24047b82f5144dd0d81b42f9a84e244dfb306 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Thu, 4 Sep 2025 16:20:53 +0300 Subject: [PATCH 134/379] Reformat --- pallets/admin-utils/src/benchmarking.rs | 12 ++++++++++++ pallets/subtensor/src/utils/misc.rs | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 61df5d55f8..5612100b90 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -346,5 +346,17 @@ mod benchmarks { _(RawOrigin::Root, 5u16/*version*/)/*sudo_set_commit_reveal_version()*/; } + #[benchmark] + fn sudo_set_admin_freeze_window() { + #[extrinsic_call] + _(RawOrigin::Root, 5u16/*window*/)/*sudo_set_admin_freeze_window*/; + } + + #[benchmark] + fn sudo_set_owner_hparam_rate_limit() { + #[extrinsic_call] + _(RawOrigin::Root, 10u64/*limit*/)/*sudo_set_owner_hparam_rate_limit*/; + } + //impl_benchmark_test_suite!(AdminUtils, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 
14b617a0e0..0951cd3ead 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -50,7 +50,7 @@ impl Pallet { /// Ensure owner-or-root with a set of TransactionType rate checks (owner only). /// - Root: only freeze window is enforced; no TransactionType checks. /// - Owner (Signed): freeze window plus all rate checks in `limits` using signer extracted from - /// origin. + /// origin. pub fn ensure_sn_owner_or_root_with_limits( o: T::RuntimeOrigin, netuid: NetUid, From 23537679b8d4f326d6d3d504883544394f440aba Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 11:50:40 -0400 Subject: [PATCH 135/379] Fix super-block decrease test, add super-block increase test --- pallets/subtensor/src/subnets/subsubnet.rs | 4 +-- pallets/subtensor/src/tests/subsubnet.rs | 39 ++++++++++++++++++---- 2 files changed, 35 insertions(+), 8 deletions(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 9c2ba7086f..cdee6db745 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -124,8 +124,8 @@ impl Pallet { // Run once per super-block let super_block_tempos = u64::from(SuperBlockTempos::::get()); Self::get_all_subnet_netuids().iter().for_each(|netuid| { - let epoch_index = Self::get_epoch_index(*netuid, current_block); - if let Some(rem) = epoch_index.checked_rem(super_block_tempos) { + let super_block = super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); + if let Some(rem) = current_block.saturating_add(u16::from(*netuid) as u64).checked_rem(super_block) { if rem == 0 { let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 78fbefa681..2c1f4f2051 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ 
b/pallets/subtensor/src/tests/subsubnet.rs @@ -287,17 +287,17 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { let netuid = NetUid::from(42u16); NetworksAdded::::insert(NetUid::from(42u16), true); - // super_block = SuperBlockTempos() * Tempo(netuid) - Tempo::::insert(netuid, 1u16); + // super_block = SuperBlockTempos() * Tempo(netuid) - netuid + Tempo::::insert(netuid, 360u16); let super_block = - u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)); + u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)) - u16::from(netuid) as u64; // Choose counts so result is deterministic for ANY decrease-per-superblock. // Let dec = GlobalSubsubnetDecreasePerSuperblock(); set old = dec + 3. let dec: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); - let old = SubId::from(dec.saturating_add(3)); // ≥3 + let old = SubId::from(dec.saturating_add(3)); let desired = SubId::from(1u8); - // min_possible = max(old - dec, 1) = 3 → new_count = 3 + // min_capped = max(old - dec, 1) = 3 => new_count = 3 SubsubnetCountCurrent::::insert(netuid, old); SubsubnetCountDesired::::insert(netuid, desired); @@ -336,7 +336,7 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { ); // Act exactly on a super-block boundary - SubtensorModule::update_subsubnet_counts_if_needed(2 * super_block); + SubtensorModule::update_subsubnet_counts_if_needed(super_block); // New count is 3 assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(3u8)); @@ -363,6 +363,33 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { }); } +#[test] +fn update_subsubnet_counts_increases_on_superblock() { + new_test_ext(1).execute_with(|| { + // Base subnet exists + let netuid = NetUid::from(42u16); + NetworksAdded::::insert(NetUid::from(42u16), true); + + // super_block = SuperBlockTempos() * Tempo(netuid) - netuid + Tempo::::insert(netuid, 360u16); + let super_block = + u64::from(SuperBlockTempos::::get()) * 
u64::from(Tempo::::get(netuid)) - u16::from(netuid) as u64; + + // Choose counts so result is deterministic for ANY increase-per-superblock. + let inc: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); + let old = SubId::from(1u8); + let desired = SubId::from(5u8); + SubsubnetCountCurrent::::insert(netuid, old); + SubsubnetCountDesired::::insert(netuid, desired); + + // Act exactly on a super-block boundary + SubtensorModule::update_subsubnet_counts_if_needed(super_block); + + // New count is old + inc + assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(1 + inc)); + }); +} + #[test] fn update_subsubnet_counts_no_change_when_not_superblock() { new_test_ext(1).execute_with(|| { From 0eaf230f6a7d73c2b84581cf5a76506122d00d19 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 11:54:42 -0400 Subject: [PATCH 136/379] Add testing of SubsubnetEmissionSplit reset --- pallets/subtensor/src/subnets/subsubnet.rs | 5 +++- pallets/subtensor/src/tests/subsubnet.rs | 33 ++++++++++++++++------ 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index cdee6db745..63f2aa0a0d 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -125,7 +125,10 @@ impl Pallet { let super_block_tempos = u64::from(SuperBlockTempos::::get()); Self::get_all_subnet_netuids().iter().for_each(|netuid| { let super_block = super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); - if let Some(rem) = current_block.saturating_add(u16::from(*netuid) as u64).checked_rem(super_block) { + if let Some(rem) = current_block + .saturating_add(u16::from(*netuid) as u64) + .checked_rem(super_block) + { if rem == 0 { let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); diff --git a/pallets/subtensor/src/tests/subsubnet.rs 
b/pallets/subtensor/src/tests/subsubnet.rs index 2c1f4f2051..7a51da0768 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -27,15 +27,15 @@ // - [x] Per-subsubnet incentives are distributed proportionally to miner weights // - [x] Subsubnet limit can be set up to 8 (with admin pallet) // - [x] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block -// - [ ] When subsubnet limit is increased, increase is GlobalSubsubnetDecreasePerSuperblock per super-block +// - [x] When subsubnet limit is increased, increase is GlobalSubsubnetDecreasePerSuperblock per super-block // - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared // - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms // - [ ] Subnet epoch terms persist in state // - [x] Subsubnet epoch terms persist in state // - [ ] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake // - [x] Miner with no weights on any subsubnet receives no reward -// - [ ] SubsubnetEmissionSplit is reset on super-block on subsubnet count increase -// - [ ] SubsubnetEmissionSplit is reset on super-block on subsubnet count decrease +// - [x] SubsubnetEmissionSplit is reset on super-block on subsubnet count increase +// - [x] SubsubnetEmissionSplit is reset on super-block on subsubnet count decrease use super::mock::*; use crate::coinbase::reveal_commits::WeightsTlockPayload; @@ -289,8 +289,9 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { // super_block = SuperBlockTempos() * Tempo(netuid) - netuid Tempo::::insert(netuid, 360u16); - let super_block = - u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)) - u16::from(netuid) as u64; + let super_block = u64::from(SuperBlockTempos::::get()) + * u64::from(Tempo::::get(netuid)) + - u16::from(netuid) as u64; // Choose counts 
so result is deterministic for ANY decrease-per-superblock. // Let dec = GlobalSubsubnetDecreasePerSuperblock(); set old = dec + 3. @@ -301,6 +302,9 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { SubsubnetCountCurrent::::insert(netuid, old); SubsubnetCountDesired::::insert(netuid, desired); + // Set non-default subnet emission split + SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); + // Seed data at a kept subid (2) and a removed subid (3) let idx_keep = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); let idx_rm3 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(3u8)); @@ -360,6 +364,9 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { assert!(!TimelockedWeightCommits::::contains_key( idx_rm3, 1u64 )); + + // SubsubnetEmissionSplit is reset on super-block + assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); }); } @@ -372,8 +379,9 @@ fn update_subsubnet_counts_increases_on_superblock() { // super_block = SuperBlockTempos() * Tempo(netuid) - netuid Tempo::::insert(netuid, 360u16); - let super_block = - u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)) - u16::from(netuid) as u64; + let super_block = u64::from(SuperBlockTempos::::get()) + * u64::from(Tempo::::get(netuid)) + - u16::from(netuid) as u64; // Choose counts so result is deterministic for ANY increase-per-superblock. 
let inc: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); @@ -382,11 +390,20 @@ fn update_subsubnet_counts_increases_on_superblock() { SubsubnetCountCurrent::::insert(netuid, old); SubsubnetCountDesired::::insert(netuid, desired); + // Set non-default subnet emission split + SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); + // Act exactly on a super-block boundary SubtensorModule::update_subsubnet_counts_if_needed(super_block); // New count is old + inc - assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(1 + inc)); + assert_eq!( + SubsubnetCountCurrent::::get(netuid), + SubId::from(1 + inc) + ); + + // SubsubnetEmissionSplit is reset on super-block + assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); }); } From 412a24766d39361a1485eac555ef1c36f4267c98 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 14:05:34 -0400 Subject: [PATCH 137/379] Add emission tests --- pallets/subtensor/src/subnets/subsubnet.rs | 52 +++- pallets/subtensor/src/tests/coinbase.rs | 1 + pallets/subtensor/src/tests/subsubnet.rs | 297 ++++++++++++++++++++- 3 files changed, 343 insertions(+), 7 deletions(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 63f2aa0a0d..98f339bfb5 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -286,8 +286,11 @@ impl Pallet { // Calculate subsubnet weight from the split emission (not the other way because preserving // emission accuracy is the priority) - let sub_weight = U64F64::saturating_from_num(sub_emission) - .safe_div(U64F64::saturating_from_num(rao_emission)); + // For zero emission the first subsubnet gets full weight + let sub_weight = U64F64::saturating_from_num(sub_emission).safe_div_or( + U64F64::saturating_from_num(rao_emission), + U64F64::saturating_from_num(if sub_id_u8 == 0 { 1 } else { 0 }), + ); // Produce an iterator of (hotkey, (terms, sub_weight)) tuples 
epoch_output @@ -346,7 +349,50 @@ impl Pallet { ); acc_terms.new_validator_permit |= terms.new_validator_permit; }) - .or_insert(terms); + .or_insert_with(|| { + // weighted insert for the first sub-subnet seen for this hotkey + EpochTerms { + uid: terms.uid, + dividend: Self::weighted_acc_u16(0, terms.dividend, sub_weight), + incentive: Self::weighted_acc_u16(0, terms.incentive, sub_weight), + validator_emission: Self::weighted_acc_alpha( + 0u64.into(), + terms.validator_emission, + sub_weight, + ), + server_emission: Self::weighted_acc_alpha( + 0u64.into(), + terms.server_emission, + sub_weight, + ), + stake_weight: Self::weighted_acc_u16( + 0, + terms.stake_weight, + sub_weight, + ), + active: terms.active, // booleans are ORed across subs + emission: Self::weighted_acc_alpha( + 0u64.into(), + terms.emission, + sub_weight, + ), + rank: Self::weighted_acc_u16(0, terms.rank, sub_weight), + trust: Self::weighted_acc_u16(0, terms.trust, sub_weight), + consensus: Self::weighted_acc_u16(0, terms.consensus, sub_weight), + pruning_score: Self::weighted_acc_u16( + 0, + terms.pruning_score, + sub_weight, + ), + validator_trust: Self::weighted_acc_u16( + 0, + terms.validator_trust, + sub_weight, + ), + new_validator_permit: terms.new_validator_permit, + bond: Vec::new(), // aggregated map doesn’t use bonds; keep empty + } + }); acc }); diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index d27e42f445..a196cfc00e 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -2441,6 +2441,7 @@ fn test_drain_pending_emission_no_miners_all_drained() { }); } +// cargo test --package pallet-subtensor --lib -- tests::coinbase::test_drain_pending_emission_zero_emission --exact --show-output #[test] fn test_drain_pending_emission_zero_emission() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 
7a51da0768..4f4902df42 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -29,10 +29,10 @@ // - [x] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block // - [x] When subsubnet limit is increased, increase is GlobalSubsubnetDecreasePerSuperblock per super-block // - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared -// - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms -// - [ ] Subnet epoch terms persist in state +// - [x] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms +// - [x] Subnet epoch terms persist in state // - [x] Subsubnet epoch terms persist in state -// - [ ] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake +// - [x] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake // - [x] Miner with no weights on any subsubnet receives no reward // - [x] SubsubnetEmissionSplit is reset on super-block on subsubnet count increase // - [x] SubsubnetEmissionSplit is reset on super-block on subsubnet count decrease @@ -41,6 +41,7 @@ use super::mock::*; use crate::coinbase::reveal_commits::WeightsTlockPayload; use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; use crate::*; +use alloc::collections::BTreeMap; use approx::assert_abs_diff_eq; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use codec::Encode; @@ -52,7 +53,7 @@ use sha2::Digest; use sp_core::{H256, U256}; use sp_runtime::traits::{BlakeTwo256, Hash}; use sp_std::collections::vec_deque::VecDeque; -use substrate_fixed::types::I32F32; +use substrate_fixed::types::{I32F32, U64F64}; use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; use tle::{ curves::drand::TinyBLS381, ibe::fullident::Identity, @@ -663,6 +664,190 @@ fn 
epoch_with_subsubnets_incentives_proportional_to_weights() { }); } +#[test] +fn epoch_with_subsubnets_persists_and_aggregates_all_terms() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + + // Three neurons: validator (uid=0) + two miners (uid=1,2) + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let hk2 = U256::from(6); + let emission = AlphaCurrency::from(1_000_000_000u64); + + // Healthy minimal state and 3rd neuron + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + mock_3_neurons(netuid, hk2); + let uid0 = 0_usize; + let uid1 = 1_usize; + let uid2 = 2_usize; + + // Two sub-subnets with non-equal split (~25% / 75%) + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let split0 = u16::MAX / 4; + let split1 = u16::MAX - split0; + SubsubnetEmissionSplit::::insert(netuid, vec![split0, split1]); + + // One validator; skew weights differently per sub-subnet + ValidatorPermit::::insert(netuid, vec![true, false, false]); + // sub 0: uid1 heavy, uid2 light + Weights::::insert( + idx0, + 0, + vec![(1u16, 0xFFFF / 5 * 3), (2u16, 0xFFFF / 5 * 2)], + ); + // sub 1: uid1 light, uid2 heavy + Weights::::insert(idx1, 0, vec![(1u16, 0xFFFF / 5), (2u16, 0xFFFF / 5 * 4)]); + + // Per-sub emissions (and weights used for aggregation) + let subsubnet_emissions = SubtensorModule::split_emissions(netuid, emission); + let w0 = U64F64::from_num(u64::from(subsubnet_emissions[0])) + / U64F64::from_num(u64::from(emission)); + let w1 = U64F64::from_num(u64::from(subsubnet_emissions[1])) + / U64F64::from_num(u64::from(emission)); + assert_abs_diff_eq!(w0.to_num::(), 0.25, epsilon = 0.0001); + assert_abs_diff_eq!(w1.to_num::(), 0.75, epsilon = 0.0001); + + // Get per-subsubnet epoch outputs to build expectations + let out0 = 
SubtensorModule::epoch_subsubnet(netuid, SubId::from(0), subsubnet_emissions[0]); + let out1 = SubtensorModule::epoch_subsubnet(netuid, SubId::from(1), subsubnet_emissions[1]); + + // Now run the real aggregated path (also persists terms) + let agg = SubtensorModule::epoch_with_subsubnets(netuid, emission); + + // hotkey -> (server_emission_u64, validator_emission_u64) + let agg_map: BTreeMap = agg + .into_iter() + .map(|(hk, se, ve)| (hk, (u64::from(se), u64::from(ve)))) + .collect(); + + // Helper to fetch per-sub terms by hotkey + let terms0 = |hk: &U256| out0.0.get(hk).unwrap(); + let terms1 = |hk: &U256| out1.0.get(hk).unwrap(); + + // Returned aggregated emissions match weighted sums + for hk in [&hk1, &hk2] { + let (got_se, got_ve) = agg_map.get(hk).cloned().expect("present"); + let t0 = terms0(hk); + let t1 = terms1(hk); + let exp_se = (U64F64::saturating_from_num(u64::from(t0.server_emission)) * w0 + + U64F64::saturating_from_num(u64::from(t1.server_emission)) * w1) + .saturating_to_num::(); + let exp_ve = (U64F64::saturating_from_num(u64::from(t0.validator_emission)) * w0 + + U64F64::saturating_from_num(u64::from(t1.validator_emission)) * w1) + .saturating_to_num::(); + assert_abs_diff_eq!(u64::from(got_se), exp_se, epsilon = 1); + assert_abs_diff_eq!(u64::from(got_ve), exp_ve, epsilon = 1); + } + + // Persisted per-subsubnet Incentive vectors match per-sub terms + let inc0 = Incentive::::get(idx0); + let inc1 = Incentive::::get(idx1); + let exp_inc0 = { + let mut v = vec![0u16; 3]; + v[terms0(&hk0).uid] = terms0(&hk0).incentive; + v[terms0(&hk1).uid] = terms0(&hk1).incentive; + v[terms0(&hk2).uid] = terms0(&hk2).incentive; + v + }; + let exp_inc1 = { + let mut v = vec![0u16; 3]; + v[terms1(&hk0).uid] = terms1(&hk0).incentive; + v[terms1(&hk1).uid] = terms1(&hk1).incentive; + v[terms1(&hk2).uid] = terms1(&hk2).incentive; + v + }; + for (a, e) in inc0.iter().zip(exp_inc0.iter()) { + assert_abs_diff_eq!(*a, *e, epsilon = 1); + } + for (a, e) in 
inc1.iter().zip(exp_inc1.iter()) { + assert_abs_diff_eq!(*a, *e, epsilon = 1); + } + + // Persisted Bonds for validator (uid0) exist and mirror per-sub terms + let b0 = Bonds::::get(idx0, 0u16); + let b1 = Bonds::::get(idx1, 0u16); + let exp_b0 = &terms0(&hk0).bond; + let exp_b1 = &terms1(&hk0).bond; + + assert!(!b0.is_empty(), "bonds sub0 empty"); + assert!(!b1.is_empty(), "bonds sub1 empty"); + assert_eq!(b0.len(), exp_b0.len()); + assert_eq!(b1.len(), exp_b1.len()); + for ((u_a, w_a), (u_e, w_e)) in b0.iter().zip(exp_b0.iter()) { + assert_eq!(u_a, u_e); + assert_abs_diff_eq!(*w_a, *w_e, epsilon = 1); + } + for ((u_a, w_a), (u_e, w_e)) in b1.iter().zip(exp_b1.iter()) { + assert_eq!(u_a, u_e); + assert_abs_diff_eq!(*w_a, *w_e, epsilon = 1); + } + + // Persisted subnet-level terms are weighted/OR aggregates of sub-subnets + // Fetch persisted vectors + let active = Active::::get(netuid); + let emission_v = Emission::::get(netuid); + let rank_v = Rank::::get(netuid); + let trust_v = Trust::::get(netuid); + let cons_v = Consensus::::get(netuid); + let div_v = Dividends::::get(netuid); + let prun_v = PruningScores::::get(netuid); + let vtrust_v = ValidatorTrust::::get(netuid); + let vperm_v = ValidatorPermit::::get(netuid); + + // Helpers for weighted u16 / u64 + let wu16 = |a: u16, b: u16| -> u16 { + (U64F64::saturating_from_num(a) * w0 + U64F64::saturating_from_num(b) * w1) + .saturating_to_num::() + }; + let wu64 = |a: u64, b: u64| -> u64 { + (U64F64::saturating_from_num(a) * w0 + U64F64::saturating_from_num(b) * w1) + .saturating_to_num::() + }; + + // For each UID, compute expected aggregate from out0/out1 terms + let check_uid = |uid: usize, hk: &U256| { + let t0 = terms0(hk); + let t1 = terms1(hk); + + // Active & ValidatorPermit are OR-aggregated + assert_eq!(active[uid], t0.active || t1.active); + assert_eq!( + vperm_v[uid], + t0.new_validator_permit || t1.new_validator_permit + ); + + // Emission (u64) + let exp_em = wu64(u64::from(t0.emission), 
u64::from(t1.emission)); + assert_abs_diff_eq!(u64::from(emission_v[uid]), exp_em, epsilon = 1); + + // u16 terms + assert_abs_diff_eq!(rank_v[uid], wu16(t0.rank, t1.rank), epsilon = 1); + assert_abs_diff_eq!(trust_v[uid], wu16(t0.trust, t1.trust), epsilon = 1); + assert_abs_diff_eq!(cons_v[uid], wu16(t0.consensus, t1.consensus), epsilon = 1); + assert_abs_diff_eq!(div_v[uid], wu16(t0.dividend, t1.dividend), epsilon = 1); + assert_abs_diff_eq!( + prun_v[uid], + wu16(t0.pruning_score, t1.pruning_score), + epsilon = 1 + ); + assert_abs_diff_eq!( + vtrust_v[uid], + wu16(t0.validator_trust, t1.validator_trust), + epsilon = 1 + ); + }; + + check_uid(uid0, &hk0); + check_uid(uid1, &hk1); + check_uid(uid2, &hk2); + }); +} + #[test] fn epoch_with_subsubnets_no_weight_no_incentive() { new_test_ext(1).execute_with(|| { @@ -1360,3 +1545,107 @@ fn test_do_commit_crv3_sub_weights_committing_too_fast() { )); }); } + +#[test] +fn epoch_subsubnet_emergency_mode_distributes_by_stake() { + new_test_ext(1).execute_with(|| { + // setup a single sub-subnet where consensus sum becomes 0 + let netuid = NetUid::from(1u16); + let subid = SubId::from(1u8); + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + let tempo: u16 = 5; + add_network(netuid, tempo, 0); + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); // allow subids {0,1} + SubtensorModule::set_max_registrations_per_block(netuid, 4); + SubtensorModule::set_target_registrations_per_interval(netuid, 4); + + // three neurons: make ALL permitted validators so active_stake is non-zero + let hk0 = U256::from(10); + let ck0 = U256::from(11); + let hk1 = U256::from(20); + let ck1 = U256::from(21); + let hk2 = U256::from(30); + let ck2 = U256::from(31); + let hk3 = U256::from(40); // miner + let ck3 = U256::from(41); + register_ok_neuron(netuid, hk0, ck0, 0); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + register_ok_neuron(netuid, hk3, ck3, 0); + + // active + recent 
updates so they're all active + let now = SubtensorModule::get_current_block_as_u64(); + ActivityCutoff::::insert(netuid, 1_000u16); + LastUpdate::::insert(idx, vec![now, now, now, now]); + + // All staking validators permitted => active_stake = stake + ValidatorPermit::::insert(netuid, vec![true, true, true, false]); + SubtensorModule::set_stake_threshold(0); + + // force ZERO consensus/incentive path: no weights/bonds + // (leave Weights/Bonds empty for all rows on this sub-subnet) + + // stake proportions: uid0:uid1:uid2 = 10:30:60 + SubtensorModule::add_balance_to_coldkey_account(&ck0, 10); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 30); + SubtensorModule::add_balance_to_coldkey_account(&ck2, 60); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk0, + &ck0, + netuid, + AlphaCurrency::from(10), + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + AlphaCurrency::from(30), + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk2, + &ck2, + netuid, + AlphaCurrency::from(60), + ); + + let emission = AlphaCurrency::from(1_000_000u64); + + // --- act: run epoch on this sub-subnet only --- + let out = SubtensorModule::epoch_subsubnet(netuid, subid, emission); + + // collect validator emissions per hotkey + let t0 = out.0.get(&hk0).unwrap(); + let t1 = out.0.get(&hk1).unwrap(); + let t2 = out.0.get(&hk2).unwrap(); + let t3 = out.0.get(&hk3).unwrap(); + + // In emergency mode (consensus sum == 0): + // - validator_emission is distributed by (active) stake proportions + // - server_emission remains zero (incentive path is zero) + assert_eq!(u64::from(t0.server_emission), 0); + assert_eq!(u64::from(t1.server_emission), 0); + assert_eq!(u64::from(t2.server_emission), 0); + assert_eq!(u64::from(t3.server_emission), 0); + + // expected splits by stake: 10%, 30%, 60% of total emission + let e = u64::from(emission); + let exp0 = e / 10; // 10% + let exp1 = e * 3 / 10; // 
30% + let exp2 = e * 6 / 10; // 60% + + // allow tiny rounding drift from fixed-point conversions + assert_abs_diff_eq!(u64::from(t0.validator_emission), exp0, epsilon = 2); + assert_abs_diff_eq!(u64::from(t1.validator_emission), exp1, epsilon = 2); + assert_abs_diff_eq!(u64::from(t2.validator_emission), exp2, epsilon = 2); + assert_eq!(u64::from(t3.validator_emission), 0); + + // all emission goes to validators + assert_abs_diff_eq!( + u64::from(t0.validator_emission) + + u64::from(t1.validator_emission) + + u64::from(t2.validator_emission), + e, + epsilon = 2 + ); + }); +} From 700a8828de105f0b4fa063309b23b3282fd658f2 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 14:11:06 -0400 Subject: [PATCH 138/379] Format --- pallets/admin-utils/src/tests/mod.rs | 2 +- pallets/subtensor/src/macros/dispatches.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index b1f88b6826..3403bbf97b 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2113,4 +2113,4 @@ fn test_sudo_set_desired_subsubnet_count() { ss_count_ok )); }); -} \ No newline at end of file +} diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 8215c846a4..1b6c0c13b3 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -94,7 +94,7 @@ mod dispatches { } } - /// --- Sets the caller weights for the incentive mechanism for subsubnets. The call + /// --- Sets the caller weights for the incentive mechanism for subsubnets. The call /// can be made from the hotkey account so is potentially insecure, however, the damage /// of changing weights is minimal if caught early. This function includes all the /// checks that the passed weights meet the requirements. 
Stored as u16s they represent @@ -113,7 +113,7 @@ mod dispatches { /// /// * `netuid` (u16): /// - The network uid we are setting these weights on. - /// + /// /// * `subid` (`u8`): /// - The u8 subsubnet identifier. /// @@ -251,7 +251,7 @@ mod dispatches { /// /// * `netuid` (`u16`): /// - The u16 network identifier. - /// + /// /// * `subid` (`u8`): /// - The u8 subsubnet identifier. /// @@ -372,7 +372,7 @@ mod dispatches { /// /// * `netuid` (`u16`): /// - The u16 network identifier. - /// + /// /// * `subid` (`u8`): /// - The u8 subsubnet identifier. /// @@ -470,7 +470,7 @@ mod dispatches { /// /// * `netuid` (`u16`): /// - The u16 network identifier. - /// + /// /// * `subid` (`u8`): /// - The u8 subsubnet identifier. /// @@ -2293,7 +2293,7 @@ mod dispatches { Ok(()) } - /// ---- Used to commit timelock encrypted commit-reveal weight values to later be revealed for + /// ---- Used to commit timelock encrypted commit-reveal weight values to later be revealed for /// a subsubnet. /// /// # Args: From b919b5de03f14ff9cce02dfc6e53b69557e6e327 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 14:21:58 -0400 Subject: [PATCH 139/379] Fix merge --- pallets/admin-utils/src/tests/mod.rs | 1 + pallets/subtensor/src/macros/dispatches.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 3403bbf97b..79c237f526 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -1955,6 +1955,7 @@ fn test_sudo_set_commit_reveal_version() { }); } +#[test] fn test_sudo_set_min_burn() { new_test_ext().execute_with(|| { let netuid = NetUid::from(1); diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 1b6c0c13b3..a94f44f849 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -155,7 +155,7 @@ mod dispatches { /// /// * 
'MaxWeightExceeded': /// - Attempting to set weights with max value exceeding limit. - #[pallet::call_index(114)] + #[pallet::call_index(119)] #[pallet::weight((Weight::from_parts(15_540_000_000, 0) .saturating_add(T::DbWeight::get().reads(4111)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] From 56453e994af958f525d6882fb09274799046482d Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 16:08:25 -0400 Subject: [PATCH 140/379] Fix clippy --- pallets/subtensor/src/subnets/subsubnet.rs | 4 ++++ runtime/src/lib.rs | 7 ++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 98f339bfb5..73dcef569a 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -67,6 +67,10 @@ impl Pallet { } } + pub fn get_current_subsubnet_count(netuid: NetUid) -> SubId { + SubsubnetCountCurrent::::get(netuid) + } + pub fn ensure_subsubnet_exists(netuid: NetUid, sub_id: SubId) -> DispatchResult { // Make sure the base subnet exists ensure!( diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 73ddfb751f..347f117a31 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1056,7 +1056,12 @@ pub struct ResetBondsOnCommit; impl OnMetadataCommitment for ResetBondsOnCommit { #[cfg(not(feature = "runtime-benchmarks"))] fn on_metadata_commitment(netuid: NetUid, address: &AccountId) { - let _ = SubtensorModule::do_reset_bonds(netuid, address); + // Reset bonds for each subsubnet of this subnet + let subsub_count = SubtensorModule::get_current_subsubnet_count(netuid); + for subid in 0..u8::from(subsub_count) { + let netuid_index = SubtensorModule::get_subsubnet_storage_index(netuid, subid.into()); + let _ = SubtensorModule::do_reset_bonds(netuid, address); + } } #[cfg(feature = "runtime-benchmarks")] From 5811d0f77920c5ae48ff64a3f46dfa8a8150aebc Mon Sep 17 00:00:00 2001 From: Greg 
Zaitsev Date: Thu, 4 Sep 2025 16:34:44 -0400 Subject: [PATCH 141/379] Fix clippy --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 347f117a31..3ab68aa3f5 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1060,7 +1060,7 @@ impl OnMetadataCommitment for ResetBondsOnCommit { let subsub_count = SubtensorModule::get_current_subsubnet_count(netuid); for subid in 0..u8::from(subsub_count) { let netuid_index = SubtensorModule::get_subsubnet_storage_index(netuid, subid.into()); - let _ = SubtensorModule::do_reset_bonds(netuid, address); + let _ = SubtensorModule::do_reset_bonds(netuid_index, address); } } From 2cd3f5eb713fcf205280b216297017e97fad210a Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 18:15:58 -0400 Subject: [PATCH 142/379] Fix emission aggregation --- pallets/subtensor/src/subnets/subsubnet.rs | 31 ++++++++-------------- pallets/subtensor/src/tests/subsubnet.rs | 14 +++++----- 2 files changed, 18 insertions(+), 27 deletions(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 73dcef569a..a00e8a5bfe 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -306,21 +306,20 @@ impl Pallet { .fold(BTreeMap::new(), |mut acc, (hotkey, (terms, sub_weight))| { acc.entry(hotkey) .and_modify(|acc_terms| { + // Server and validator emission come from subsubnet emission and need to be added up + acc_terms.validator_emission = acc_terms + .validator_emission + .saturating_add(terms.validator_emission); + acc_terms.server_emission = acc_terms + .server_emission + .saturating_add(terms.server_emission); + + // The rest of the terms need to be aggregated as weighted sum acc_terms.dividend = Self::weighted_acc_u16( acc_terms.dividend, terms.dividend, sub_weight, ); - acc_terms.validator_emission = Self::weighted_acc_alpha( - acc_terms.validator_emission, - 
terms.validator_emission, - sub_weight, - ); - acc_terms.server_emission = Self::weighted_acc_alpha( - acc_terms.server_emission, - terms.server_emission, - sub_weight, - ); acc_terms.stake_weight = Self::weighted_acc_u16( acc_terms.stake_weight, terms.stake_weight, @@ -359,16 +358,8 @@ impl Pallet { uid: terms.uid, dividend: Self::weighted_acc_u16(0, terms.dividend, sub_weight), incentive: Self::weighted_acc_u16(0, terms.incentive, sub_weight), - validator_emission: Self::weighted_acc_alpha( - 0u64.into(), - terms.validator_emission, - sub_weight, - ), - server_emission: Self::weighted_acc_alpha( - 0u64.into(), - terms.server_emission, - sub_weight, - ), + validator_emission: terms.validator_emission, + server_emission: terms.server_emission, stake_weight: Self::weighted_acc_u16( 0, terms.stake_weight, diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 4f4902df42..08612ef684 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -729,17 +729,17 @@ fn epoch_with_subsubnets_persists_and_aggregates_all_terms() { let terms0 = |hk: &U256| out0.0.get(hk).unwrap(); let terms1 = |hk: &U256| out1.0.get(hk).unwrap(); - // Returned aggregated emissions match weighted sums + // Returned aggregated emissions match plain sums of subsubnet emissions for hk in [&hk1, &hk2] { let (got_se, got_ve) = agg_map.get(hk).cloned().expect("present"); let t0 = terms0(hk); let t1 = terms1(hk); - let exp_se = (U64F64::saturating_from_num(u64::from(t0.server_emission)) * w0 - + U64F64::saturating_from_num(u64::from(t1.server_emission)) * w1) - .saturating_to_num::(); - let exp_ve = (U64F64::saturating_from_num(u64::from(t0.validator_emission)) * w0 - + U64F64::saturating_from_num(u64::from(t1.validator_emission)) * w1) - .saturating_to_num::(); + let exp_se = (U64F64::saturating_from_num(u64::from(t0.server_emission)) + + U64F64::saturating_from_num(u64::from(t1.server_emission))) + 
.saturating_to_num::(); + let exp_ve = (U64F64::saturating_from_num(u64::from(t0.validator_emission)) + + U64F64::saturating_from_num(u64::from(t1.validator_emission))) + .saturating_to_num::(); assert_abs_diff_eq!(u64::from(got_se), exp_se, epsilon = 1); assert_abs_diff_eq!(u64::from(got_ve), exp_ve, epsilon = 1); } From f991ecac5d48dc17a0f913cb955af5bf7132d511 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 18:29:02 -0400 Subject: [PATCH 143/379] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 3ab68aa3f5..e7ad12af7b 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 309, + spec_version: 310, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 75ea5962dfa2a3724a5f5b78133e4b74e169a8ae Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 5 Sep 2025 00:37:06 +0000 Subject: [PATCH 144/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index a94f44f849..340bba171a 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -932,7 +932,7 @@ mod dispatches { /// - The ip type v4 or v6. 
/// #[pallet::call_index(5)] - #[pallet::weight((Weight::from_parts(28_660_000, 0) + #[pallet::weight((Weight::from_parts(42_000_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_prometheus( @@ -2249,7 +2249,7 @@ mod dispatches { /// - The client (bittensor-drand) version #[pallet::call_index(113)] #[pallet::weight((Weight::from_parts(80_690_000, 0) - .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( origin: T::RuntimeOrigin, From c295eaaa2cacae5443cf2354759c4726cd1af967 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Fri, 5 Sep 2025 15:48:10 +0300 Subject: [PATCH 145/379] Update spec version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index cc9c02eca3..644a85ebcd 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 309, + spec_version: 310, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 8a7661c6a9db16737d8b437783acb2a535644b71 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Fri, 5 Sep 2025 15:57:22 +0300 Subject: [PATCH 146/379] Prove default hyperparam setting rate limit doesn't block --- pallets/admin-utils/src/tests/mod.rs | 32 ++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 15d6f634f5..df11fd9912 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2137,6 +2137,38 @@ fn test_owner_hyperparam_update_rate_limit_enforced() { }); } +// Verifies that when the owner hyperparameter rate limit is left at its default (0), hyperparameter +// updates are not blocked until a non-zero value is set. +#[test] +fn test_hyperparam_rate_limit_not_blocking_with_default() { + new_test_ext().execute_with(|| { + // Setup subnet and owner + let netuid = NetUid::from(42); + add_network(netuid, 10); + let owner: U256 = U256::from(77); + SubnetOwner::::insert(netuid, owner); + + // Read the default (unset) owner hyperparam rate limit + let default_limit = SubtensorModule::get_owner_hyperparam_rate_limit(); + + assert_eq!(default_limit, 0); + + // First owner update should always succeed + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 1 + )); + + // With default == 0, second immediate update should also pass (no rate limiting) + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 2 + )); + }); +} + #[test] fn test_sudo_set_max_burn() { new_test_ext().execute_with(|| { From a0830a784bca2bb2cfa8dbfa6f9b237b72f27a16 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Fri, 5 Sep 2025 10:42:32 -0700 Subject: [PATCH 147/379] don't remove LastAdjustmentBlock --- 
pallets/subtensor/src/coinbase/root.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index aaa85522f5..66f751d9d7 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -513,7 +513,6 @@ impl Pallet { MaxBurn::::remove(netuid); MinDifficulty::::remove(netuid); MaxDifficulty::::remove(netuid); - LastAdjustmentBlock::::remove(netuid); RegistrationsThisBlock::::remove(netuid); EMAPriceHalvingBlocks::::remove(netuid); RAORecycledForRegistration::::remove(netuid); From cd565b2391ecec4caee8fce92405f5900bb49feb Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sat, 6 Sep 2025 13:52:39 -0700 Subject: [PATCH 148/379] try_initialize_v3 --- pallets/subtensor/src/subnets/subnet.rs | 25 ++++++++++++++----------- pallets/swap-interface/src/lib.rs | 1 + pallets/swap/src/pallet/impls.rs | 3 +++ 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 522e267f3c..3460499b89 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -1,6 +1,7 @@ use super::*; use sp_core::Get; use subtensor_runtime_common::{NetUid, TaoCurrency}; +use subtensor_swap_interface::SwapHandler; impl Pallet { /// Fetches the total count of subnets. @@ -101,23 +102,23 @@ impl Pallet { /// Facilitates user registration of a new subnetwork. /// /// ### Args - /// * **`origin`** – `T::RuntimeOrigin`  Must be **signed** by the coldkey. - /// * **`hotkey`** – `&T::AccountId`  First neuron of the new subnet. - /// * **`mechid`** – `u16`  Only the dynamic mechanism (`1`) is currently supported. + /// * **`origin`** – `T::RuntimeOrigin`  Must be **signed** by the coldkey. + /// * **`hotkey`** – `&T::AccountId`  First neuron of the new subnet. 
+ /// * **`mechid`** – `u16`  Only the dynamic mechanism (`1`) is currently supported. /// * **`identity`** – `Option`  Optional metadata for the subnet. /// /// ### Events - /// * `NetworkAdded(netuid, mechid)` – always. - /// * `SubnetIdentitySet(netuid)` – when a custom identity is supplied. + /// * `NetworkAdded(netuid, mechid)` – always. + /// * `SubnetIdentitySet(netuid)` – when a custom identity is supplied. /// * `NetworkRemoved(netuid)` – when a subnet is pruned to make room. /// /// ### Errors - /// * `NonAssociatedColdKey` – `hotkey` already belongs to another coldkey. - /// * `MechanismDoesNotExist` – unsupported `mechid`. - /// * `NetworkTxRateLimitExceeded` – caller hit the register-network rate limit. - /// * `SubnetLimitReached` – limit hit **and** no eligible subnet to prune. - /// * `CannotAffordLockCost` – caller lacks the lock cost. - /// * `BalanceWithdrawalError` – failed to lock balance. + /// * `NonAssociatedColdKey` – `hotkey` already belongs to another coldkey. + /// * `MechanismDoesNotExist` – unsupported `mechid`. + /// * `NetworkTxRateLimitExceeded` – caller hit the register-network rate limit. + /// * `SubnetLimitReached` – limit hit **and** no eligible subnet to prune. + /// * `CannotAffordLockCost` – caller lacks the lock cost. + /// * `BalanceWithdrawalError` – failed to lock balance. /// * `InvalidIdentity` – supplied `identity` failed validation. /// pub fn do_register_network( @@ -255,6 +256,8 @@ impl Pallet { Self::deposit_event(Event::SubnetIdentitySet(netuid_to_register)); } + T::SwapInterface::try_initialize_v3(netuid_to_register)?; + // --- 18. Emit the NetworkAdded event. 
log::info!("NetworkAdded( netuid:{netuid_to_register:?}, mechanism:{mechid:?} )"); Self::deposit_event(Event::NetworkAdded(netuid_to_register, mechid)); diff --git a/pallets/swap-interface/src/lib.rs b/pallets/swap-interface/src/lib.rs index 268893f6a1..e0319c04b2 100644 --- a/pallets/swap-interface/src/lib.rs +++ b/pallets/swap-interface/src/lib.rs @@ -35,6 +35,7 @@ pub trait SwapHandler { ); fn is_user_liquidity_enabled(netuid: NetUid) -> bool; fn dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult; + fn try_initialize_v3(netuid: NetUid) -> DispatchResult; } #[derive(Debug, PartialEq)] diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index 4d191e71d9..7053119562 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1575,6 +1575,9 @@ impl SwapHandler for Pallet { fn dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult { Self::do_dissolve_all_liquidity_providers(netuid) } + fn try_initialize_v3(netuid: NetUid) -> DispatchResult { + Self::maybe_initialize_v3(netuid).map_err(|e| e.into()) + } } #[derive(Debug, PartialEq)] From 1b33b6f2bcf56ca1883241aece1543028367b24e Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sun, 7 Sep 2025 18:12:40 -0700 Subject: [PATCH 149/379] toggle_user_liquidity --- pallets/subtensor/src/subnets/subnet.rs | 14 +------------- pallets/swap-interface/src/lib.rs | 2 +- pallets/swap/src/pallet/impls.rs | 10 +++------- 3 files changed, 5 insertions(+), 21 deletions(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 3460499b89..be42a09dde 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -4,17 +4,6 @@ use subtensor_runtime_common::{NetUid, TaoCurrency}; use subtensor_swap_interface::SwapHandler; impl Pallet { - /// Fetches the total count of subnets. 
- /// - /// This function retrieves the total number of subnets present on the chain. - /// - /// # Returns: - /// * 'u16': The total number of subnets. - /// - pub fn get_num_subnets() -> u16 { - TotalNetworks::::get() - } - /// Returns true if the subnetwork exists. /// /// This function checks if a subnetwork with the given UID exists. @@ -256,8 +245,7 @@ impl Pallet { Self::deposit_event(Event::SubnetIdentitySet(netuid_to_register)); } - T::SwapInterface::try_initialize_v3(netuid_to_register)?; - + T::SwapInterface::toggle_user_liquidity(netuid_to_register, true); // --- 18. Emit the NetworkAdded event. log::info!("NetworkAdded( netuid:{netuid_to_register:?}, mechanism:{mechid:?} )"); Self::deposit_event(Event::NetworkAdded(netuid_to_register, mechid)); diff --git a/pallets/swap-interface/src/lib.rs b/pallets/swap-interface/src/lib.rs index e0319c04b2..d247b28d35 100644 --- a/pallets/swap-interface/src/lib.rs +++ b/pallets/swap-interface/src/lib.rs @@ -35,7 +35,7 @@ pub trait SwapHandler { ); fn is_user_liquidity_enabled(netuid: NetUid) -> bool; fn dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult; - fn try_initialize_v3(netuid: NetUid) -> DispatchResult; + fn toggle_user_liquidity(netuid: NetUid, enabled: bool); } #[derive(Debug, PartialEq)] diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index 7053119562..75f050632c 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1360,14 +1360,10 @@ impl Pallet { /// - **V2 / non‑V3 path**: /// * No per‑position records exist; still defensively clear the same V3 storages (safe no‑ops). 
pub fn do_dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult { - let mechid = T::SubnetInfo::mechanism(netuid.into()); - let v3_initialized = SwapV3Initialized::::get(netuid); let user_lp_enabled = >::is_user_liquidity_enabled(netuid); - let is_v3_mode = mechid == 1 && v3_initialized; - - if is_v3_mode { + if SwapV3Initialized::::get(netuid) { // -------- V3: close every position, aggregate refunds, clear state -------- // 1) Snapshot all (owner, position_id). @@ -1575,8 +1571,8 @@ impl SwapHandler for Pallet { fn dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult { Self::do_dissolve_all_liquidity_providers(netuid) } - fn try_initialize_v3(netuid: NetUid) -> DispatchResult { - Self::maybe_initialize_v3(netuid).map_err(|e| e.into()) + fn toggle_user_liquidity(netuid: NetUid, enabled: bool) { + EnabledUserLiquidity::::insert(netuid, enabled) } } From 982c0053bed97e592d54b8fbaa85acbff222d63c Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 8 Sep 2025 09:17:35 -0700 Subject: [PATCH 150/379] 128 subnet limit --- pallets/subtensor/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 230a8b85a5..c37311c3a8 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -870,7 +870,7 @@ pub mod pallet { #[pallet::type_value] /// Default value for subnet limit. 
pub fn DefaultSubnetLimit() -> u16 { - 148 + 128 } #[pallet::storage] From 0700740fe6a2ec6558b0c1f9b8d18edd1d6fcf04 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 8 Sep 2025 12:28:03 -0700 Subject: [PATCH 151/379] purge commitments --- pallets/admin-utils/src/tests/mock.rs | 6 ++ pallets/commitments/src/lib.rs | 12 +++ pallets/commitments/src/tests.rs | 121 +++++++++++++++++++++- pallets/subtensor/src/coinbase/root.rs | 2 + pallets/subtensor/src/lib.rs | 5 + pallets/subtensor/src/macros/config.rs | 4 + pallets/subtensor/src/tests/mock.rs | 6 ++ pallets/transaction-fee/src/tests/mock.rs | 6 ++ runtime/src/lib.rs | 12 ++- 9 files changed, 171 insertions(+), 3 deletions(-) diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 1308672d6d..d1667abb36 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -223,6 +223,7 @@ impl pallet_subtensor::Config for Test { type HotkeySwapOnSubnetInterval = HotkeySwapOnSubnetInterval; type ProxyInterface = (); type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval; + type CommitmentsInterface = CommitmentsI; } parameter_types! 
{ @@ -349,6 +350,11 @@ impl PrivilegeCmp for OriginPrivilegeCmp { } } +pub struct CommitmentsI; +impl pallet_subtensor::CommitmentsInterface for CommitmentsI { + fn purge_netuid(netuid: NetUid) {} +} + pub struct GrandpaInterfaceImpl; impl crate::GrandpaInterface for GrandpaInterfaceImpl { fn schedule_change( diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index 34192b6fa2..9e1e3d40b3 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -537,4 +537,16 @@ impl Pallet { Ok(()) } + + pub fn purge_netuid(netuid: NetUid) { + let _ = CommitmentOf::::clear_prefix(netuid, u32::MAX, None); + let _ = LastCommitment::::clear_prefix(netuid, u32::MAX, None); + let _ = LastBondsReset::::clear_prefix(netuid, u32::MAX, None); + let _ = RevealedCommitments::::clear_prefix(netuid, u32::MAX, None); + let _ = UsedSpaceOf::::clear_prefix(netuid, u32::MAX, None); + + TimelockedIndex::::mutate(|index| { + index.retain(|(n, _)| *n != netuid); + }); + } } diff --git a/pallets/commitments/src/tests.rs b/pallets/commitments/src/tests.rs index 6866ebdeec..5f19070ea2 100644 --- a/pallets/commitments/src/tests.rs +++ b/pallets/commitments/src/tests.rs @@ -4,8 +4,9 @@ use subtensor_runtime_common::NetUid; #[cfg(test)] use crate::{ - BalanceOf, CommitmentInfo, CommitmentOf, Config, Data, Error, Event, MaxSpace, Pallet, - Registration, RevealedCommitments, TimelockedIndex, UsedSpaceOf, + BalanceOf, CommitmentInfo, CommitmentOf, Config, Data, Error, Event, LastBondsReset, + LastCommitment, MaxSpace, Pallet, Registration, RevealedCommitments, TimelockedIndex, + UsageTracker, UsedSpaceOf, mock::{ Balances, DRAND_QUICKNET_SIG_2000_HEX, DRAND_QUICKNET_SIG_HEX, RuntimeEvent, RuntimeOrigin, Test, TestMaxFields, insert_drand_pulse, new_test_ext, produce_ciphertext, @@ -2185,3 +2186,119 @@ fn mixed_timelocked_and_raw_fields_works() { ); }); } + +#[test] +fn purge_netuid_clears_only_that_netuid() { + new_test_ext().execute_with(|| { + // Setup + 
System::::set_block_number(1); + + let net_a = NetUid::from(42); + let net_b = NetUid::from(43); + let who_a1: u64 = 1001; + let who_a2: u64 = 1002; + let who_b: u64 = 2001; + + // Minimal commitment payload + let empty_fields: BoundedVec::MaxFields> = BoundedVec::default(); + let info_empty: CommitmentInfo<::MaxFields> = CommitmentInfo { + fields: empty_fields, + }; + let bn = System::::block_number(); + + // Seed NET A with two accounts across all tracked storages + let reg_a1 = Registration { + deposit: Default::default(), + block: bn, + info: info_empty.clone(), + }; + let reg_a2 = Registration { + deposit: Default::default(), + block: bn, + info: info_empty.clone(), + }; + CommitmentOf::::insert(net_a, who_a1, reg_a1); + CommitmentOf::::insert(net_a, who_a2, reg_a2); + LastCommitment::::insert(net_a, who_a1, bn); + LastCommitment::::insert(net_a, who_a2, bn); + LastBondsReset::::insert(net_a, who_a1, bn); + RevealedCommitments::::insert(net_a, who_a1, vec![(b"a".to_vec(), 7u64)]); + UsedSpaceOf::::insert( + net_a, + who_a1, + UsageTracker { + last_epoch: 1, + used_space: 123, + }, + ); + + // Seed NET B with one account that must remain intact + let reg_b = Registration { + deposit: Default::default(), + block: bn, + info: info_empty, + }; + CommitmentOf::::insert(net_b, who_b, reg_b); + LastCommitment::::insert(net_b, who_b, bn); + LastBondsReset::::insert(net_b, who_b, bn); + RevealedCommitments::::insert(net_b, who_b, vec![(b"b".to_vec(), 8u64)]); + UsedSpaceOf::::insert( + net_b, + who_b, + UsageTracker { + last_epoch: 9, + used_space: 999, + }, + ); + + // Timelocked index contains both nets + TimelockedIndex::::mutate(|idx| { + idx.insert((net_a, who_a1)); + idx.insert((net_a, who_a2)); + idx.insert((net_b, who_b)); + }); + + // Sanity pre-checks + assert!(CommitmentOf::::get(net_a, who_a1).is_some()); + assert!(CommitmentOf::::get(net_b, who_b).is_some()); + assert!(TimelockedIndex::::get().contains(&(net_a, who_a1))); + + // Act + 
Pallet::::purge_netuid(net_a); + + // NET A: everything cleared + assert_eq!(CommitmentOf::::iter_prefix(net_a).count(), 0); + assert!(CommitmentOf::::get(net_a, who_a1).is_none()); + assert!(CommitmentOf::::get(net_a, who_a2).is_none()); + + assert_eq!(LastCommitment::::iter_prefix(net_a).count(), 0); + assert!(LastCommitment::::get(net_a, who_a1).is_none()); + assert!(LastCommitment::::get(net_a, who_a2).is_none()); + + assert_eq!(LastBondsReset::::iter_prefix(net_a).count(), 0); + assert!(LastBondsReset::::get(net_a, who_a1).is_none()); + + assert_eq!(RevealedCommitments::::iter_prefix(net_a).count(), 0); + assert!(RevealedCommitments::::get(net_a, who_a1).is_none()); + + assert_eq!(UsedSpaceOf::::iter_prefix(net_a).count(), 0); + assert!(UsedSpaceOf::::get(net_a, who_a1).is_none()); + + let idx_after = TimelockedIndex::::get(); + assert!(!idx_after.contains(&(net_a, who_a1))); + assert!(!idx_after.contains(&(net_a, who_a2))); + + // NET B: untouched + assert!(CommitmentOf::::get(net_b, who_b).is_some()); + assert!(LastCommitment::::get(net_b, who_b).is_some()); + assert!(LastBondsReset::::get(net_b, who_b).is_some()); + assert!(RevealedCommitments::::get(net_b, who_b).is_some()); + assert!(UsedSpaceOf::::get(net_b, who_b).is_some()); + assert!(idx_after.contains(&(net_b, who_b))); + + // Idempotency + Pallet::::purge_netuid(net_a); + assert_eq!(CommitmentOf::::iter_prefix(net_a).count(), 0); + assert!(!TimelockedIndex::::get().contains(&(net_a, who_a1))); + }); +} diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 66f751d9d7..1a6026b3ec 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -16,6 +16,7 @@ // DEALINGS IN THE SOFTWARE. use super::*; +use crate::CommitmentsInterface; use frame_support::{dispatch::Pays, weights::Weight}; use safe_math::*; use sp_core::Get; @@ -374,6 +375,7 @@ impl Pallet { // 2. --- Perform the cleanup before removing the network. 
T::SwapInterface::dissolve_all_liquidity_providers(netuid)?; Self::destroy_alpha_in_out_stakes(netuid)?; + T::CommitmentsInterface::purge_netuid(netuid); // 3. --- Remove the network Self::remove_network(netuid); diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index c37311c3a8..345135a6dd 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -2164,3 +2164,8 @@ impl ProxyInterface for () { Ok(()) } } + +/// Pallets that hold per-subnet commitments implement this to purge all state for `netuid`. +pub trait CommitmentsInterface { + fn purge_netuid(netuid: NetUid); +} diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index 3479ad8101..5e63875a91 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -6,6 +6,7 @@ use frame_support::pallet_macros::pallet_section; #[pallet_section] mod config { + use crate::CommitmentsInterface; use subtensor_swap_interface::SwapHandler; /// Configure the pallet by specifying the parameters and types on which it depends. @@ -58,6 +59,9 @@ mod config { /// Interface to allow interacting with the proxy pallet. type ProxyInterface: crate::ProxyInterface; + /// Interface to clean commitments on network dissolution. 
+ type CommitmentsInterface: CommitmentsInterface; + /// ================================= /// ==== Initial Value Constants ==== /// ================================= diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 5838d5974f..c278acae0f 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -454,6 +454,7 @@ impl crate::Config for Test { type HotkeySwapOnSubnetInterval = HotkeySwapOnSubnetInterval; type ProxyInterface = FakeProxier; type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval; + type CommitmentsInterface = CommitmentsI; } // Swap-related parameter types @@ -485,6 +486,11 @@ impl PrivilegeCmp for OriginPrivilegeCmp { } } +pub struct CommitmentsI; +impl CommitmentsInterface for CommitmentsI { + fn purge_netuid(netuid: NetUid) {} +} + parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; diff --git a/pallets/transaction-fee/src/tests/mock.rs b/pallets/transaction-fee/src/tests/mock.rs index c2f5caa432..916fdc591c 100644 --- a/pallets/transaction-fee/src/tests/mock.rs +++ b/pallets/transaction-fee/src/tests/mock.rs @@ -288,6 +288,7 @@ impl pallet_subtensor::Config for Test { type HotkeySwapOnSubnetInterval = HotkeySwapOnSubnetInterval; type ProxyInterface = (); type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval; + type CommitmentsInterface = CommitmentsI; } parameter_types! { @@ -414,6 +415,11 @@ impl PrivilegeCmp for OriginPrivilegeCmp { } } +pub struct CommitmentsI; +impl pallet_subtensor::CommitmentsInterface for CommitmentsI { + fn purge_netuid(netuid: NetUid) {} +} + parameter_types! 
{ pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 8581483b91..5178efd261 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -491,7 +491,9 @@ impl CanVote for CanVoteToTriumvirate { } } -use pallet_subtensor::{CollectiveInterface, MemberManagement, ProxyInterface}; +use pallet_subtensor::{ + CollectiveInterface, CommitmentsInterface, MemberManagement, ProxyInterface, +}; pub struct ManageSenateMembers; impl MemberManagement for ManageSenateMembers { fn add_member(account: &AccountId) -> DispatchResultWithPostInfo { @@ -915,6 +917,13 @@ impl ProxyInterface for Proxier { } } +pub struct CommitmentsI; +impl CommitmentsInterface for CommitmentsI { + fn purge_netuid(netuid: NetUid) { + pallet_commitments::Pallet::::purge_netuid(netuid); + } +} + parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; @@ -1239,6 +1248,7 @@ impl pallet_subtensor::Config for Runtime { type HotkeySwapOnSubnetInterval = HotkeySwapOnSubnetInterval; type ProxyInterface = Proxier; type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval; + type CommitmentsInterface = CommitmentsI; } parameter_types! 
{ From 79da970f04d2ff40c2d95dbbc103e102fa84f980 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 8 Sep 2025 12:28:08 -0700 Subject: [PATCH 152/379] fix tests --- pallets/subtensor/src/tests/networks.rs | 2 -- pallets/subtensor/src/tests/subnet.rs | 5 ----- 2 files changed, 7 deletions(-) diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index d62bc4fd2f..ea1c236149 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -386,7 +386,6 @@ fn dissolve_clears_all_per_subnet_storages() { MaxBurn::::insert(net, TaoCurrency::from(2)); MinDifficulty::::insert(net, 1u64); MaxDifficulty::::insert(net, 2u64); - LastAdjustmentBlock::::insert(net, 1u64); RegistrationsThisBlock::::insert(net, 1u16); EMAPriceHalvingBlocks::::insert(net, 1u64); RAORecycledForRegistration::::insert(net, TaoCurrency::from(1)); @@ -534,7 +533,6 @@ fn dissolve_clears_all_per_subnet_storages() { assert!(!MaxBurn::::contains_key(net)); assert!(!MinDifficulty::::contains_key(net)); assert!(!MaxDifficulty::::contains_key(net)); - assert!(!LastAdjustmentBlock::::contains_key(net)); assert!(!RegistrationsThisBlock::::contains_key(net)); assert!(!EMAPriceHalvingBlocks::::contains_key(net)); assert!(!RAORecycledForRegistration::::contains_key(net)); diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs index 6bf4a6873b..ba5640af3d 100644 --- a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -727,11 +727,6 @@ fn test_user_liquidity_access_control() { // add network let netuid = add_dynamic_network(&owner_hotkey, &owner_coldkey); - // Initially should be disabled - assert!(!pallet_subtensor_swap::EnabledUserLiquidity::::get( - NetUid::from(netuid) - )); - // Not owner, not root: should fail assert_noop!( Swap::toggle_user_liquidity(RuntimeOrigin::signed(not_owner), netuid, true), From 
d4ac33fbaafb93829ecda1289140935b3fd8e386 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 8 Sep 2025 12:48:36 -0700 Subject: [PATCH 153/379] cleanup merge --- pallets/admin-utils/src/tests/mock.rs | 2 +- pallets/commitments/src/lib.rs | 20 ++++++++++---------- pallets/subtensor/src/macros/config.rs | 2 +- pallets/subtensor/src/subnets/subnet.rs | 1 - pallets/subtensor/src/tests/mock.rs | 2 +- pallets/transaction-fee/src/tests/mock.rs | 2 +- 6 files changed, 14 insertions(+), 15 deletions(-) diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 19664c50f0..94c9c0be58 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -357,7 +357,7 @@ impl PrivilegeCmp for OriginPrivilegeCmp { pub struct CommitmentsI; impl pallet_subtensor::CommitmentsInterface for CommitmentsI { - fn purge_netuid(netuid: NetUid) {} + fn purge_netuid(_netuid: NetUid) {} } pub struct GrandpaInterfaceImpl; diff --git a/pallets/commitments/src/lib.rs b/pallets/commitments/src/lib.rs index b1a3e40403..5fa37bf5e1 100644 --- a/pallets/commitments/src/lib.rs +++ b/pallets/commitments/src/lib.rs @@ -566,16 +566,6 @@ impl Pallet { .collect(); commitments } -} - -pub trait GetCommitments { - fn get_commitments(netuid: NetUid) -> Vec<(AccountId, Vec)>; -} - -impl GetCommitments for () { - fn get_commitments(_netuid: NetUid) -> Vec<(AccountId, Vec)> { - Vec::new() - } pub fn purge_netuid(netuid: NetUid) { let _ = CommitmentOf::::clear_prefix(netuid, u32::MAX, None); @@ -589,3 +579,13 @@ impl GetCommitments for () { }); } } + +pub trait GetCommitments { + fn get_commitments(netuid: NetUid) -> Vec<(AccountId, Vec)>; +} + +impl GetCommitments for () { + fn get_commitments(_netuid: NetUid) -> Vec<(AccountId, Vec)> { + Vec::new() + } +} diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index 9bc610c958..8d624348d0 100644 --- 
a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -6,8 +6,8 @@ use frame_support::pallet_macros::pallet_section; #[pallet_section] mod config { - use pallet_commitments::GetCommitments; use crate::CommitmentsInterface; + use pallet_commitments::GetCommitments; use subtensor_swap_interface::SwapHandler; /// Configure the pallet by specifying the parameters and types on which it depends. diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 0051e34b87..3ce40cc366 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -195,7 +195,6 @@ impl Pallet { log::debug!("SubnetMechanism for netuid {netuid_to_register:?} set to: {mechid:?}"); // --- 14. Set the creation terms. - NetworkLastRegistered::::set(current_block); NetworkRegisteredAt::::insert(netuid_to_register, current_block); // --- 15. Set the symbol. diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index f634268267..8607e14ec7 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -493,7 +493,7 @@ impl PrivilegeCmp for OriginPrivilegeCmp { pub struct CommitmentsI; impl CommitmentsInterface for CommitmentsI { - fn purge_netuid(netuid: NetUid) {} + fn purge_netuid(_netuid: NetUid) {} } parameter_types! { diff --git a/pallets/transaction-fee/src/tests/mock.rs b/pallets/transaction-fee/src/tests/mock.rs index 51e2b62da9..532c140c9e 100644 --- a/pallets/transaction-fee/src/tests/mock.rs +++ b/pallets/transaction-fee/src/tests/mock.rs @@ -422,7 +422,7 @@ impl PrivilegeCmp for OriginPrivilegeCmp { pub struct CommitmentsI; impl pallet_subtensor::CommitmentsInterface for CommitmentsI { - fn purge_netuid(netuid: NetUid) {} + fn purge_netuid(_netuid: NetUid) {} } parameter_types! 
{ From f45d30767253a99d4908b3c19a76a5376e7a5aa2 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Mon, 8 Sep 2025 18:06:20 -0400 Subject: [PATCH 154/379] Remove subsubnet throttling --- pallets/subtensor/src/coinbase/block_step.rs | 2 - pallets/subtensor/src/lib.rs | 9 -- pallets/subtensor/src/subnets/subsubnet.rs | 97 +++++--------- pallets/subtensor/src/tests/subsubnet.rs | 127 ++++++------------- 4 files changed, 70 insertions(+), 165 deletions(-) diff --git a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs index 6385a7f756..6a96090b05 100644 --- a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -21,8 +21,6 @@ impl Pallet { Self::run_coinbase(block_emission); // --- 4. Set pending children on the epoch; but only after the coinbase has been run. Self::try_set_pending_children(block_number); - // --- 5. Update sub-subnet counts - Self::update_subsubnet_counts_if_needed(block_number); // Return ok. Ok(()) } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 9493956405..7be36801db 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1829,15 +1829,6 @@ pub mod pallet { pub fn SuperBlockTempos() -> u16 { 20 } - #[pallet::type_value] - /// -- ITEM (Maximum allowed sub-subnet count decrease per super-block) - pub fn GlobalSubsubnetDecreasePerSuperblock() -> SubId { - SubId::from(1) - } - #[pallet::storage] - /// --- MAP ( netuid ) --> Number of sub-subnets desired by root or subnet owner. 
- pub type SubsubnetCountDesired = - StorageMap<_, Twox64Concat, NetUid, SubId, ValueQuery, DefaultSubsubnetCount>; #[pallet::storage] /// --- MAP ( netuid ) --> Current number of sub-subnets pub type SubsubnetCountCurrent = diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index a00e8a5bfe..904c380463 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -88,10 +88,7 @@ impl Pallet { /// Set the desired valus of sub-subnet count for a subnet identified /// by netuid - pub fn do_set_desired_subsubnet_count( - netuid: NetUid, - subsubnet_count: SubId, - ) -> DispatchResult { + pub fn do_set_subsubnet_count(netuid: NetUid, subsubnet_count: SubId) -> DispatchResult { // Make sure the subnet exists ensure!( Self::if_subnet_exist(netuid), @@ -113,75 +110,49 @@ impl Pallet { Error::::InvalidValue ); - SubsubnetCountDesired::::insert(netuid, subsubnet_count); + Self::update_subsubnet_counts_if_needed(netuid, subsubnet_count); + Ok(()) } /// Update current count for a subnet identified by netuid - /// - /// - This function should be called in every block in run_counbase /// - Cleans up all sub-subnet maps if count is reduced - /// - Decreases or increases current subsubnet count by no more than - /// `GlobalSubsubnetDecreasePerSuperblock` /// - pub fn update_subsubnet_counts_if_needed(current_block: u64) { - // Run once per super-block - let super_block_tempos = u64::from(SuperBlockTempos::::get()); - Self::get_all_subnet_netuids().iter().for_each(|netuid| { - let super_block = super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); - if let Some(rem) = current_block - .saturating_add(u16::from(*netuid) as u64) - .checked_rem(super_block) - { - if rem == 0 { - let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); - let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); - let min_capped_count = old_count - 
.saturating_sub(u8::from(GlobalSubsubnetDecreasePerSuperblock::::get())) - .max(1); - let max_capped_count = old_count - .saturating_add(u8::from(GlobalSubsubnetDecreasePerSuperblock::::get())); - let new_count = desired_count.max(min_capped_count).min(max_capped_count); - - if old_count != new_count { - if old_count > new_count { - for subid in new_count..old_count { - let netuid_index = - Self::get_subsubnet_storage_index(*netuid, SubId::from(subid)); - - // Cleanup Weights - let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); - - // Cleanup Incentive - Incentive::::remove(netuid_index); - - // Cleanup LastUpdate - LastUpdate::::remove(netuid_index); - - // Cleanup Bonds - let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); - - // Cleanup WeightCommits - let _ = - WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); - - // Cleanup TimelockedWeightCommits - let _ = TimelockedWeightCommits::::clear_prefix( - netuid_index, - u32::MAX, - None, - ); - } - } + pub fn update_subsubnet_counts_if_needed(netuid: NetUid, new_count: SubId) { + let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); + let new_count_u8 = u8::from(new_count); + if old_count != new_count_u8 { + if old_count > new_count_u8 { + for subid in new_count_u8..old_count { + let netuid_index = + Self::get_subsubnet_storage_index(netuid, SubId::from(subid)); + + // Cleanup Weights + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup Incentive + Incentive::::remove(netuid_index); - SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); + // Cleanup LastUpdate + LastUpdate::::remove(netuid_index); - // Reset split back to even - SubsubnetEmissionSplit::::remove(netuid); - } + // Cleanup Bonds + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup WeightCommits + let _ = WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup TimelockedWeightCommits + let _ = + 
TimelockedWeightCommits::::clear_prefix(netuid_index, u32::MAX, None); } } - }); + + SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); + + // Reset split back to even + SubsubnetEmissionSplit::::remove(netuid); + } } pub fn do_set_emission_split(netuid: NetUid, maybe_split: Option>) -> DispatchResult { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 08612ef684..8b128a7241 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -26,16 +26,14 @@ // - [x] Incentives are per subsubnet // - [x] Per-subsubnet incentives are distributed proportionally to miner weights // - [x] Subsubnet limit can be set up to 8 (with admin pallet) -// - [x] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block -// - [x] When subsubnet limit is increased, increase is GlobalSubsubnetDecreasePerSuperblock per super-block // - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared // - [x] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms // - [x] Subnet epoch terms persist in state // - [x] Subsubnet epoch terms persist in state // - [x] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake // - [x] Miner with no weights on any subsubnet receives no reward -// - [x] SubsubnetEmissionSplit is reset on super-block on subsubnet count increase -// - [x] SubsubnetEmissionSplit is reset on super-block on subsubnet count decrease +// - [x] SubsubnetEmissionSplit is reset on subsubnet count increase +// - [x] SubsubnetEmissionSplit is reset on subsubnet count decrease use super::mock::*; use crate::coinbase::reveal_commits::WeightsTlockPayload; @@ -189,22 +187,22 @@ fn ensure_subsubnet_fails_when_subid_out_of_range() { } #[test] -fn do_set_desired_subsubnet_count_ok_minimal() { +fn 
do_set_subsubnet_count_ok_minimal() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(3u16); NetworksAdded::::insert(NetUid::from(3u16), true); // base subnet exists - assert_ok!(SubtensorModule::do_set_desired_subsubnet_count( + assert_ok!(SubtensorModule::do_set_subsubnet_count( netuid, SubId::from(1u8) )); - assert_eq!(SubsubnetCountDesired::::get(netuid), SubId::from(1u8)); + assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(1u8)); }); } #[test] -fn do_set_desired_subsubnet_count_ok_at_effective_cap() { +fn do_set_subsubnet_count_ok_at_effective_cap() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(4u16); NetworksAdded::::insert(NetUid::from(4u16), true); // base subnet exists @@ -218,69 +216,67 @@ fn do_set_desired_subsubnet_count_ok_at_effective_cap() { compile_cap }; - assert_ok!(SubtensorModule::do_set_desired_subsubnet_count( - netuid, bound - )); - assert_eq!(SubsubnetCountDesired::::get(netuid), bound); + assert_ok!(SubtensorModule::do_set_subsubnet_count(netuid, bound)); + assert_eq!(SubsubnetCountCurrent::::get(netuid), bound); }); } #[test] -fn do_set_desired_fails_when_base_subnet_missing() { +fn do_set_fails_when_base_subnet_missing() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(7u16); // No NetworksAdded insert => base subnet absent assert_noop!( - SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(1u8)), + SubtensorModule::do_set_subsubnet_count(netuid, SubId::from(1u8)), Error::::SubNetworkDoesNotExist ); }); } #[test] -fn do_set_desired_fails_for_zero() { +fn do_set_fails_for_zero() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(9u16); NetworksAdded::::insert(NetUid::from(9u16), true); // base subnet exists assert_noop!( - SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(0u8)), + SubtensorModule::do_set_subsubnet_count(netuid, SubId::from(0u8)), Error::::InvalidValue ); }); } #[test] -fn do_set_desired_fails_when_over_runtime_cap() { +fn 
do_set_fails_when_over_runtime_cap() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(11u16); NetworksAdded::::insert(NetUid::from(11u16), true); // base subnet exists // Runtime cap is 8 (per function), so 9 must fail assert_noop!( - SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(9u8)), + SubtensorModule::do_set_subsubnet_count(netuid, SubId::from(9u8)), Error::::InvalidValue ); }); } #[test] -fn do_set_desired_fails_when_over_compile_time_cap() { +fn do_set_fails_when_over_compile_time_cap() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(12u16); NetworksAdded::::insert(NetUid::from(12u16), true); // base subnet exists let too_big = SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET + 1); assert_noop!( - SubtensorModule::do_set_desired_subsubnet_count(netuid, too_big), + SubtensorModule::do_set_subsubnet_count(netuid, too_big), Error::::InvalidValue ); }); } #[test] -fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { +fn update_subsubnet_counts_decreases_and_cleans() { new_test_ext(1).execute_with(|| { let hotkey = U256::from(1); @@ -288,27 +284,17 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { let netuid = NetUid::from(42u16); NetworksAdded::::insert(NetUid::from(42u16), true); - // super_block = SuperBlockTempos() * Tempo(netuid) - netuid - Tempo::::insert(netuid, 360u16); - let super_block = u64::from(SuperBlockTempos::::get()) - * u64::from(Tempo::::get(netuid)) - - u16::from(netuid) as u64; - - // Choose counts so result is deterministic for ANY decrease-per-superblock. - // Let dec = GlobalSubsubnetDecreasePerSuperblock(); set old = dec + 3. - let dec: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); - let old = SubId::from(dec.saturating_add(3)); - let desired = SubId::from(1u8); - // min_capped = max(old - dec, 1) = 3 => new_count = 3 + // Choose counts so result is deterministic. 
+ let old = SubId::from(3); + let desired = SubId::from(2u8); SubsubnetCountCurrent::::insert(netuid, old); - SubsubnetCountDesired::::insert(netuid, desired); // Set non-default subnet emission split SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); - // Seed data at a kept subid (2) and a removed subid (3) - let idx_keep = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); - let idx_rm3 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(3u8)); + // Seed data at a kept subid (1) and a removed subid (2) + let idx_keep = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1u8)); + let idx_rm3 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); Weights::::insert(idx_keep, 0u16, vec![(1u16, 1u16)]); Incentive::::insert(idx_keep, vec![1u16]); @@ -340,11 +326,11 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { VecDeque::from([(hotkey, 1u64, Default::default(), Default::default())]), ); - // Act exactly on a super-block boundary - SubtensorModule::update_subsubnet_counts_if_needed(super_block); + // Act + SubtensorModule::update_subsubnet_counts_if_needed(netuid, desired); - // New count is 3 - assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(3u8)); + // New count is as desired + assert_eq!(SubsubnetCountCurrent::::get(netuid), desired); // Kept prefix intact assert_eq!(Incentive::::get(idx_keep), vec![1u16]); @@ -366,78 +352,37 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { idx_rm3, 1u64 )); - // SubsubnetEmissionSplit is reset on super-block + // SubsubnetEmissionSplit is reset assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); }); } #[test] -fn update_subsubnet_counts_increases_on_superblock() { +fn update_subsubnet_counts_increases() { new_test_ext(1).execute_with(|| { // Base subnet exists let netuid = NetUid::from(42u16); NetworksAdded::::insert(NetUid::from(42u16), true); - // super_block = SuperBlockTempos() * 
Tempo(netuid) - netuid - Tempo::::insert(netuid, 360u16); - let super_block = u64::from(SuperBlockTempos::::get()) - * u64::from(Tempo::::get(netuid)) - - u16::from(netuid) as u64; - - // Choose counts so result is deterministic for ANY increase-per-superblock. - let inc: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); + // Choose counts let old = SubId::from(1u8); - let desired = SubId::from(5u8); + let desired = SubId::from(2u8); SubsubnetCountCurrent::::insert(netuid, old); - SubsubnetCountDesired::::insert(netuid, desired); // Set non-default subnet emission split SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); - // Act exactly on a super-block boundary - SubtensorModule::update_subsubnet_counts_if_needed(super_block); + // Act + SubtensorModule::update_subsubnet_counts_if_needed(netuid, desired); - // New count is old + inc - assert_eq!( - SubsubnetCountCurrent::::get(netuid), - SubId::from(1 + inc) - ); + // New count is as desired + assert_eq!(SubsubnetCountCurrent::::get(netuid), desired); - // SubsubnetEmissionSplit is reset on super-block + // SubsubnetEmissionSplit is reset assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); }); } -#[test] -fn update_subsubnet_counts_no_change_when_not_superblock() { - new_test_ext(1).execute_with(|| { - let netuid = NetUid::from(100u16); - NetworksAdded::::insert(NetUid::from(100u16), true); - - Tempo::::insert(netuid, 1u16); - let super_block = - u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)); - - // Setup counts as in the previous test - let dec: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); - let old = SubId::from(dec.saturating_add(3)); - let desired = SubId::from(1u8); - SubsubnetCountCurrent::::insert(netuid, old); - SubsubnetCountDesired::::insert(netuid, desired); - - // Marker value at a subid that would be kept if a change happened - let idx_mark = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); - 
Incentive::::insert(idx_mark, vec![77u16]); - - // Act on a non-boundary - SubtensorModule::update_subsubnet_counts_if_needed(super_block - 1); - - // Nothing changes - assert_eq!(SubsubnetCountCurrent::::get(netuid), old); - assert_eq!(Incentive::::get(idx_mark), vec![77u16]); - }); -} - #[test] fn split_emissions_even_division() { new_test_ext(1).execute_with(|| { From 7caedb1d53fbc7b41cde7f4dbb16b99928e7be06 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Mon, 8 Sep 2025 18:20:04 -0400 Subject: [PATCH 155/379] Cleanup admin-util for setting cubcubnet counts --- pallets/admin-utils/src/lib.rs | 4 ++-- pallets/admin-utils/src/tests/mod.rs | 14 +++++--------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index e64fee4a84..37579c4e32 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1625,13 +1625,13 @@ pub mod pallet { #[pallet::weight(Weight::from_parts(15_000_000, 0) .saturating_add(::DbWeight::get().reads(1_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] - pub fn sudo_set_desired_subsubnet_count( + pub fn sudo_set_subsubnet_count( origin: OriginFor, netuid: NetUid, subsub_count: SubId, ) -> DispatchResult { pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; - pallet_subtensor::Pallet::::do_set_desired_subsubnet_count(netuid, subsub_count)?; + pallet_subtensor::Pallet::::do_set_subsubnet_count(netuid, subsub_count)?; Ok(()) } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 79c237f526..ae50059142 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2074,7 +2074,7 @@ fn test_sudo_set_max_burn() { } #[test] -fn test_sudo_set_desired_subsubnet_count() { +fn test_sudo_set_subsubnet_count() { new_test_ext().execute_with(|| { let netuid = NetUid::from(1); let ss_count_ok = SubId::from(8); @@ -2086,7 +2086,7 @@ fn 
test_sudo_set_desired_subsubnet_count() { SubnetOwner::::insert(netuid, sn_owner); assert_eq!( - AdminUtils::sudo_set_desired_subsubnet_count( + AdminUtils::sudo_set_subsubnet_count( <::RuntimeOrigin>::signed(U256::from(1)), netuid, ss_count_ok @@ -2094,21 +2094,17 @@ fn test_sudo_set_desired_subsubnet_count() { Err(DispatchError::BadOrigin) ); assert_noop!( - AdminUtils::sudo_set_desired_subsubnet_count( - RuntimeOrigin::root(), - netuid, - ss_count_bad - ), + AdminUtils::sudo_set_subsubnet_count(RuntimeOrigin::root(), netuid, ss_count_bad), pallet_subtensor::Error::::InvalidValue ); - assert_ok!(AdminUtils::sudo_set_desired_subsubnet_count( + assert_ok!(AdminUtils::sudo_set_subsubnet_count( <::RuntimeOrigin>::root(), netuid, ss_count_ok )); - assert_ok!(AdminUtils::sudo_set_desired_subsubnet_count( + assert_ok!(AdminUtils::sudo_set_subsubnet_count( <::RuntimeOrigin>::signed(sn_owner), netuid, ss_count_ok From 0c5c596dfa7602fff5865a57c98e3020b642e5bf Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 8 Sep 2025 22:21:54 +0000 Subject: [PATCH 156/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index b9f3391d5e..80459643d4 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -623,7 +623,7 @@ mod dispatches { /// - Attempting to set prometheus information withing the rate limit min. 
/// #[pallet::call_index(40)] - #[pallet::weight((Weight::from_parts(41_240_000, 0) + #[pallet::weight((Weight::from_parts(32_440_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_axon_tls( @@ -1041,7 +1041,7 @@ mod dispatches { #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) .saturating_add(T::DbWeight::get().reads(37_u64)) - .saturating_add(T::DbWeight::get().writes(59_u64)), DispatchClass::Normal, Pays::No))] + .saturating_add(T::DbWeight::get().writes(60_u64)), DispatchClass::Normal, Pays::No))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1328,7 +1328,7 @@ mod dispatches { #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) .saturating_add(T::DbWeight::get().reads(36_u64)) - .saturating_add(T::DbWeight::get().writes(58_u64)), DispatchClass::Normal, Pays::No))] + .saturating_add(T::DbWeight::get().writes(59_u64)), DispatchClass::Normal, Pays::No))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, From a9470a5e4bc76ecc147c5f721317f918728c94f7 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Tue, 9 Sep 2025 15:23:35 +0300 Subject: [PATCH 157/379] Update EVM tests and cleanup --- evm-tests/README.md | 6 ++ .../test/staking.precompile.stake-get.test.ts | 2 +- .../subnet.precompile.hyperparameter.test.ts | 17 ++++- pallets/admin-utils/src/lib.rs | 66 +++++++------------ pallets/admin-utils/src/tests/mod.rs | 6 +- pallets/subtensor/src/utils/misc.rs | 25 +++---- 6 files changed, 64 insertions(+), 58 deletions(-) diff --git a/evm-tests/README.md b/evm-tests/README.md index 83dc8f326f..ed3782e0f7 100644 --- a/evm-tests/README.md +++ b/evm-tests/README.md @@ -13,6 +13,12 @@ between runtime and precompile contracts. 
## polkadot api +You need `polkadot-api` globally installed: + +```bash +$ npm i -g polkadot-api +``` + To get the metadata, you need start the localnet via run `./scripts/localnet.sh`. then run following command to get metadata, a folder name .papi will be created, which include the metadata and type definitions. diff --git a/evm-tests/test/staking.precompile.stake-get.test.ts b/evm-tests/test/staking.precompile.stake-get.test.ts index d9cc79aeab..4730e310d9 100644 --- a/evm-tests/test/staking.precompile.stake-get.test.ts +++ b/evm-tests/test/staking.precompile.stake-get.test.ts @@ -45,7 +45,7 @@ describe("Test staking precompile get methods", () => { await contract.getStake(hotkey.publicKey, coldkey.publicKey, netuid) ); - // validator returned as bigint now. + // validator returned as bigint now. const validators = await contract.getAlphaStakedValidators(hotkey.publicKey, netuid) diff --git a/evm-tests/test/subnet.precompile.hyperparameter.test.ts b/evm-tests/test/subnet.precompile.hyperparameter.test.ts index b8a6f19075..5d81049d41 100644 --- a/evm-tests/test/subnet.precompile.hyperparameter.test.ts +++ b/evm-tests/test/subnet.precompile.hyperparameter.test.ts @@ -1,6 +1,6 @@ import * as assert from "assert"; -import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { getDevnetApi, getRandomSubstrateKeypair, getAliceSigner, waitForTransactionWithRetry } from "../src/substrate" import { devnet } from "@polkadot-api/descriptors" import { TypedApi } from "polkadot-api"; import { convertPublicKeyToSs58 } from "../src/address-utils" @@ -25,6 +25,21 @@ describe("Test the Subnet precompile contract", () => { await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey1.publicKey)) await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey2.publicKey)) await forceSetBalanceToEthAddress(api, wallet.address) + + // Disable admin freeze window and owner hyperparam rate limiting for tests + { + const alice = getAliceSigner() + 
+ // Set AdminFreezeWindow to 0 + const setFreezeWindow = api.tx.AdminUtils.sudo_set_admin_freeze_window({ window: 0 }) + const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) + await waitForTransactionWithRetry(api, sudoFreezeTx, alice) + + // Set OwnerHyperparamRateLimit to 0 + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) + const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) + await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) + } }) it("Can register network without identity info", async () => { diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 9a52742c80..5609fadd10 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -220,7 +220,7 @@ pub mod pallet { )?; pallet_subtensor::Pallet::::set_serving_rate_limit(netuid, serving_rate_limit); log::debug!("ServingRateLimitSet( serving_rate_limit: {serving_rate_limit:?} ) "); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -279,7 +279,7 @@ pub mod pallet { log::debug!( "MaxDifficultySet( netuid: {netuid:?} max_difficulty: {max_difficulty:?} ) " ); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -313,7 +313,7 @@ pub mod pallet { Error::::SubnetDoesNotExist ); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[ @@ -407,7 +407,7 @@ pub mod pallet { Error::::SubnetDoesNotExist ); pallet_subtensor::Pallet::::set_adjustment_alpha(netuid, adjustment_alpha); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -439,7 +439,7 @@ pub mod pallet { Error::::SubnetDoesNotExist ); pallet_subtensor::Pallet::::set_max_weight_limit(netuid, 
max_weight_limit); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -473,7 +473,7 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_immunity_period(netuid, immunity_period); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -510,7 +510,7 @@ pub mod pallet { log::debug!( "MinAllowedWeightSet( netuid: {netuid:?} min_allowed_weights: {min_allowed_weights:?} ) " ); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -566,7 +566,7 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_kappa(netuid, kappa); log::debug!("KappaSet( netuid: {netuid:?} kappa: {kappa:?} ) "); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -594,7 +594,7 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_rho(netuid, rho); log::debug!("RhoSet( netuid: {netuid:?} rho: {rho:?} ) "); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -634,7 +634,7 @@ pub mod pallet { log::debug!( "ActivityCutoffSet( netuid: {netuid:?} activity_cutoff: {activity_cutoff:?} ) " ); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -695,7 +695,7 @@ pub mod pallet { log::debug!( "NetworkPowRegistrationAllowed( registration_allowed: {registration_allowed:?} ) " ); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -763,7 +763,7 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_min_burn(netuid, min_burn); log::debug!("MinBurnSet( netuid: {netuid:?} min_burn: {min_burn:?} ) "); - 
Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -803,7 +803,7 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_max_burn(netuid, max_burn); log::debug!("MaxBurnSet( netuid: {netuid:?} max_burn: {max_burn:?} ) "); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -898,7 +898,7 @@ pub mod pallet { log::debug!( "BondsMovingAverageSet( netuid: {netuid:?} bonds_moving_average: {bonds_moving_average:?} ) " ); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -930,7 +930,7 @@ pub mod pallet { ); pallet_subtensor::Pallet::::set_bonds_penalty(netuid, bonds_penalty); log::debug!("BondsPenalty( netuid: {netuid:?} bonds_penalty: {bonds_penalty:?} ) "); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -1215,7 +1215,7 @@ pub mod pallet { pallet_subtensor::Pallet::::set_commit_reveal_weights_enabled(netuid, enabled); log::debug!("ToggleSetWeightsCommitReveal( netuid: {netuid:?} ) "); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -1246,7 +1246,7 @@ pub mod pallet { )?; pallet_subtensor::Pallet::::set_liquid_alpha_enabled(netuid, enabled); log::debug!("LiquidAlphaEnableToggled( netuid: {netuid:?}, Enabled: {enabled:?} ) "); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -1272,7 +1272,7 @@ pub mod pallet { origin, netuid, alpha_low, alpha_high, ); if res.is_ok() { - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -1384,7 +1384,7 @@ pub mod pallet { 
log::debug!("SetWeightCommitInterval( netuid: {netuid:?}, interval: {interval:?} ) "); pallet_subtensor::Pallet::::set_reveal_period(netuid, interval)?; - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -1469,7 +1469,7 @@ pub mod pallet { )?; let res = pallet_subtensor::Pallet::::toggle_transfer(netuid, toggle); if res.is_ok() { - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -1624,7 +1624,7 @@ pub mod pallet { pallet_subtensor::Pallet::::set_alpha_sigmoid_steepness(netuid, steepness); log::debug!("AlphaSigmoidSteepnessSet( netuid: {netuid:?}, steepness: {steepness:?} )"); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -1657,7 +1657,7 @@ pub mod pallet { Self::deposit_event(Event::Yuma3EnableToggled { netuid, enabled }); log::debug!("Yuma3EnableToggled( netuid: {netuid:?}, Enabled: {enabled:?} ) "); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -1690,7 +1690,7 @@ pub mod pallet { Self::deposit_event(Event::BondsResetToggled { netuid, enabled }); log::debug!("BondsResetToggled( netuid: {netuid:?} bonds_reset: {enabled:?} ) "); - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -1800,7 +1800,7 @@ pub mod pallet { &[TransactionType::OwnerHyperparamUpdate], )?; pallet_subtensor::Pallet::::set_owner_immune_neuron_limit(netuid, immune_neurons)?; - Self::record_owner_rl( + pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, &[TransactionType::OwnerHyperparamUpdate], @@ -1847,24 +1847,6 @@ pub mod pallet { Ok(()) } } - - impl Pallet { - // Helper: if owner path, record last-blocks for the provided TransactionTypes 
- fn record_owner_rl( - maybe_owner: Option<::AccountId>, - netuid: NetUid, - txs: &[TransactionType], - ) { - if let Some(who) = maybe_owner { - let now = pallet_subtensor::Pallet::::get_current_block_as_u64(); - for tx in txs { - pallet_subtensor::Pallet::::set_last_transaction_block_on_subnet( - &who, netuid, tx, now, - ); - } - } - } - } } impl sp_runtime::BoundToRuntimeAppPublic for Pallet { diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index df11fd9912..9b0197860c 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -1968,7 +1968,7 @@ fn test_sudo_set_admin_freeze_window_and_rate() { <::RuntimeOrigin>::root(), 7 )); - assert_eq!(SubtensorModule::get_admin_freeze_window(), 7); + assert_eq!(pallet_subtensor::AdminFreezeWindow::::get(), 7); // Owner hyperparam rate limit setter assert_eq!( @@ -1982,7 +1982,7 @@ fn test_sudo_set_admin_freeze_window_and_rate() { <::RuntimeOrigin>::root(), 5 )); - assert_eq!(SubtensorModule::get_owner_hyperparam_rate_limit(), 5); + assert_eq!(pallet_subtensor::OwnerHyperparamRateLimit::::get(), 5); }); } @@ -2149,7 +2149,7 @@ fn test_hyperparam_rate_limit_not_blocking_with_default() { SubnetOwner::::insert(netuid, owner); // Read the default (unset) owner hyperparam rate limit - let default_limit = SubtensorModule::get_owner_hyperparam_rate_limit(); + let default_limit = pallet_subtensor::OwnerHyperparamRateLimit::::get(); assert_eq!(default_limit, 0); diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 0951cd3ead..cc4485bf55 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -111,27 +111,30 @@ impl Pallet { Ok(()) } - // (Removed dedicated ensure_owner_hparam_rate_limit; OwnerHyperparamUpdate is checked via TransactionType) - - // === Admin freeze window accessors === - pub fn get_admin_freeze_window() -> u16 { - AdminFreezeWindow::::get() - } - pub fn 
set_admin_freeze_window(window: u16) { AdminFreezeWindow::::set(window); Self::deposit_event(Event::AdminFreezeWindowSet(window)); } - pub fn get_owner_hyperparam_rate_limit() -> u64 { - OwnerHyperparamRateLimit::::get() - } - pub fn set_owner_hyperparam_rate_limit(limit: u64) { OwnerHyperparamRateLimit::::set(limit); Self::deposit_event(Event::OwnerHyperparamRateLimitSet(limit)); } + /// If owner is `Some`, record last-blocks for the provided `TransactionType`s. + pub fn record_owner_rl( + maybe_owner: Option<::AccountId>, + netuid: NetUid, + txs: &[TransactionType], + ) { + if let Some(who) = maybe_owner { + let now = Self::get_current_block_as_u64(); + for tx in txs { + Self::set_last_transaction_block_on_subnet(&who, netuid, tx, now); + } + } + } + // ======================== // ==== Global Setters ==== // ======================== From 0b5e5b1467391798d3f1c8a7119ecf48fc74df17 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Wed, 3 Sep 2025 18:18:31 -0300 Subject: [PATCH 158/379] trim by emission and compress to the left --- pallets/admin-utils/src/lib.rs | 6 +- pallets/admin-utils/src/tests/mock.rs | 6 +- pallets/admin-utils/src/tests/mod.rs | 174 +++++++++++-- .../subtensor/src/coinbase/run_coinbase.rs | 32 ++- pallets/subtensor/src/lib.rs | 14 +- pallets/subtensor/src/macros/config.rs | 8 +- pallets/subtensor/src/subnets/uids.rs | 237 ++++++++++++------ pallets/subtensor/src/tests/mock.rs | 2 - pallets/transaction-fee/src/tests/mock.rs | 2 - runtime/src/lib.rs | 3 +- 10 files changed, 349 insertions(+), 135 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index a4d8ff9061..5a49796e30 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1626,7 +1626,11 @@ pub mod pallet { Ok(()) } - /// Sets the maximum allowed UIDs for a subnet + /// Trims the maximum number of UIDs for a subnet. 
+ /// + /// The trimming is done by sorting the UIDs by emission descending and then trimming + /// the lowest emitters while preserving temporally and owner immune UIDs. The UIDs are + /// then compressed to the left and storage is migrated to the new compressed UIDs. #[pallet::call_index(74)] #[pallet::weight(Weight::from_parts(15_000_000, 0) .saturating_add(::DbWeight::get().reads(1_u64)) diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 67099a1906..a3931b045d 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -91,7 +91,8 @@ parameter_types! { pub const InitialTempo: u16 = 0; pub const SelfOwnership: u64 = 2; pub const InitialImmunityPeriod: u16 = 2; - pub const InitialMaxAllowedUids: u16 = 2; + pub const InitialMinAllowedUids: u16 = 2; + pub const InitialMaxAllowedUids: u16 = 4; pub const InitialBondsMovingAverage: u64 = 900_000; pub const InitialBondsPenalty: u16 = u16::MAX; pub const InitialBondsResetOn: bool = false; @@ -129,7 +130,6 @@ parameter_types! { pub const InitialRAORecycledForRegistration: u64 = 0; pub const InitialSenateRequiredStakePercentage: u64 = 2; // 2 percent of total stake pub const InitialNetworkImmunityPeriod: u64 = 7200 * 7; - pub const InitialNetworkMinAllowedUids: u16 = 128; pub const InitialNetworkMinLockCost: u64 = 100_000_000_000; pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners. pub const InitialNetworkLockReductionInterval: u64 = 2; // 2 blocks. 
@@ -174,6 +174,7 @@ impl pallet_subtensor::Config for Test { type InitialRho = InitialRho; type InitialAlphaSigmoidSteepness = InitialAlphaSigmoidSteepness; type InitialKappa = InitialKappa; + type InitialMinAllowedUids = InitialMinAllowedUids; type InitialMaxAllowedUids = InitialMaxAllowedUids; type InitialValidatorPruneLen = InitialValidatorPruneLen; type InitialScalingLawPower = InitialScalingLawPower; @@ -205,7 +206,6 @@ impl pallet_subtensor::Config for Test { type InitialRAORecycledForRegistration = InitialRAORecycledForRegistration; type InitialSenateRequiredStakePercentage = InitialSenateRequiredStakePercentage; type InitialNetworkImmunityPeriod = InitialNetworkImmunityPeriod; - type InitialNetworkMinAllowedUids = InitialNetworkMinAllowedUids; type InitialNetworkMinLockCost = InitialNetworkMinLockCost; type InitialSubnetOwnerCut = InitialSubnetOwnerCut; type InitialNetworkLockReductionInterval = InitialNetworkLockReductionInterval; diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index a131c7e0f3..ccce3a1e49 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2077,57 +2077,178 @@ fn test_sudo_set_max_burn() { fn test_trim_to_max_allowed_uids() { new_test_ext().execute_with(|| { let netuid = NetUid::from(1); + let sn_owner = U256::from(1); + let sn_owner_hotkey1 = U256::from(2); + let sn_owner_hotkey2 = U256::from(3); add_network(netuid, 10); + SubnetOwner::::insert(netuid, sn_owner); + SubnetOwnerHotkey::::insert(netuid, sn_owner_hotkey1); MaxRegistrationsPerBlock::::insert(netuid, 256); TargetRegistrationsPerInterval::::insert(netuid, 256); + ImmuneOwnerUidsLimit::::insert(netuid, 2); + // We set a low value here to make testing easier + MinAllowedUids::::set(netuid, 4); // Add some neurons - let max_n = 32; + let max_n = 16; for i in 1..=max_n { let n = i * 1000; register_ok_neuron(netuid, U256::from(n), U256::from(n + i), 0); } - // Run some block to ensure stake weights 
are set - run_to_block(20); + // Run some block to ensure stake weights are set and that we are past the immunity period + // for all neurons + run_to_block((ImmunityPeriod::::get(netuid) + 1).into()); + + // Set some randomized values that we can keep track of + let values = vec![ + 17u16, 42u16, 8u16, 56u16, 23u16, 91u16, 34u16, // owner owned + 77u16, // temporally immune + 12u16, 65u16, 3u16, 88u16, // owner owned + 29u16, 51u16, 74u16, // temporally immune + 39u16, + ]; + let bool_values = vec![ + false, false, false, true, false, true, true, // owner owned + true, // temporally immune + false, true, false, true, // owner owned + false, true, true, // temporally immune + false, + ]; + let alpha_values = values.iter().map(|&v| (v as u64).into()).collect(); + let u64_values: Vec = values.iter().map(|&v| v as u64).collect(); + + Emission::::set(netuid, alpha_values); + Rank::::insert(netuid, values.clone()); + Trust::::insert(netuid, values.clone()); + Consensus::::insert(netuid, values.clone()); + Incentive::::insert(netuid, values.clone()); + Dividends::::insert(netuid, values.clone()); + LastUpdate::::insert(netuid, u64_values); + PruningScores::::insert(netuid, values.clone()); + ValidatorTrust::::insert(netuid, values.clone()); + StakeWeight::::insert(netuid, values); + ValidatorPermit::::insert(netuid, bool_values.clone()); + Active::::insert(netuid, bool_values); + + // We set some owner immune uids + let now = frame_system::Pallet::::block_number(); + BlockAtRegistration::::set(netuid, 6, now); + BlockAtRegistration::::set(netuid, 11, now); + + // And some temporally immune uids + Keys::::insert(netuid, 7, sn_owner_hotkey1); + Uids::::insert(netuid, sn_owner_hotkey1, 7); + Keys::::insert(netuid, 14, sn_owner_hotkey2); + Uids::::insert(netuid, sn_owner_hotkey2, 14); + + // Populate Weights and Bonds storage items to test trimming + // Create weights and bonds that span across the range that will be trimmed + for uid in 0..max_n { + let mut weights = 
Vec::new(); + let mut bonds = Vec::new(); + + // Add connections to all other uids, including those that will be trimmed + for target_uid in 0..max_n { + if target_uid != uid { + // Use some non-zero values to make the test more meaningful + let weight_value = ((uid + target_uid) % 1000) as u16; + let bond_value = ((uid * target_uid) % 1000) as u16; + weights.push((target_uid, weight_value)); + bonds.push((target_uid, bond_value)); + } + } + + Weights::::insert(netuid, uid, weights); + Bonds::::insert(netuid, uid, bonds); + } // Normal case - let new_max_n = 20; + let new_max_n = 8; assert_ok!(AdminUtils::sudo_trim_to_max_allowed_uids( <::RuntimeOrigin>::root(), netuid, new_max_n )); - // Ensure storage has been trimmed + // Ensure the max allowed uids has been set correctly assert_eq!(MaxAllowedUids::::get(netuid), new_max_n); - assert_eq!(Rank::::get(netuid).len(), new_max_n as usize); - assert_eq!(Trust::::get(netuid).len(), new_max_n as usize); - assert_eq!(Active::::get(netuid).len(), new_max_n as usize); - assert_eq!(Emission::::get(netuid).len(), new_max_n as usize); - assert_eq!(Consensus::::get(netuid).len(), new_max_n as usize); - assert_eq!(Incentive::::get(netuid).len(), new_max_n as usize); - assert_eq!(Dividends::::get(netuid).len(), new_max_n as usize); - assert_eq!(LastUpdate::::get(netuid).len(), new_max_n as usize); - assert_eq!(PruningScores::::get(netuid).len(), new_max_n as usize); - assert_eq!( - ValidatorTrust::::get(netuid).len(), - new_max_n as usize - ); - assert_eq!( - ValidatorPermit::::get(netuid).len(), - new_max_n as usize - ); - assert_eq!(StakeWeight::::get(netuid).len(), new_max_n as usize); - for uid in max_n..new_max_n { + // Ensure the emission has been trimmed correctly, keeping the highest emitters + // and immune and compressed to the left + assert_eq!( + Emission::::get(netuid), + vec![ + 56.into(), + 91.into(), + 34.into(), + 77.into(), + 65.into(), + 88.into(), + 51.into(), + 74.into() + ] + ); + // Ensure rest of storage 
has been trimmed correctly + let expected_values = vec![56, 91, 34, 77, 65, 88, 51, 74]; + let expected_bools = vec![true, true, true, true, true, true, true, true]; + let expected_u64_values = vec![56, 91, 34, 77, 65, 88, 51, 74]; + assert_eq!(Rank::::get(netuid), expected_values); + assert_eq!(Trust::::get(netuid), expected_values); + assert_eq!(Active::::get(netuid), expected_bools); + assert_eq!(Consensus::::get(netuid), expected_values); + assert_eq!(Incentive::::get(netuid), expected_values); + assert_eq!(Dividends::::get(netuid), expected_values); + assert_eq!(LastUpdate::::get(netuid), expected_u64_values); + assert_eq!(PruningScores::::get(netuid), expected_values); + assert_eq!(ValidatorTrust::::get(netuid), expected_values); + assert_eq!(ValidatorPermit::::get(netuid), expected_bools); + assert_eq!(StakeWeight::::get(netuid), expected_values); + + // Ensure trimmed uids related storage has been cleared + for uid in new_max_n..max_n { assert!(!Keys::::contains_key(netuid, uid)); assert!(!BlockAtRegistration::::contains_key(netuid, uid)); assert!(!Weights::::contains_key(netuid, uid)); assert!(!Bonds::::contains_key(netuid, uid)); } - for uid in 0..max_n { + // Ensure trimmed uids hotkey related storage has been cleared + let trimmed_hotkeys = vec![ + U256::from(1000), + U256::from(2000), + U256::from(3000), + U256::from(5000), + U256::from(9000), + U256::from(11000), + U256::from(13000), + U256::from(16000), + ]; + for hotkey in trimmed_hotkeys { + assert!(!Uids::::contains_key(netuid, &hotkey)); + assert!(!IsNetworkMember::::contains_key(&hotkey, netuid)); + assert!(!LastHotkeyEmissionOnNetuid::::contains_key( + &hotkey, netuid + )); + assert!(!AlphaDividendsPerSubnet::::contains_key( + netuid, &hotkey + )); + assert!(!TaoDividendsPerSubnet::::contains_key( + netuid, &hotkey + )); + assert!(!Axons::::contains_key(netuid, &hotkey)); + assert!(!NeuronCertificates::::contains_key(netuid, &hotkey)); + assert!(!Prometheus::::contains_key(netuid, &hotkey)); + 
} + + // Ensure trimmed uids weights and bonds have been cleared + for uid in new_max_n..max_n { + assert!(!Weights::::contains_key(netuid, uid)); + assert!(!Bonds::::contains_key(netuid, uid)); + } + + // Ensure trimmed uids weights and bonds connections have been trimmed correctly + for uid in 0..new_max_n { assert!( Weights::::get(netuid, uid) .iter() @@ -2142,6 +2263,7 @@ fn test_trim_to_max_allowed_uids() { ); } + // Actual number of neurons on the network updated after trimming assert_eq!(SubnetworkN::::get(netuid), new_max_n); // Non existent subnet @@ -2159,7 +2281,7 @@ fn test_trim_to_max_allowed_uids() { AdminUtils::sudo_trim_to_max_allowed_uids( <::RuntimeOrigin>::root(), netuid, - 15 + 2 ), pallet_subtensor::Error::::InvalidValue ); diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 21fc9bd603..deb2921575 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -429,6 +429,20 @@ impl Pallet { } fn get_immune_owner_hotkeys(netuid: NetUid, coldkey: &T::AccountId) -> Vec { + Self::get_immune_owner_tuples(netuid, coldkey) + .into_iter() + .map(|(_, hk)| hk) + .collect() + } + + pub fn get_immune_owner_uids(netuid: NetUid, coldkey: &T::AccountId) -> Vec { + Self::get_immune_owner_tuples(netuid, coldkey) + .into_iter() + .map(|(uid, _)| uid) + .collect() + } + + fn get_immune_owner_tuples(netuid: NetUid, coldkey: &T::AccountId) -> Vec<(u16, T::AccountId)> { // Gather (block, uid, hotkey) only for hotkeys that have a UID and a registration block. 
let mut triples: Vec<(u64, u16, T::AccountId)> = OwnedHotkeys::::get(coldkey) .into_iter() @@ -451,22 +465,24 @@ impl Pallet { triples.truncate(limit); } - // Project to just hotkeys - let mut immune_hotkeys: Vec = - triples.into_iter().map(|(_, _, hk)| hk).collect(); + // Project to uid/hotkey tuple + let mut immune_tuples: Vec<(u16, T::AccountId)> = + triples.into_iter().map(|(_, uid, hk)| (uid, hk)).collect(); // Insert subnet owner hotkey in the beginning of the list if valid and not // already present if let Ok(owner_hk) = SubnetOwnerHotkey::::try_get(netuid) { - if Uids::::get(netuid, &owner_hk).is_some() && !immune_hotkeys.contains(&owner_hk) { - immune_hotkeys.insert(0, owner_hk); - if immune_hotkeys.len() > limit { - immune_hotkeys.truncate(limit); + if let Some(owner_uid) = Uids::::get(netuid, &owner_hk) { + if !immune_tuples.contains(&(owner_uid, owner_hk.clone())) { + immune_tuples.insert(0, (owner_uid, owner_hk.clone())); + if immune_tuples.len() > limit { + immune_tuples.truncate(limit); + } } } } - immune_hotkeys + immune_tuples } pub fn distribute_dividends_and_incentives( diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index c175261ba9..44bee03f86 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -530,11 +530,6 @@ pub mod pallet { 0 } #[pallet::type_value] - /// Default value for network min allowed UIDs. - pub fn DefaultNetworkMinAllowedUids() -> u16 { - T::InitialNetworkMinAllowedUids::get() - } - #[pallet::type_value] /// Default value for network min lock cost. pub fn DefaultNetworkMinLockCost() -> TaoCurrency { T::InitialNetworkMinLockCost::get().into() @@ -620,6 +615,11 @@ pub mod pallet { T::InitialKappa::get() } #[pallet::type_value] + /// Default value for network min allowed UIDs. + pub fn DefaultMinAllowedUids() -> u16 { + T::InitialMinAllowedUids::get() + } + #[pallet::type_value] /// Default maximum allowed UIDs. 
pub fn DefaultMaxAllowedUids() -> u16 { T::InitialMaxAllowedUids::get() @@ -1335,6 +1335,10 @@ pub mod pallet { pub type BurnRegistrationsThisInterval = StorageMap<_, Identity, NetUid, u16, ValueQuery>; #[pallet::storage] + /// --- MAP ( netuid ) --> min_allowed_uids + pub type MinAllowedUids = + StorageMap<_, Identity, NetUid, u16, ValueQuery, DefaultMinAllowedUids>; + #[pallet::storage] /// --- MAP ( netuid ) --> max_allowed_uids pub type MaxAllowedUids = StorageMap<_, Identity, NetUid, u16, ValueQuery, DefaultMaxAllowedUids>; diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index b89fe8e865..5ecc77364c 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -128,7 +128,10 @@ mod config { /// Kappa constant. #[pallet::constant] type InitialKappa: Get; - /// Max UID constant. + /// Initial minimum allowed network UIDs + #[pallet::constant] + type InitialMinAllowedUids: Get; + /// Initial maximum allowed network UIDs #[pallet::constant] type InitialMaxAllowedUids: Get; /// Initial validator context pruning length. 
@@ -191,9 +194,6 @@ mod config { /// Initial network immunity period #[pallet::constant] type InitialNetworkImmunityPeriod: Get; - /// Initial minimum allowed network UIDs - #[pallet::constant] - type InitialNetworkMinAllowedUids: Get; /// Initial network minimum burn cost #[pallet::constant] type InitialNetworkMinLockCost: Get; diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index cfa1770029..3d33f68bf3 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -1,5 +1,6 @@ use super::*; use frame_support::storage::IterableStorageDoubleMap; +use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use sp_std::vec; use subtensor_runtime_common::NetUid; @@ -114,115 +115,187 @@ impl Pallet { Self::if_subnet_exist(netuid), Error::::SubNetworkDoesNotExist ); - ensure!(max_n > 16, Error::::InvalidValue); ensure!( - max_n <= Self::get_max_allowed_uids(netuid), + max_n >= MinAllowedUids::::get(netuid), + Error::::InvalidValue + ); + ensure!( + max_n <= MaxAllowedUids::::get(netuid), Error::::InvalidValue ); - // Set the value. MaxAllowedUids::::insert(netuid, max_n); - // Check if we need to trim. - let current_n: u16 = Self::get_subnetwork_n(netuid); + let owner = SubnetOwner::::get(netuid); + let owner_uids = BTreeSet::from_iter(Self::get_immune_owner_uids(netuid, &owner)); + let current_n = Self::get_subnetwork_n(netuid); - // We need to trim, get rid of values between max_n and current_n. 
if current_n > max_n { - let ranks: Vec = Rank::::get(netuid); - let trimmed_ranks: Vec = ranks.into_iter().take(max_n as usize).collect(); - Rank::::insert(netuid, trimmed_ranks); - - let trust: Vec = Trust::::get(netuid); - let trimmed_trust: Vec = trust.into_iter().take(max_n as usize).collect(); - Trust::::insert(netuid, trimmed_trust); + // Get all emissions with their UIDs and sort by emission (descending) + // This ensures we keep the highest emitters and remove the lowest ones + let mut emissions = Emission::::get(netuid) + .into_iter() + .enumerate() + .collect::>(); + emissions.sort_by_key(|(_, emission)| std::cmp::Reverse(*emission)); + + // Remove uids from the end (lowest emitters) until we reach the new maximum + let mut removed_uids = BTreeSet::new(); + let mut uids_left_to_process = current_n; + + // Iterate from the end (lowest emitters) to the beginning + for i in (0..current_n).rev() { + if uids_left_to_process == max_n { + break; // We've reached the target number of UIDs + } - let active: Vec = Active::::get(netuid); - let trimmed_active: Vec = active.into_iter().take(max_n as usize).collect(); - Active::::insert(netuid, trimmed_active); + if let Some((uid, _)) = emissions.get(i as usize).cloned() { + // Skip subnet owner's or temporally immune uids + if owner_uids.contains(&(uid as u16)) + || Self::get_neuron_is_immune(netuid, uid as u16) + { + continue; + } + + // Remove hotkey related storage items if hotkey exists + if let Ok(hotkey) = Keys::::try_get(netuid, uid as u16) { + Uids::::remove(netuid, &hotkey); + IsNetworkMember::::remove(&hotkey, netuid); + LastHotkeyEmissionOnNetuid::::remove(&hotkey, netuid); + AlphaDividendsPerSubnet::::remove(netuid, &hotkey); + TaoDividendsPerSubnet::::remove(netuid, &hotkey); + Axons::::remove(netuid, &hotkey); + NeuronCertificates::::remove(netuid, &hotkey); + Prometheus::::remove(netuid, &hotkey); + } + + // Remove all storage items associated with this uid + Keys::::remove(netuid, uid as u16); + 
BlockAtRegistration::::remove(netuid, uid as u16); + Weights::::remove(netuid, uid as u16); + Bonds::::remove(netuid, uid as u16); + + // Remove from emissions array and track as removed + emissions.remove(i.into()); + removed_uids.insert(uid); + uids_left_to_process = uids_left_to_process.saturating_sub(1); + } + } - let emission: Vec = Emission::::get(netuid); - let trimmed_emission: Vec = - emission.into_iter().take(max_n as usize).collect(); - Emission::::insert(netuid, trimmed_emission); + // Sort remaining emissions by uid to compress uids to the left + // This ensures consecutive uid indices in the final arrays + emissions.sort_by_key(|(uid, _)| *uid); + + // Extract the final uids and emissions after trimming and sorting + let (trimmed_uids, trimmed_emissions): (Vec, Vec) = + emissions.into_iter().unzip(); + + // Get all current arrays from storage + let ranks = Rank::::get(netuid); + let trust = Trust::::get(netuid); + let active = Active::::get(netuid); + let consensus = Consensus::::get(netuid); + let incentive = Incentive::::get(netuid); + let dividends = Dividends::::get(netuid); + let lastupdate = LastUpdate::::get(netuid); + let pruning_scores = PruningScores::::get(netuid); + let vtrust = ValidatorTrust::::get(netuid); + let vpermit = ValidatorPermit::::get(netuid); + let stake_weight = StakeWeight::::get(netuid); + + // Create trimmed arrays by extracting values for kept uids only + // Pre-allocate vectors with exact capacity for efficiency + let mut trimmed_ranks = Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_trust = Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_active = Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_consensus = Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_incentive = Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_dividends = Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_lastupdate = Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_pruning_scores = 
Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_vtrust = Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_vpermit = Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_stake_weight = Vec::with_capacity(trimmed_uids.len()); + + // Single iteration to extract values for all kept uids + for &old_uid in &trimmed_uids { + trimmed_ranks.push(ranks.get(old_uid).cloned().unwrap_or_default()); + trimmed_trust.push(trust.get(old_uid).cloned().unwrap_or_default()); + trimmed_active.push(active.get(old_uid).cloned().unwrap_or_default()); + trimmed_consensus.push(consensus.get(old_uid).cloned().unwrap_or_default()); + trimmed_incentive.push(incentive.get(old_uid).cloned().unwrap_or_default()); + trimmed_dividends.push(dividends.get(old_uid).cloned().unwrap_or_default()); + trimmed_lastupdate.push(lastupdate.get(old_uid).cloned().unwrap_or_default()); + trimmed_pruning_scores + .push(pruning_scores.get(old_uid).cloned().unwrap_or_default()); + trimmed_vtrust.push(vtrust.get(old_uid).cloned().unwrap_or_default()); + trimmed_vpermit.push(vpermit.get(old_uid).cloned().unwrap_or_default()); + trimmed_stake_weight.push(stake_weight.get(old_uid).cloned().unwrap_or_default()); + } - let consensus: Vec = Consensus::::get(netuid); - let trimmed_consensus: Vec = consensus.into_iter().take(max_n as usize).collect(); + // Update storage with trimmed arrays + Emission::::insert(netuid, trimmed_emissions); + Rank::::insert(netuid, trimmed_ranks); + Trust::::insert(netuid, trimmed_trust); + Active::::insert(netuid, trimmed_active); Consensus::::insert(netuid, trimmed_consensus); - - let incentive: Vec = Incentive::::get(netuid); - let trimmed_incentive: Vec = incentive.into_iter().take(max_n as usize).collect(); Incentive::::insert(netuid, trimmed_incentive); - - let dividends: Vec = Dividends::::get(netuid); - let trimmed_dividends: Vec = dividends.into_iter().take(max_n as usize).collect(); Dividends::::insert(netuid, trimmed_dividends); - - let lastupdate: Vec = 
LastUpdate::::get(netuid); - let trimmed_lastupdate: Vec = - lastupdate.into_iter().take(max_n as usize).collect(); LastUpdate::::insert(netuid, trimmed_lastupdate); - - let pruning_scores: Vec = PruningScores::::get(netuid); - let trimmed_pruning_scores: Vec = - pruning_scores.into_iter().take(max_n as usize).collect(); PruningScores::::insert(netuid, trimmed_pruning_scores); - - let vtrust: Vec = ValidatorTrust::::get(netuid); - let trimmed_vtrust: Vec = vtrust.into_iter().take(max_n as usize).collect(); ValidatorTrust::::insert(netuid, trimmed_vtrust); - - let vpermit: Vec = ValidatorPermit::::get(netuid); - let trimmed_vpermit: Vec = vpermit.into_iter().take(max_n as usize).collect(); ValidatorPermit::::insert(netuid, trimmed_vpermit); - - let stake_weight: Vec = StakeWeight::::get(netuid); - let trimmed_stake_weight: Vec = - stake_weight.into_iter().take(max_n as usize).collect(); StakeWeight::::insert(netuid, trimmed_stake_weight); - for uid in max_n..current_n { - // Trim UIDs and Keys by removing entries with UID >= max_n (since UIDs are 0-indexed) - // UIDs range from 0 to current_n-1, so we remove UIDs from max_n to current_n-1 - if let Ok(hotkey) = Keys::::try_get(netuid, uid) { - Uids::::remove(netuid, &hotkey); - // Remove IsNetworkMember association for the hotkey - IsNetworkMember::::remove(&hotkey, netuid); - // Remove last hotkey emission for the hotkey - LastHotkeyEmissionOnNetuid::::remove(&hotkey, netuid); - // Remove alpha dividends for the hotkey - AlphaDividendsPerSubnet::::remove(netuid, &hotkey); - // Remove tao dividends for the hotkey - TaoDividendsPerSubnet::::remove(netuid, &hotkey); - // Trim axons, certificates, and prometheus info for removed hotkeys - Axons::::remove(netuid, &hotkey); - NeuronCertificates::::remove(netuid, &hotkey); - Prometheus::::remove(netuid, &hotkey); - } - #[allow(unknown_lints)] - Keys::::remove(netuid, uid); - // Remove block at registration for the uid - BlockAtRegistration::::remove(netuid, uid); - // 
Remove entire weights and bonds entries for removed UIDs - Weights::::remove(netuid, uid); - Bonds::::remove(netuid, uid); - } - - // Trim weight and bond connections to removed UIDs for remaining neurons - // UIDs 0 to max_n-1 are kept, so we iterate through these valid UIDs - for uid in 0..max_n { - Weights::::mutate(netuid, uid, |weights| { - weights.retain(|(target_uid, _)| *target_uid < max_n); + // Create mapping from old uid to new compressed uid + // This is needed to update connections (weights and bonds) with correct uid references + let old_to_new_uid: BTreeMap = trimmed_uids + .iter() + .enumerate() + .map(|(new_uid, &old_uid)| (old_uid, new_uid)) + .collect(); + + // Update connections (weights and bonds) for each kept uid + // This involves three operations per uid: + // 1. Swap the uid storage to the new compressed position + // 2. Update all connections to reference the new compressed uids + // 3. Clear the connections to the trimmed uids + for (old_uid, new_uid) in &old_to_new_uid { + // Swap uid specific storage items to new compressed positions + Keys::::swap(netuid, *old_uid as u16, netuid, *new_uid as u16); + BlockAtRegistration::::swap(netuid, *old_uid as u16, netuid, *new_uid as u16); + + // Swap to new position and remap all target uids + Weights::::swap(netuid, *old_uid as u16, netuid, *new_uid as u16); + Weights::::mutate(netuid, *new_uid as u16, |weights| { + weights.retain_mut(|(target_uid, _weight)| { + if let Some(new_target_uid) = old_to_new_uid.get(&(*target_uid as usize)) { + *target_uid = *new_target_uid as u16; + true + } else { + false + } + }) }); - Bonds::::mutate(netuid, uid, |bonds| { - bonds.retain(|(target_uid, _)| *target_uid < max_n); + + // Swap to new position and remap all target uids + Bonds::::swap(netuid, *old_uid as u16, netuid, *new_uid as u16); + Bonds::::mutate(netuid, *new_uid as u16, |bonds| { + bonds.retain_mut(|(target_uid, _bond)| { + if let Some(new_target_uid) = old_to_new_uid.get(&(*target_uid as usize)) 
{ + *target_uid = *new_target_uid as u16; + true + } else { + false + } + }) }); } - // Update the subnetwork size + // Update the subnet's uid count to reflect the new maximum SubnetworkN::::insert(netuid, max_n); } - // --- Ok and done. Ok(()) } diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index d011983383..30efd9bfd3 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -202,7 +202,6 @@ parameter_types! { pub const InitialRAORecycledForRegistration: u64 = 0; pub const InitialSenateRequiredStakePercentage: u64 = 2; // 2 percent of total stake pub const InitialNetworkImmunityPeriod: u64 = 7200 * 7; - pub const InitialNetworkMinAllowedUids: u16 = 128; pub const InitialNetworkMinLockCost: u64 = 100_000_000_000; pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners. pub const InitialNetworkLockReductionInterval: u64 = 2; // 2 blocks. @@ -436,7 +435,6 @@ impl crate::Config for Test { type InitialRAORecycledForRegistration = InitialRAORecycledForRegistration; type InitialSenateRequiredStakePercentage = InitialSenateRequiredStakePercentage; type InitialNetworkImmunityPeriod = InitialNetworkImmunityPeriod; - type InitialNetworkMinAllowedUids = InitialNetworkMinAllowedUids; type InitialNetworkMinLockCost = InitialNetworkMinLockCost; type InitialSubnetOwnerCut = InitialSubnetOwnerCut; type InitialNetworkLockReductionInterval = InitialNetworkLockReductionInterval; diff --git a/pallets/transaction-fee/src/tests/mock.rs b/pallets/transaction-fee/src/tests/mock.rs index 3248c45e60..2871f6a792 100644 --- a/pallets/transaction-fee/src/tests/mock.rs +++ b/pallets/transaction-fee/src/tests/mock.rs @@ -194,7 +194,6 @@ parameter_types! 
{ pub const InitialRAORecycledForRegistration: u64 = 0; pub const InitialSenateRequiredStakePercentage: u64 = 2; // 2 percent of total stake pub const InitialNetworkImmunityPeriod: u64 = 7200 * 7; - pub const InitialNetworkMinAllowedUids: u16 = 128; pub const InitialNetworkMinLockCost: u64 = 100_000_000_000; pub const InitialSubnetOwnerCut: u16 = 0; // 0%. 100% of rewards go to validators + miners. pub const InitialNetworkLockReductionInterval: u64 = 2; // 2 blocks. @@ -270,7 +269,6 @@ impl pallet_subtensor::Config for Test { type InitialRAORecycledForRegistration = InitialRAORecycledForRegistration; type InitialSenateRequiredStakePercentage = InitialSenateRequiredStakePercentage; type InitialNetworkImmunityPeriod = InitialNetworkImmunityPeriod; - type InitialNetworkMinAllowedUids = InitialNetworkMinAllowedUids; type InitialNetworkMinLockCost = InitialNetworkMinLockCost; type InitialSubnetOwnerCut = InitialSubnetOwnerCut; type InitialNetworkLockReductionInterval = InitialNetworkLockReductionInterval; diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index cc9c02eca3..3833eac796 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1140,7 +1140,7 @@ parameter_types! 
{ pub const SubtensorInitialRAORecycledForRegistration: u64 = 0; // 0 rao pub const SubtensorInitialSenateRequiredStakePercentage: u64 = 1; // 1 percent of total stake pub const SubtensorInitialNetworkImmunity: u64 = 7 * 7200; - pub const SubtensorInitialMinAllowedUids: u16 = 128; + pub const SubtensorInitialMinAllowedUids: u16 = 256; pub const SubtensorInitialMinLockCost: u64 = 1_000_000_000_000; // 1000 TAO pub const SubtensorInitialSubnetOwnerCut: u16 = 11_796; // 18 percent // pub const SubtensorInitialSubnetLimit: u16 = 12; // (DEPRECATED) @@ -1216,7 +1216,6 @@ impl pallet_subtensor::Config for Runtime { type InitialRAORecycledForRegistration = SubtensorInitialRAORecycledForRegistration; type InitialSenateRequiredStakePercentage = SubtensorInitialSenateRequiredStakePercentage; type InitialNetworkImmunityPeriod = SubtensorInitialNetworkImmunity; - type InitialNetworkMinAllowedUids = SubtensorInitialMinAllowedUids; type InitialNetworkMinLockCost = SubtensorInitialMinLockCost; type InitialNetworkLockReductionInterval = SubtensorInitialNetworkLockReductionInterval; type InitialSubnetOwnerCut = SubtensorInitialSubnetOwnerCut; From 15a39251b44e4a03b4a78051dd7c7885c16a4874 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 2025 11:39:27 -0300 Subject: [PATCH 159/379] add sudo set min allowed uids --- pallets/admin-utils/src/benchmarking.rs | 11 ++++ pallets/admin-utils/src/lib.rs | 35 +++++++++++++ pallets/admin-utils/src/tests/mod.rs | 64 +++++++++++++++++++++++ pallets/subtensor/src/macros/events.rs | 3 ++ pallets/subtensor/src/subnets/uids.rs | 4 +- pallets/subtensor/src/tests/mock.rs | 4 +- pallets/subtensor/src/utils/misc.rs | 8 +++ pallets/transaction-fee/src/tests/mock.rs | 4 +- runtime/src/lib.rs | 1 + 9 files changed, 130 insertions(+), 4 deletions(-) diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 3e42763f68..919591ba39 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ 
b/pallets/admin-utils/src/benchmarking.rs @@ -203,6 +203,17 @@ mod benchmarks { _(RawOrigin::Root, 1u16.into()/*netuid*/, 3u16/*kappa*/)/*set_kappa*/; } + #[benchmark] + fn sudo_set_min_allowed_uids() { + pallet_subtensor::Pallet::::init_new_network( + 1u16.into(), /*netuid*/ + 1u16, /*tempo*/ + ); + + #[extrinsic_call] + _(RawOrigin::Root, 1u16.into()/*netuid*/, 32u16/*max_allowed_uids*/)/*sudo_set_max_allowed_uids*/; + } + #[benchmark] fn sudo_set_max_allowed_uids() { pallet_subtensor::Pallet::::init_new_network( diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 5a49796e30..66f33d442c 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -109,6 +109,10 @@ pub mod pallet { NegativeSigmoidSteepness, /// Value not in allowed bounds. ValueNotInBounds, + /// The minimum allowed UIDs must be less than the current number of UIDs in the subnet. + MinAllowedUidsGreaterThanCurrentUids, + /// The minimum allowed UIDs must be less than the maximum allowed UIDs. + MinAllowedUidsGreaterThanMaxAllowedUids, } /// Enum for specifying the type of precompile operation. #[derive( @@ -1654,6 +1658,37 @@ pub mod pallet { pallet_subtensor::Pallet::::trim_to_max_allowed_uids(netuid, max_n)?; Ok(()) } + + /// The extrinsic sets the minimum allowed UIDs for a subnet. + /// It is only callable by the root account. 
+ #[pallet::call_index(75)] + #[pallet::weight(Weight::from_parts(18_800_000, 0) + .saturating_add(::DbWeight::get().reads(2_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_set_min_allowed_uids( + origin: OriginFor, + netuid: NetUid, + min_allowed_uids: u16, + ) -> DispatchResult { + ensure_root(origin)?; + ensure!( + pallet_subtensor::Pallet::::if_subnet_exist(netuid), + Error::::SubnetDoesNotExist + ); + ensure!( + min_allowed_uids < pallet_subtensor::Pallet::::get_max_allowed_uids(netuid), + Error::::MinAllowedUidsGreaterThanMaxAllowedUids + ); + ensure!( + min_allowed_uids < pallet_subtensor::Pallet::::get_subnetwork_n(netuid), + Error::::MinAllowedUidsGreaterThanCurrentUids + ); + pallet_subtensor::Pallet::::set_min_allowed_uids(netuid, min_allowed_uids); + log::debug!( + "MinAllowedUidsSet( netuid: {netuid:?} min_allowed_uids: {min_allowed_uids:?} ) " + ); + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index ccce3a1e49..0977fadca6 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2297,3 +2297,67 @@ fn test_trim_to_max_allowed_uids() { ); }); } + +#[test] +fn test_sudo_set_min_allowed_uids() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let to_be_set: u16 = 8; + add_network(netuid, 10); + MaxRegistrationsPerBlock::::insert(netuid, 256); + TargetRegistrationsPerInterval::::insert(netuid, 256); + + // Register some neurons + for i in 0..=16 { + register_ok_neuron(netuid, U256::from(i * 1000), U256::from(i * 1000 + i), 0); + } + + // Normal case + assert_ok!(AdminUtils::sudo_set_min_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + to_be_set + )); + assert_eq!(SubtensorModule::get_min_allowed_uids(netuid), to_be_set); + + // Non root + assert_err!( + AdminUtils::sudo_set_min_allowed_uids( + <::RuntimeOrigin>::signed(U256::from(0)), + netuid, + to_be_set + ), + DispatchError::BadOrigin + ); + + // Non 
existent subnet + assert_err!( + AdminUtils::sudo_set_min_allowed_uids( + <::RuntimeOrigin>::root(), + NetUid::from(42), + to_be_set + ), + Error::::SubnetDoesNotExist + ); + + // Min allowed uids greater than max allowed uids + assert_err!( + AdminUtils::sudo_set_min_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + SubtensorModule::get_max_allowed_uids(netuid) + 1 + ), + Error::::MinAllowedUidsGreaterThanMaxAllowedUids + ); + + // Min allowed uids greater than current uids + assert_err!( + AdminUtils::sudo_set_min_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + SubtensorModule::get_subnetwork_n(netuid) + 1 + ), + Error::::MinAllowedUidsGreaterThanCurrentUids + ); + }); +} diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 2fab5ecdb4..bc3b2c999d 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -413,5 +413,8 @@ mod events { /// - **netuid**: The network identifier. /// - **who**: The account ID of the user revealing the weights. TimelockedWeightsRevealed(NetUid, T::AccountId), + + /// The minimum allowed UIDs for a subnet have been set. 
+ MinAllowedUidsSet(NetUid, u16), } } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 3d33f68bf3..e8353f58df 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -1,7 +1,7 @@ use super::*; use frame_support::storage::IterableStorageDoubleMap; use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; -use sp_std::vec; +use sp_std::{cmp, vec}; use subtensor_runtime_common::NetUid; impl Pallet { @@ -137,7 +137,7 @@ impl Pallet { .into_iter() .enumerate() .collect::>(); - emissions.sort_by_key(|(_, emission)| std::cmp::Reverse(*emission)); + emissions.sort_by_key(|(_, emission)| cmp::Reverse(*emission)); // Remove uids from the end (lowest emitters) until we reach the new maximum let mut removed_uids = BTreeSet::new(); diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 30efd9bfd3..0485c0d19d 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -164,7 +164,8 @@ parameter_types! 
{ pub const InitialTempo: u16 = 360; pub const SelfOwnership: u64 = 2; pub const InitialImmunityPeriod: u16 = 2; - pub const InitialMaxAllowedUids: u16 = 2; + pub const InitialMinAllowedUids: u16 = 2; + pub const InitialMaxAllowedUids: u16 = 4; pub const InitialBondsMovingAverage: u64 = 900_000; pub const InitialBondsPenalty:u16 = u16::MAX; pub const InitialBondsResetOn: bool = false; @@ -404,6 +405,7 @@ impl crate::Config for Test { type InitialRho = InitialRho; type InitialAlphaSigmoidSteepness = InitialAlphaSigmoidSteepness; type InitialKappa = InitialKappa; + type InitialMinAllowedUids = InitialMinAllowedUids; type InitialMaxAllowedUids = InitialMaxAllowedUids; type InitialValidatorPruneLen = InitialValidatorPruneLen; type InitialScalingLawPower = InitialScalingLawPower; diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index f64962f094..5ee88ef3ca 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -459,6 +459,14 @@ impl Pallet { Self::deposit_event(Event::MinAllowedWeightSet(netuid, min_allowed_weights)); } + pub fn get_min_allowed_uids(netuid: NetUid) -> u16 { + MinAllowedUids::::get(netuid) + } + pub fn set_min_allowed_uids(netuid: NetUid, min_allowed: u16) { + MinAllowedUids::::insert(netuid, min_allowed); + Self::deposit_event(Event::MinAllowedUidsSet(netuid, min_allowed)); + } + pub fn get_max_allowed_uids(netuid: NetUid) -> u16 { MaxAllowedUids::::get(netuid) } diff --git a/pallets/transaction-fee/src/tests/mock.rs b/pallets/transaction-fee/src/tests/mock.rs index 2871f6a792..8f9eb729a3 100644 --- a/pallets/transaction-fee/src/tests/mock.rs +++ b/pallets/transaction-fee/src/tests/mock.rs @@ -156,7 +156,8 @@ parameter_types! 
{ pub const InitialTempo: u16 = 0; pub const SelfOwnership: u64 = 2; pub const InitialImmunityPeriod: u16 = 2; - pub const InitialMaxAllowedUids: u16 = 2; + pub const InitialMinAllowedUids: u16 = 2; + pub const InitialMaxAllowedUids: u16 = 4; pub const InitialBondsMovingAverage: u64 = 900_000; pub const InitialBondsPenalty: u16 = u16::MAX; pub const InitialBondsResetOn: bool = false; @@ -238,6 +239,7 @@ impl pallet_subtensor::Config for Test { type InitialRho = InitialRho; type InitialAlphaSigmoidSteepness = InitialAlphaSigmoidSteepness; type InitialKappa = InitialKappa; + type InitialMinAllowedUids = InitialMinAllowedUids; type InitialMaxAllowedUids = InitialMaxAllowedUids; type InitialValidatorPruneLen = InitialValidatorPruneLen; type InitialScalingLawPower = InitialScalingLawPower; diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 3833eac796..71ca76548e 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1176,6 +1176,7 @@ impl pallet_subtensor::Config for Runtime { type InitialRho = SubtensorInitialRho; type InitialAlphaSigmoidSteepness = SubtensorInitialAlphaSigmoidSteepness; type InitialKappa = SubtensorInitialKappa; + type InitialMinAllowedUids = SubtensorInitialMinAllowedUids; type InitialMaxAllowedUids = SubtensorInitialMaxAllowedUids; type InitialBondsMovingAverage = SubtensorInitialBondsMovingAverage; type InitialBondsPenalty = SubtensorInitialBondsPenalty; From ecc7f13ba34f057bd3f4cbfdbf3370b93273e367 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 2025 11:54:03 -0300 Subject: [PATCH 160/379] commit Cargo.lock --- pallets/admin-utils/src/tests/mod.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 0977fadca6..968b65a486 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2152,8 +2152,8 @@ fn test_trim_to_max_allowed_uids() { for target_uid in 0..max_n { if 
target_uid != uid { // Use some non-zero values to make the test more meaningful - let weight_value = ((uid + target_uid) % 1000) as u16; - let bond_value = ((uid * target_uid) % 1000) as u16; + let weight_value = (uid + target_uid) % 1000; + let bond_value = (uid * target_uid) % 1000; weights.push((target_uid, weight_value)); bonds.push((target_uid, bond_value)); } @@ -2225,20 +2225,20 @@ fn test_trim_to_max_allowed_uids() { U256::from(16000), ]; for hotkey in trimmed_hotkeys { - assert!(!Uids::::contains_key(netuid, &hotkey)); - assert!(!IsNetworkMember::::contains_key(&hotkey, netuid)); + assert!(!Uids::::contains_key(netuid, hotkey)); + assert!(!IsNetworkMember::::contains_key(hotkey, netuid)); assert!(!LastHotkeyEmissionOnNetuid::::contains_key( - &hotkey, netuid + hotkey, netuid )); assert!(!AlphaDividendsPerSubnet::::contains_key( - netuid, &hotkey + netuid, hotkey )); assert!(!TaoDividendsPerSubnet::::contains_key( - netuid, &hotkey + netuid, hotkey )); - assert!(!Axons::::contains_key(netuid, &hotkey)); - assert!(!NeuronCertificates::::contains_key(netuid, &hotkey)); - assert!(!Prometheus::::contains_key(netuid, &hotkey)); + assert!(!Axons::::contains_key(netuid, hotkey)); + assert!(!NeuronCertificates::::contains_key(netuid, hotkey)); + assert!(!Prometheus::::contains_key(netuid, hotkey)); } // Ensure trimmed uids weights and bonds have been cleared From 30f3f0dea2285685f645aa1070b8acefee013751 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 2025 11:56:07 -0300 Subject: [PATCH 161/379] cargo fmt --- pallets/admin-utils/src/tests/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 968b65a486..fa9f31a935 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2233,9 +2233,7 @@ fn test_trim_to_max_allowed_uids() { assert!(!AlphaDividendsPerSubnet::::contains_key( netuid, hotkey )); - 
assert!(!TaoDividendsPerSubnet::::contains_key( - netuid, hotkey - )); + assert!(!TaoDividendsPerSubnet::::contains_key(netuid, hotkey)); assert!(!Axons::::contains_key(netuid, hotkey)); assert!(!NeuronCertificates::::contains_key(netuid, hotkey)); assert!(!Prometheus::::contains_key(netuid, hotkey)); From aa4c566a94f8e9d5120489f5662b7328706a8ff7 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 2025 11:57:12 -0300 Subject: [PATCH 162/379] commit Cargo.lock --- pallets/subtensor/src/subnets/uids.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index e8353f58df..964d17a136 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -170,6 +170,7 @@ impl Pallet { } // Remove all storage items associated with this uid + #[allow(unknown_lints)] Keys::::remove(netuid, uid as u16); BlockAtRegistration::::remove(netuid, uid as u16); Weights::::remove(netuid, uid as u16); From b7e434ccae0cd6d3f10c5f2360467e3ac30af562 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 2025 12:10:50 -0300 Subject: [PATCH 163/379] fix merge --- pallets/admin-utils/src/benchmarking.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 919591ba39..871ceeb69e 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -356,6 +356,17 @@ mod benchmarks { #[extrinsic_call] _(RawOrigin::Root, 5u16/*version*/)/*sudo_set_commit_reveal_version()*/; } + + #[benchmark] + fn sudo_set_owner_immune_neuron_limit() { + pallet_subtensor::Pallet::::init_new_network( + 1u16.into(), /*netuid*/ + 1u16, /*sudo_tempo*/ + ); + + #[extrinsic_call] + _(RawOrigin::Root, 1u16.into()/*netuid*/, 5u16/*immune_neurons*/)/*sudo_set_owner_immune_neuron_limit()*/; + } #[benchmark] fn sudo_trim_to_max_allowed_uids() { From 
26798bd9a11faac5b5afd33e89ab80a2df77e231 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 2025 12:14:25 -0300 Subject: [PATCH 164/379] fix merge 2 --- pallets/admin-utils/src/benchmarking.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 871ceeb69e..a00058d2f7 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -356,7 +356,7 @@ mod benchmarks { #[extrinsic_call] _(RawOrigin::Root, 5u16/*version*/)/*sudo_set_commit_reveal_version()*/; } - + #[benchmark] fn sudo_set_owner_immune_neuron_limit() { pallet_subtensor::Pallet::::init_new_network( From 8089518a62a022dfcbeb5a3b2d075f9de510fc4f Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Tue, 9 Sep 2025 18:23:19 +0300 Subject: [PATCH 165/379] Update EVM tests --- .../neuron.precompile.reveal-weights.test.ts | 18 ++++++++++++++++-- .../test/neuron.precompile.set-weights.test.ts | 18 ++++++++++++++++-- .../test/staking.precompile.reward.test.ts | 16 +++++++++++++++- 3 files changed, 47 insertions(+), 5 deletions(-) diff --git a/evm-tests/test/neuron.precompile.reveal-weights.test.ts b/evm-tests/test/neuron.precompile.reveal-weights.test.ts index 4ac63468db..52ddc91967 100644 --- a/evm-tests/test/neuron.precompile.reveal-weights.test.ts +++ b/evm-tests/test/neuron.precompile.reveal-weights.test.ts @@ -1,5 +1,5 @@ import * as assert from "assert"; -import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair, waitForTransactionWithRetry } from "../src/substrate" import { devnet } from "@polkadot-api/descriptors" import { PolkadotSigner, TypedApi } from "polkadot-api"; import { convertPublicKeyToSs58, convertH160ToSS58 } from "../src/address-utils" @@ -70,6 +70,20 @@ describe("Test neuron precompile reveal weights", () => { await startCall(api, netuid, 
coldkey) console.log("test the case on subnet ", netuid) + // Disable admin freeze window and owner hyperparam rate limiting for tests + { + const alice = getAliceSigner() + + // Set AdminFreezeWindow to 0 + const setFreezeWindow = api.tx.AdminUtils.sudo_set_admin_freeze_window({ window: 0 }) + const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) + await waitForTransactionWithRetry(api, sudoFreezeTx, alice) + + // Set OwnerHyperparamRateLimit to 0 + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) + const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) + await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) + } await setWeightsSetRateLimit(api, netuid, BigInt(0)) @@ -164,4 +178,4 @@ describe("Test neuron precompile reveal weights", () => { assert.ok(weight[1] !== undefined) } }) -}); \ No newline at end of file +}); diff --git a/evm-tests/test/neuron.precompile.set-weights.test.ts b/evm-tests/test/neuron.precompile.set-weights.test.ts index 1c9f62e773..4ecc0b36db 100644 --- a/evm-tests/test/neuron.precompile.set-weights.test.ts +++ b/evm-tests/test/neuron.precompile.set-weights.test.ts @@ -1,6 +1,6 @@ import * as assert from "assert"; -import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair, waitForTransactionWithRetry } from "../src/substrate" import { devnet } from "@polkadot-api/descriptors" import { TypedApi } from "polkadot-api"; import { convertH160ToSS58, convertPublicKeyToSs58, } from "../src/address-utils" @@ -38,6 +38,20 @@ describe("Test neuron precompile contract, set weights function", () => { await burnedRegister(api, netuid, convertH160ToSS58(wallet.address), coldkey) const uid = await api.query.SubtensorModule.Uids.getValue(netuid, convertH160ToSS58(wallet.address)) assert.notEqual(uid, undefined) + // Disable admin freeze window and owner hyperparam rate 
limiting for tests + { + const alice = getAliceSigner() + + // Set AdminFreezeWindow to 0 + const setFreezeWindow = api.tx.AdminUtils.sudo_set_admin_freeze_window({ window: 0 }) + const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) + await waitForTransactionWithRetry(api, sudoFreezeTx, alice) + + // Set OwnerHyperparamRateLimit to 0 + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) + const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) + await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) + } // disable reveal and enable direct set weights await setCommitRevealWeightsEnabled(api, netuid, false) await setWeightsSetRateLimit(api, netuid, BigInt(0)) @@ -68,4 +82,4 @@ describe("Test neuron precompile contract, set weights function", () => { }); } }) -}); \ No newline at end of file +}); diff --git a/evm-tests/test/staking.precompile.reward.test.ts b/evm-tests/test/staking.precompile.reward.test.ts index 79ad977515..108e0ed88c 100644 --- a/evm-tests/test/staking.precompile.reward.test.ts +++ b/evm-tests/test/staking.precompile.reward.test.ts @@ -1,5 +1,5 @@ import * as assert from "assert"; -import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" +import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair, waitForTransactionWithRetry } from "../src/substrate" import { devnet } from "@polkadot-api/descriptors" import { TypedApi } from "polkadot-api"; import { convertPublicKeyToSs58 } from "../src/address-utils" @@ -39,6 +39,20 @@ describe("Test neuron precompile reward", () => { await startCall(api, netuid, coldkey) console.log("test the case on subnet ", netuid) + // Disable admin freeze window and owner hyperparam rate limiting for tests + { + const alice = getAliceSigner() + + // Set AdminFreezeWindow to 0 + const setFreezeWindow = api.tx.AdminUtils.sudo_set_admin_freeze_window({ window: 0 }) + const sudoFreezeTx = 
api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) + await waitForTransactionWithRetry(api, sudoFreezeTx, alice) + + // Set OwnerHyperparamRateLimit to 0 + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) + const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) + await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) + } await setTxRateLimit(api, BigInt(0)) await setTempo(api, root_netuid, root_tempo) From c19b71fd90d14f79cff1b5405479c6654d899a0a Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 2025 12:27:56 -0300 Subject: [PATCH 166/379] cargo fmt --- pallets/admin-utils/src/benchmarking.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index a00058d2f7..7a9ff00e9e 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -366,7 +366,7 @@ mod benchmarks { #[extrinsic_call] _(RawOrigin::Root, 1u16.into()/*netuid*/, 5u16/*immune_neurons*/)/*sudo_set_owner_immune_neuron_limit()*/; - } + } #[benchmark] fn sudo_trim_to_max_allowed_uids() { From a257909b8b5b9785c1262e7cec914e2edfa36221 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 9 Sep 2025 12:15:28 -0400 Subject: [PATCH 167/379] Add regular rate limiting for subsubnet parameters --- pallets/admin-utils/src/lib.rs | 26 ++++++++++++++++++-- pallets/subtensor/src/lib.rs | 6 ++--- pallets/subtensor/src/utils/rate_limiting.rs | 4 +++ 3 files changed, 31 insertions(+), 5 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 2131d4dbce..7a18794c8f 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1630,8 +1630,19 @@ pub mod pallet { netuid: NetUid, subsub_count: SubId, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = 
pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + )?; + pallet_subtensor::Pallet::::do_set_subsubnet_count(netuid, subsub_count)?; + + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + ); Ok(()) } @@ -1645,8 +1656,19 @@ pub mod pallet { netuid: NetUid, maybe_split: Option>, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + )?; + pallet_subtensor::Pallet::::do_set_emission_split(netuid, maybe_split)?; + + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + ); Ok(()) } } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index da4dfe9e1c..bb68912465 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1825,9 +1825,9 @@ pub mod pallet { SubId::from(8) } #[pallet::type_value] - /// -- ITEM (Number of tempos in subnet super-block) - pub fn SuperBlockTempos() -> u16 { - 20 + /// -- ITEM (Rate limit for subsubnet count updates) + pub fn SetSubsubnetCountRateLimit() -> u64 { + 7200 } #[pallet::storage] /// --- MAP ( netuid ) --> Current number of sub-subnets diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index de75086ea1..e68b7f066f 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -11,6 +11,7 @@ pub enum TransactionType { RegisterNetwork, SetWeightsVersionKey, SetSNOwnerHotkey, + SubsubnetParameterUpdate, } /// Implement conversion from TransactionType to u16 @@ -23,6 +24,7 @@ impl From for u16 { TransactionType::RegisterNetwork => 3, TransactionType::SetWeightsVersionKey => 4, 
TransactionType::SetSNOwnerHotkey => 5, + TransactionType::SubsubnetParameterUpdate => 7, } } } @@ -36,6 +38,7 @@ impl From for TransactionType { 3 => TransactionType::RegisterNetwork, 4 => TransactionType::SetWeightsVersionKey, 5 => TransactionType::SetSNOwnerHotkey, + 7 => TransactionType::SubsubnetParameterUpdate, _ => TransactionType::Unknown, } } @@ -50,6 +53,7 @@ impl Pallet { TransactionType::SetChildren => 150, // 30 minutes TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), TransactionType::RegisterNetwork => NetworkRateLimit::::get(), + TransactionType::SubsubnetParameterUpdate => SetSubsubnetCountRateLimit::::get(), TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, From 749fc1196245b910e5e7a52348ed714c800f26b9 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 9 Sep 2025 16:50:36 -0400 Subject: [PATCH 168/379] Fix immune owner UIDs --- .../subtensor/src/coinbase/run_coinbase.rs | 21 ++----- pallets/subtensor/src/subnets/registration.rs | 52 +++++++++++++-- pallets/subtensor/src/tests/coinbase.rs | 63 +------------------ pallets/subtensor/src/tests/registration.rs | 42 +++++++++++++ 4 files changed, 97 insertions(+), 81 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index c76e6941e3..8e78d7d945 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -428,7 +428,7 @@ impl Pallet { (prop_alpha_dividends, tao_dividends) } - fn get_immune_owner_hotkeys(netuid: NetUid, coldkey: &T::AccountId) -> Vec { + fn get_owner_hotkeys(netuid: NetUid, coldkey: &T::AccountId) -> Vec { // Gather (block, uid, hotkey) only for hotkeys that have a UID and a registration block. 
let mut triples: Vec<(u64, u16, T::AccountId)> = OwnedHotkeys::::get(coldkey) .into_iter() @@ -445,28 +445,19 @@ impl Pallet { // Recent registration is priority so that we can let older keys expire (get non-immune) triples.sort_by(|(b1, u1, _), (b2, u2, _)| b2.cmp(b1).then(u1.cmp(u2))); - // Keep first ImmuneOwnerUidsLimit - let limit = ImmuneOwnerUidsLimit::::get(netuid).into(); - if triples.len() > limit { - triples.truncate(limit); - } - // Project to just hotkeys - let mut immune_hotkeys: Vec = + let mut owner_hotkeys: Vec = triples.into_iter().map(|(_, _, hk)| hk).collect(); // Insert subnet owner hotkey in the beginning of the list if valid and not // already present if let Ok(owner_hk) = SubnetOwnerHotkey::::try_get(netuid) { - if Uids::::get(netuid, &owner_hk).is_some() && !immune_hotkeys.contains(&owner_hk) { - immune_hotkeys.insert(0, owner_hk); - if immune_hotkeys.len() > limit { - immune_hotkeys.truncate(limit); - } + if Uids::::get(netuid, &owner_hk).is_some() && !owner_hotkeys.contains(&owner_hk) { + owner_hotkeys.insert(0, owner_hk); } } - immune_hotkeys + owner_hotkeys } pub fn distribute_dividends_and_incentives( @@ -498,7 +489,7 @@ impl Pallet { // Distribute mining incentives. 
let subnet_owner_coldkey = SubnetOwner::::get(netuid); - let owner_hotkeys = Self::get_immune_owner_hotkeys(netuid, &subnet_owner_coldkey); + let owner_hotkeys = Self::get_owner_hotkeys(netuid, &subnet_owner_coldkey); log::debug!("incentives: owner hotkeys: {owner_hotkeys:?}"); for (hotkey, incentive) in incentives { log::debug!("incentives: hotkey: {incentive:?}"); diff --git a/pallets/subtensor/src/subnets/registration.rs b/pallets/subtensor/src/subnets/registration.rs index c8f6b04cb6..6f11921dba 100644 --- a/pallets/subtensor/src/subnets/registration.rs +++ b/pallets/subtensor/src/subnets/registration.rs @@ -388,6 +388,47 @@ impl Pallet { real_hash } + fn get_immune_owner_hotkeys(netuid: NetUid, coldkey: &T::AccountId) -> Vec { + // Gather (block, uid, hotkey) only for hotkeys that have a UID and a registration block. + let mut triples: Vec<(u64, u16, T::AccountId)> = OwnedHotkeys::::get(coldkey) + .into_iter() + .filter_map(|hotkey| { + // Uids must exist, filter_map ignores hotkeys without UID + Uids::::get(netuid, &hotkey).map(|uid| { + let block = BlockAtRegistration::::get(netuid, uid); + (block, uid, hotkey) + }) + }) + .collect(); + + // Sort by BlockAtRegistration (descending), then by uid (ascending) + // Recent registration is priority so that we can let older keys expire (get non-immune) + triples.sort_by(|(b1, u1, _), (b2, u2, _)| b2.cmp(b1).then(u1.cmp(u2))); + + // Keep first ImmuneOwnerUidsLimit + let limit = ImmuneOwnerUidsLimit::::get(netuid).into(); + if triples.len() > limit { + triples.truncate(limit); + } + + // Project to just hotkeys + let mut immune_hotkeys: Vec = + triples.into_iter().map(|(_, _, hk)| hk).collect(); + + // Insert subnet owner hotkey in the beginning of the list if valid and not + // already present + if let Ok(owner_hk) = SubnetOwnerHotkey::::try_get(netuid) { + if Uids::::get(netuid, &owner_hk).is_some() && !immune_hotkeys.contains(&owner_hk) { + immune_hotkeys.insert(0, owner_hk); + if immune_hotkeys.len() > limit { + 
immune_hotkeys.truncate(limit); + } + } + } + + immune_hotkeys + } + /// Determine which peer to prune from the network by finding the element with the lowest pruning score out of /// immunity period. If there is a tie for lowest pruning score, the neuron registered earliest is pruned. /// If all neurons are in immunity period, the neuron with the lowest pruning score is pruned. If there is a tie for @@ -411,13 +452,14 @@ impl Pallet { return 0; // If there are no neurons in this network. } + // Get the list of immortal (top-k by registration time of owner owned) keys + let subnet_owner_coldkey = SubnetOwner::::get(netuid); + let immortal_hotkeys = Self::get_immune_owner_hotkeys(netuid, &subnet_owner_coldkey); for neuron_uid in 0..neurons_n { - // Do not deregister the owner's hotkey from the `SubnetOwnerHotkey` map + // Do not deregister the owner's owned hotkeys if let Ok(hotkey) = Self::get_hotkey_for_net_and_uid(netuid, neuron_uid) { - if let Ok(top_sn_owner_hotkey) = SubnetOwnerHotkey::::try_get(netuid) { - if top_sn_owner_hotkey == hotkey { - continue; - } + if immortal_hotkeys.contains(&hotkey) { + continue; } } diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 30cef8556f..e80491fa55 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -1857,62 +1857,6 @@ fn test_incentive_to_subnet_owners_hotkey_is_burned() { }); } -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_incentive_to_subnet_owners_hotkey_is_burned_with_limit --exact --show-output --nocapture -#[test] -fn test_incentive_to_subnet_owners_hotkey_is_burned_with_limit() { - new_test_ext(1).execute_with(|| { - let subnet_owner_ck = U256::from(0); - let subnet_owner_hk = U256::from(1); - - // Other hk owned by owner - let other_hk = U256::from(3); - Owner::::insert(other_hk, subnet_owner_ck); - OwnedHotkeys::::insert(subnet_owner_ck, 
vec![subnet_owner_hk, other_hk]); - - let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); - Uids::::insert(netuid, other_hk, 1); - - // Set the burn key limit to 1 - testing the limits - ImmuneOwnerUidsLimit::::insert(netuid, 1); - - let pending_tao: u64 = 1_000_000_000; - let pending_alpha = AlphaCurrency::ZERO; // None to valis - let owner_cut = AlphaCurrency::ZERO; - let mut incentives: BTreeMap = BTreeMap::new(); - - // Give incentive to other_hk - incentives.insert(other_hk, 10_000_000.into()); - - // Give incentives to subnet_owner_hk - incentives.insert(subnet_owner_hk, 10_000_000.into()); - - // Verify stake before - let subnet_owner_stake_before = - SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); - assert_eq!(subnet_owner_stake_before, 0.into()); - let other_stake_before = SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk, netuid); - assert_eq!(other_stake_before, 0.into()); - - // Distribute dividends and incentives - SubtensorModule::distribute_dividends_and_incentives( - netuid, - owner_cut, - incentives, - BTreeMap::new(), - BTreeMap::new(), - ); - - // Verify stake after - let subnet_owner_stake_after = - SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); - assert_eq!(subnet_owner_stake_after, 0.into()); - let other_stake_after = SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk, netuid); - - // Testing the limit - should be not burned - assert!(other_stake_after > 0.into()); - }); -} - // Test that if number of sn owner hotkeys is greater than ImmuneOwnerUidsLimit, then the ones with // higher BlockAtRegistration are used to burn // SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::coinbase::test_burn_key_sorting --exact --show-output --nocapture @@ -1949,9 +1893,6 @@ fn test_burn_key_sorting() { Uids::::insert(netuid, other_hk_2, 3); Uids::::insert(netuid, other_hk_3, 2); - // Set the burn key limit to 3 because we also have sn 
owner - ImmuneOwnerUidsLimit::::insert(netuid, 3); - let pending_tao: u64 = 1_000_000_000; let pending_alpha = AlphaCurrency::ZERO; // None to valis let owner_cut = AlphaCurrency::ZERO; @@ -1979,7 +1920,7 @@ fn test_burn_key_sorting() { SubtensorModule::get_stake_for_hotkey_on_subnet(&subnet_owner_hk, netuid); assert_eq!(subnet_owner_stake_after, 0.into()); - // Testing the limits - HK1 and HK3 should be burned, HK2 should be not burned + // No burn limits, all HKs should be burned let other_stake_after_1 = SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk_1, netuid); let other_stake_after_2 = @@ -1987,7 +1928,7 @@ fn test_burn_key_sorting() { let other_stake_after_3 = SubtensorModule::get_stake_for_hotkey_on_subnet(&other_hk_3, netuid); assert_eq!(other_stake_after_1, 0.into()); - assert!(other_stake_after_2 > 0.into()); + assert_eq!(other_stake_after_2, 0.into()); assert_eq!(other_stake_after_3, 0.into()); }); } diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs index 7ccb591620..e2a5f1688c 100644 --- a/pallets/subtensor/src/tests/registration.rs +++ b/pallets/subtensor/src/tests/registration.rs @@ -1,5 +1,6 @@ #![allow(clippy::unwrap_used)] +use crate::*; use approx::assert_abs_diff_eq; use frame_support::dispatch::DispatchInfo; use frame_support::sp_runtime::{DispatchError, transaction_validity::TransactionSource}; @@ -1335,6 +1336,47 @@ fn test_registration_get_uid_to_prune_none_in_immunity_period() { }); } +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::registration::test_registration_get_uid_to_prune_owner_immortality --exact --show-output --nocapture +#[test] +fn test_registration_get_uid_to_prune_owner_immortality() { + new_test_ext(1).execute_with(|| { + let subnet_owner_ck = U256::from(0); + let subnet_owner_hk = U256::from(1); + + // Other hk owned by owner + let other_owner_hk = U256::from(2); + Owner::::insert(other_owner_hk, subnet_owner_ck); + 
OwnedHotkeys::::insert(subnet_owner_ck, vec![subnet_owner_hk, other_owner_hk]); + + // Another hk not owned by owner + let non_owner_hk = U256::from(3); + + let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + BlockAtRegistration::::insert(netuid, 1, 1); + BlockAtRegistration::::insert(netuid, 2, 2); + Uids::::insert(netuid, other_owner_hk, 1); + Uids::::insert(netuid, non_owner_hk, 2); + ImmunityPeriod::::insert(netuid, 1); + SubnetworkN::::insert(netuid, 3); + + step_block(10); + + // Set the burn key limit to 1 - testing the limits + ImmuneOwnerUidsLimit::::insert(netuid, 1); + + // Set lower pruning score to sn owner keys + PruningScores::::insert(netuid, vec![0, 0, 1]); + + // Other owner's hotkey is pruned because there's only 1 immune key and + // pruning score of owner key is lower + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 1); + + // Set the burn key limit to 2 - both owner keys are immune + ImmuneOwnerUidsLimit::::insert(netuid, 1); + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 2); + }); +} + #[test] fn test_registration_pruning() { new_test_ext(1).execute_with(|| { From f37865e6b3ed44483278ded7ed048e8f96809d2e Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 9 Sep 2025 17:15:08 -0400 Subject: [PATCH 169/379] More immortality tests --- pallets/subtensor/src/tests/registration.rs | 71 +++++++++++++++++---- 1 file changed, 58 insertions(+), 13 deletions(-) diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs index e2a5f1688c..23013d9b70 100644 --- a/pallets/subtensor/src/tests/registration.rs +++ b/pallets/subtensor/src/tests/registration.rs @@ -1340,6 +1340,55 @@ fn test_registration_get_uid_to_prune_none_in_immunity_period() { #[test] fn test_registration_get_uid_to_prune_owner_immortality() { new_test_ext(1).execute_with(|| { + [ + // Burn key limit to 1 - testing the limits + // Other owner's hotkey is pruned because there's only 1 immune key and + // 
pruning score of owner key is lower + (1, 1), + // Burn key limit to 2 - both owner keys are immune + (2, 2), + ] + .iter() + .for_each(|(limit, uid_to_prune)| { + let subnet_owner_ck = U256::from(0); + let subnet_owner_hk = U256::from(1); + + // Other hk owned by owner + let other_owner_hk = U256::from(2); + Owner::::insert(other_owner_hk, subnet_owner_ck); + OwnedHotkeys::::insert(subnet_owner_ck, vec![subnet_owner_hk, other_owner_hk]); + + // Another hk not owned by owner + let non_owner_hk = U256::from(3); + + let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); + BlockAtRegistration::::insert(netuid, 1, 1); + BlockAtRegistration::::insert(netuid, 2, 2); + Uids::::insert(netuid, other_owner_hk, 1); + Uids::::insert(netuid, non_owner_hk, 2); + Keys::::insert(netuid, 1, other_owner_hk); + Keys::::insert(netuid, 2, non_owner_hk); + ImmunityPeriod::::insert(netuid, 1); + SubnetworkN::::insert(netuid, 3); + + step_block(10); + + ImmuneOwnerUidsLimit::::insert(netuid, *limit); + + // Set lower pruning score to sn owner keys + PruningScores::::insert(netuid, vec![0, 0, 1]); + + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), *uid_to_prune); + }); + }); +} + +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::registration::test_registration_get_uid_to_prune_owner_immortality_all_immune --exact --show-output --nocapture +#[test] +fn test_registration_get_uid_to_prune_owner_immortality_all_immune() { + new_test_ext(1).execute_with(|| { + let limit = 2; + let uid_to_prune = 2; let subnet_owner_ck = U256::from(0); let subnet_owner_hk = U256::from(1); @@ -1352,28 +1401,24 @@ fn test_registration_get_uid_to_prune_owner_immortality() { let non_owner_hk = U256::from(3); let netuid = add_dynamic_network(&subnet_owner_hk, &subnet_owner_ck); - BlockAtRegistration::::insert(netuid, 1, 1); - BlockAtRegistration::::insert(netuid, 2, 2); + BlockAtRegistration::::insert(netuid, 0, 12); + 
BlockAtRegistration::::insert(netuid, 1, 11); + BlockAtRegistration::::insert(netuid, 2, 10); Uids::::insert(netuid, other_owner_hk, 1); Uids::::insert(netuid, non_owner_hk, 2); - ImmunityPeriod::::insert(netuid, 1); + Keys::::insert(netuid, 1, other_owner_hk); + Keys::::insert(netuid, 2, non_owner_hk); + ImmunityPeriod::::insert(netuid, 100); SubnetworkN::::insert(netuid, 3); - step_block(10); + step_block(20); - // Set the burn key limit to 1 - testing the limits - ImmuneOwnerUidsLimit::::insert(netuid, 1); + ImmuneOwnerUidsLimit::::insert(netuid, limit); // Set lower pruning score to sn owner keys PruningScores::::insert(netuid, vec![0, 0, 1]); - // Other owner's hotkey is pruned because there's only 1 immune key and - // pruning score of owner key is lower - assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 1); - - // Set the burn key limit to 2 - both owner keys are immune - ImmuneOwnerUidsLimit::::insert(netuid, 1); - assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), 2); + assert_eq!(SubtensorModule::get_neuron_to_prune(netuid), uid_to_prune); }); } From 268515e3e4ae6c589238416b7cfd64ddb1f9235e Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 9 Sep 2025 17:16:07 -0400 Subject: [PATCH 170/379] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 40c76514d4..f938105bc6 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 313, + spec_version: 314, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 75ba336d64ef02fc62a843fc09b77eb751e05911 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 9 Sep 2025 17:21:56 -0400 Subject: [PATCH 171/379] Custom subsubnet count setting rate limit for fast blocks --- pallets/subtensor/src/lib.rs | 4 ++-- pallets/subtensor/src/utils/rate_limiting.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index bb68912465..b2bd5d8532 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1826,8 +1826,8 @@ pub mod pallet { } #[pallet::type_value] /// -- ITEM (Rate limit for subsubnet count updates) - pub fn SetSubsubnetCountRateLimit() -> u64 { - 7200 + pub fn SubsubnetCountSetRateLimit() -> u64 { + prod_or_fast!(7_200, 0) } #[pallet::storage] /// --- MAP ( netuid ) --> Current number of sub-subnets diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index e68b7f066f..e346279a42 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -53,7 +53,7 @@ impl Pallet { TransactionType::SetChildren => 150, // 30 minutes TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), TransactionType::RegisterNetwork => NetworkRateLimit::::get(), - TransactionType::SubsubnetParameterUpdate => SetSubsubnetCountRateLimit::::get(), + TransactionType::SubsubnetParameterUpdate => SubsubnetCountSetRateLimit::::get(), TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, From 9c221adf159b7388d7d0310e0aaaf3dd946fbe03 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 9 Sep 2025 17:42:43 -0400 Subject: [PATCH 172/379] Fix merge --- pallets/subtensor/src/epoch/run_epoch.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs 
b/pallets/subtensor/src/epoch/run_epoch.rs index ccb1f191a3..3dfcf0ac05 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -108,7 +108,7 @@ impl Pallet { let server_emission = extract_from_sorted_terms!(terms_sorted, server_emission); Self::deposit_event(Event::IncentiveAlphaEmittedToMiners { - netuid, + netuid: netuid_index, emissions: server_emission, }); From db80cee80523056826ce01548ae1fe7389daf0e1 Mon Sep 17 00:00:00 2001 From: bdhimes Date: Wed, 10 Sep 2025 00:01:47 +0200 Subject: [PATCH 173/379] Correct the as-bytes version of various symbols. --- pallets/subtensor/src/subnets/symbols.rs | 126 +++++++++++------------ 1 file changed, 63 insertions(+), 63 deletions(-) diff --git a/pallets/subtensor/src/subnets/symbols.rs b/pallets/subtensor/src/subnets/symbols.rs index 4b37e9b6b0..c667fa6424 100644 --- a/pallets/subtensor/src/subnets/symbols.rs +++ b/pallets/subtensor/src/subnets/symbols.rs @@ -26,7 +26,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xCF\x80", // π (Pi, 16) b"\xCF\x81", // ρ (Rho, 17) b"\xCF\x83", // σ (Sigma, 18) - b"t", // t (Tau, 19) + b"\x74", // t (Tau, 19) b"\xCF\x85", // υ (Upsilon, 20) b"\xCF\x86", // φ (Phi, 21) b"\xCF\x87", // χ (Chi, 22) @@ -96,7 +96,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE1\x9A\xA6", // ᚦ (Thurisaz, giant, 83) b"\xE1\x9A\xA8", // ᚨ (Ansuz, god, 84) b"\xE1\x9A\xB1", // ᚱ (Raidho, ride, 85) - b"\xE1\x9A\xB3", // ᚲ (Kaunan, ulcer, 86) + b"\xE1\x9A\xB2", // ᚲ (Kaunan, ulcer, 86) b"\xD0\xAB", // Ы (Cyrillic Yeru, 87) b"\xE1\x9B\x89", // ᛉ (Algiz, protection, 88) b"\xE1\x9B\x92", // ᛒ (Berkanan, birch, 89) @@ -130,7 +130,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xD0\x83", // Ѓ (Gje, 113) b"\xD0\x84", // Є (Ukrainian Ie, 114) b"\xD0\x85", // Ѕ (Dze, 115) - b"\xD1\x8A", // Ъ (Hard sign, 116) + b"\xD0\xAA", // Ъ (Hard sign, 116) // Coptic Alphabet b"\xE2\xB2\x80", // Ⲁ (Alfa, 117) b"\xE2\xB2\x81", // ⲁ (Small Alfa, 118) @@ -145,12 +145,12 @@ pub static SYMBOLS: 
[&[u8]; 439] = [ b"\xF0\x91\x80\x83", // 𑀃 (Ii, 126) b"\xF0\x91\x80\x85", // 𑀅 (U, 127) // End of Sinhala Alphabet - b"\xE0\xB6\xB1", // ඲ (La, 128) - b"\xE0\xB6\xB2", // ඳ (Va, 129) - b"\xE0\xB6\xB3", // ප (Sha, 130) - b"\xE0\xB6\xB4", // ඵ (Ssa, 131) - b"\xE0\xB6\xB5", // බ (Sa, 132) - b"\xE0\xB6\xB6", // භ (Ha, 133) + b"\xE0\xB6\xB2", // ඲ (La, 128) + b"\xE0\xB6\xB3", // ඳ (Va, 129) + b"\xE0\xB6\xB4", // ප (Sha, 130) + b"\xE0\xB6\xB5", // ඵ (Ssa, 131) + b"\xE0\xB6\xB6", // බ (Sa, 132) + b"\xE0\xB6\xB7", // භ (Ha, 133) // Glagolitic Alphabet b"\xE2\xB0\x80", // Ⰰ (Az, 134) b"\xE2\xB0\x81", // Ⰱ (Buky, 135) @@ -231,42 +231,42 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE0\xB8\xAD", // อ (O Ang, 209) b"\xE0\xB8\xAE", // ฮ (Ho Nokhuk, 210) // Hangul Alphabet (Korean) - b"\xE1\x84\x80", // ㄱ (Giyeok, 211) - b"\xE1\x84\x81", // ㄴ (Nieun, 212) - b"\xE1\x84\x82", // ㄷ (Digeut, 213) - b"\xE1\x84\x83", // ㄹ (Rieul, 214) - b"\xE1\x84\x84", // ㅁ (Mieum, 215) - b"\xE1\x84\x85", // ㅂ (Bieup, 216) - b"\xE1\x84\x86", // ㅅ (Siot, 217) - b"\xE1\x84\x87", // ㅇ (Ieung, 218) - b"\xE1\x84\x88", // ㅈ (Jieut, 219) - b"\xE1\x84\x89", // ㅊ (Chieut, 220) - b"\xE1\x84\x8A", // ㅋ (Kieuk, 221) - b"\xE1\x84\x8B", // ㅌ (Tieut, 222) - b"\xE1\x84\x8C", // ㅍ (Pieup, 223) - b"\xE1\x84\x8D", // ㅎ (Hieut, 224) + b"\xE3\x84\xB1", // ㄱ (Giyeok, 211) + b"\xE3\x84\xB4", // ㄴ (Nieun, 212) + b"\xE3\x84\xB7", // ㄷ (Digeut, 213) + b"\xE3\x84\xB9", // ㄹ (Rieul, 214) + b"\xE3\x85\x81", // ㅁ (Mieum, 215) + b"\xE3\x85\x82", // ㅂ (Bieup, 216) + b"\xE3\x85\x85", // ㅅ (Siot, 217) + b"\xE3\x85\x87", // ㅇ (Ieung, 218) + b"\xE3\x85\x88", // ㅈ (Jieut, 219) + b"\xE3\x85\x8A", // ㅊ (Chieut, 220) + b"\xE3\x85\x8B", // ㅋ (Kieuk, 221) + b"\xE3\x85\x8C", // ㅌ (Tieut, 222) + b"\xE3\x85\x8D", // ㅍ (Pieup, 223) + b"\xE3\x85\x8E", // ㅎ (Hieut, 224) // Hangul Vowels - b"\xE1\x85\xA1", // ㅏ (A, 225) - b"\xE1\x85\xA2", // ㅐ (Ae, 226) - b"\xE1\x85\xA3", // ㅑ (Ya, 227) - b"\xE1\x85\xA4", // ㅒ (Yae, 228) - b"\xE1\x85\xA5", // ㅓ (Eo, 
229) - b"\xE1\x85\xA6", // ㅔ (E, 230) - b"\xE1\x85\xA7", // ㅕ (Yeo, 231) - b"\xE1\x85\xA8", // ㅖ (Ye, 232) - b"\xE1\x85\xA9", // ㅗ (O, 233) - b"\xE1\x85\xAA", // ㅘ (Wa, 234) - b"\xE1\x85\xAB", // ㅙ (Wae, 235) - b"\xE1\x85\xAC", // ㅚ (Oe, 236) - b"\xE1\x85\xAD", // ㅛ (Yo, 237) - b"\xE1\x85\xAE", // ㅜ (U, 238) - b"\xE1\x85\xAF", // ㅝ (Weo, 239) - b"\xE1\x85\xB0", // ㅞ (We, 240) - b"\xE1\x85\xB1", // ㅟ (Wi, 241) - b"\xE1\x85\xB2", // ㅠ (Yu, 242) - b"\xE1\x85\xB3", // ㅡ (Eu, 243) - b"\xE1\x85\xB4", // ㅢ (Ui, 244) - b"\xE1\x85\xB5", // ㅣ (I, 245) + b"\xE3\x85\x8F", // ㅏ (A, 225) + b"\xE3\x85\x90", // ㅐ (Ae, 226) + b"\xE3\x85\x91", // ㅑ (Ya, 227) + b"\xE3\x85\x92", // ㅒ (Yae, 228) + b"\xE3\x85\x93", // ㅓ (Eo, 229) + b"\xE3\x85\x94", // ㅔ (E, 230) + b"\xE3\x85\x95", // ㅕ (Yeo, 231) + b"\xE3\x85\x96", // ㅖ (Ye, 232) + b"\xE3\x85\x97", // ㅗ (O, 233) + b"\xE3\x85\x98", // ㅘ (Wa, 234) + b"\xE3\x85\x99", // ㅙ (Wae, 235) + b"\xE3\x85\x9A", // ㅚ (Oe, 236) + b"\xE3\x85\x9B", // ㅛ (Yo, 237) + b"\xE3\x85\x9C", // ㅜ (U, 238) + b"\xE3\x85\x9D", // ㅝ (Weo, 239) + b"\xE3\x85\x9E", // ㅞ (We, 240) + b"\xE3\x85\x9F", // ㅟ (Wi, 241) + b"\xE3\x85\xA0", // ㅠ (Yu, 242) + b"\xE3\x85\xA1", // ㅡ (Eu, 243) + b"\xE3\x85\xA2", // ㅢ (Ui, 244) + b"\xE3\x85\xA3", // ㅣ (I, 245) // Ethiopic Alphabet b"\xE1\x8A\xA0", // አ (Glottal A, 246) b"\xE1\x8A\xA1", // ኡ (Glottal U, 247) @@ -290,13 +290,13 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE1\x8A\xB4", // ኴ (Ke, 265) b"\xE1\x8A\xB5", // ኵ (Kwe, 266) b"\xE1\x8A\xB6", // ኶ (Ko, 267) - b"\xE1\x8A\x90", // ጐ (Go, 268) - b"\xE1\x8A\x91", // ጑ (Gu, 269) - b"\xE1\x8A\x92", // ጒ (Gi, 270) - b"\xE1\x8A\x93", // መ (Gua, 271) - b"\xE1\x8A\x94", // ጔ (Ge, 272) - b"\xE1\x8A\x95", // ጕ (Gwe, 273) - b"\xE1\x8A\x96", // ጖ (Go, 274) + b"\xE1\x8C\x90", // ጐ (Go, 268) + b"\xE1\x8C\x91", // ጑ (Gu, 269) + b"\xE1\x8C\x92", // ጒ (Gi, 270) + b"\xE1\x88\x98", // መ (Gua, 271) + b"\xE1\x8C\x94", // ጔ (Ge, 272) + b"\xE1\x8C\x95", // ጕ (Gwe, 273) + b"\xE1\x8C\x96", // ጖ (Go, 274) // 
Devanagari Alphabet b"\xE0\xA4\x85", // अ (A, 275) b"\xE0\xA4\x86", // आ (Aa, 276) @@ -429,12 +429,12 @@ pub static SYMBOLS: [&[u8]; 439] = [ // Sinhala Alphabet b"\xE0\xB6\x85", // අ (A, 401) b"\xE0\xB6\x86", // ආ (Aa, 402) - b"\xE0\xB6\x87", // ඉ (I, 403) - b"\xE0\xB6\x88", // ඊ (Ii, 404) - b"\xE0\xB6\x89", // උ (U, 405) - b"\xE0\xB6\x8A", // ඌ (Uu, 406) - b"\xE0\xB6\x8B", // ඍ (R, 407) - b"\xE0\xB6\x8C", // ඎ (Rr, 408) + b"\xE0\xB6\x89", // ඉ (I, 403) + b"\xE0\xB6\x8A", // ඊ (Ii, 404) + b"\xE0\xB6\x8B", // උ (U, 405) + b"\xE0\xB6\x8C", // ඌ (Uu, 406) + b"\xE0\xB6\x8D", // ඍ (R, 407) + b"\xE0\xB6\x8E", // ඎ (Rr, 408) b"\xE0\xB6\x8F", // ඏ (L, 409) b"\xE0\xB6\x90", // ඐ (Ll, 410) b"\xE0\xB6\x91", // එ (E, 411) @@ -448,12 +448,12 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE0\xB6\x9C", // ග (Ga, 419) b"\xE0\xB6\x9D", // ඝ (Gha, 420) b"\xE0\xB6\x9E", // ඞ (Nga, 421) - b"\xE0\xB6\x9F", // ච (Cha, 422) - b"\xE0\xB6\xA0", // ඡ (Chha, 423) - b"\xE0\xB6\xA1", // ජ (Ja, 424) - b"\xE0\xB6\xA2", // ඣ (Jha, 425) - b"\xE0\xB6\xA3", // ඤ (Nya, 426) - b"\xE0\xB6\xA4", // ට (Ta, 427) + b"\xE0\xB6\xA0", // ච (Cha, 422) + b"\xE0\xB6\xA1", // ඡ (Chha, 423) + b"\xE0\xB6\xA2", // ජ (Ja, 424) + b"\xE0\xB6\xA3", // ඣ (Jha, 425) + b"\xE0\xB6\xA4", // ඤ (Nya, 426) + b"\xE0\xB6\xA7", // ට (Ta, 427) b"\xE0\xB6\xA5", // ඥ (Tha, 428) b"\xE0\xB6\xA6", // ඦ (Da, 429) b"\xE0\xB6\xA7", // ට (Dha, 430) From 3e8e641eeeb6704804c878f2ca14ce707bf44b49 Mon Sep 17 00:00:00 2001 From: bdhimes Date: Wed, 10 Sep 2025 00:05:53 +0200 Subject: [PATCH 174/379] Reverted Tau change --- pallets/subtensor/src/subnets/symbols.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/symbols.rs b/pallets/subtensor/src/subnets/symbols.rs index c667fa6424..a07798535a 100644 --- a/pallets/subtensor/src/subnets/symbols.rs +++ b/pallets/subtensor/src/subnets/symbols.rs @@ -26,7 +26,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xCF\x80", // π (Pi, 16) b"\xCF\x81", // ρ (Rho, 17) 
b"\xCF\x83", // σ (Sigma, 18) - b"\x74", // t (Tau, 19) + b"t", // t (Tau, 19) b"\xCF\x85", // υ (Upsilon, 20) b"\xCF\x86", // φ (Phi, 21) b"\xCF\x87", // χ (Chi, 22) From a46eadcea3088de5285a4660207f1e8c9b4f1887 Mon Sep 17 00:00:00 2001 From: bdhimes Date: Wed, 10 Sep 2025 01:17:07 +0200 Subject: [PATCH 175/379] cargo clippy --- pallets/subtensor/src/subnets/symbols.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/symbols.rs b/pallets/subtensor/src/subnets/symbols.rs index a07798535a..0e9972f62e 100644 --- a/pallets/subtensor/src/subnets/symbols.rs +++ b/pallets/subtensor/src/subnets/symbols.rs @@ -453,7 +453,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE0\xB6\xA2", // ජ (Ja, 424) b"\xE0\xB6\xA3", // ඣ (Jha, 425) b"\xE0\xB6\xA4", // ඤ (Nya, 426) - b"\xE0\xB6\xA7", // ට (Ta, 427) + b"\xE0\xB6\xA4", // ට (Ta, 427) b"\xE0\xB6\xA5", // ඥ (Tha, 428) b"\xE0\xB6\xA6", // ඦ (Da, 429) b"\xE0\xB6\xA7", // ට (Dha, 430) From 48caff402ac42bb009ff7c06bd047606c60282cc Mon Sep 17 00:00:00 2001 From: bdhimes Date: Wed, 10 Sep 2025 01:18:12 +0200 Subject: [PATCH 176/379] cargo fix --- pallets/subtensor/src/subnets/symbols.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/symbols.rs b/pallets/subtensor/src/subnets/symbols.rs index 0e9972f62e..a07798535a 100644 --- a/pallets/subtensor/src/subnets/symbols.rs +++ b/pallets/subtensor/src/subnets/symbols.rs @@ -453,7 +453,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE0\xB6\xA2", // ජ (Ja, 424) b"\xE0\xB6\xA3", // ඣ (Jha, 425) b"\xE0\xB6\xA4", // ඤ (Nya, 426) - b"\xE0\xB6\xA4", // ට (Ta, 427) + b"\xE0\xB6\xA7", // ට (Ta, 427) b"\xE0\xB6\xA5", // ඥ (Tha, 428) b"\xE0\xB6\xA6", // ඦ (Da, 429) b"\xE0\xB6\xA7", // ට (Dha, 430) From 5dc32fd1a4bf15cf29799d44f9675b79024c6674 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 2025 20:48:51 -0300 Subject: [PATCH 177/379] added max immune percentage --- pallets/admin-utils/src/tests/mock.rs 
| 3 + pallets/admin-utils/src/tests/mod.rs | 95 +++++++++++++++++++++++ pallets/subtensor/src/macros/config.rs | 3 + pallets/subtensor/src/subnets/uids.rs | 15 ++++ pallets/subtensor/src/tests/mock.rs | 2 + pallets/transaction-fee/src/tests/mock.rs | 2 + runtime/src/lib.rs | 2 + 7 files changed, 122 insertions(+) diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index be2052a212..9a0ffb6896 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -16,6 +16,7 @@ use sp_runtime::{ BuildStorage, KeyTypeId, Perbill, testing::TestXt, traits::{BlakeTwo256, ConstU32, IdentityLookup}, + Percent, }; use sp_std::cmp::Ordering; use sp_weights::Weight; @@ -151,6 +152,7 @@ parameter_types! { pub const InitialKeySwapOnSubnetCost: u64 = 10_000_000; pub const HotkeySwapOnSubnetInterval: u64 = 7 * 24 * 60 * 60 / 12; // 7 days pub const LeaseDividendsDistributionInterval: u32 = 100; // 100 blocks + pub const MaxImmuneUidsPercentage: Percent = Percent::from_percent(80); } impl pallet_subtensor::Config for Test { @@ -228,6 +230,7 @@ impl pallet_subtensor::Config for Test { type ProxyInterface = (); type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval; type GetCommitments = (); + type MaxImmuneUidsPercentage = MaxImmuneUidsPercentage; } parameter_types! 
{ diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index b6dff69ef4..e9187cb437 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2296,6 +2296,101 @@ fn test_trim_to_max_allowed_uids() { }); } +#[test] +fn test_trim_to_max_allowed_uids_too_many_immune() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let sn_owner = U256::from(1); + add_network(netuid, 10); + SubnetOwner::::insert(netuid, sn_owner); + MaxRegistrationsPerBlock::::insert(netuid, 256); + TargetRegistrationsPerInterval::::insert(netuid, 256); + ImmuneOwnerUidsLimit::::insert(netuid, 2); + MinAllowedUids::::set(netuid, 4); + + // Add 5 neurons + let max_n = 5; + for i in 1..=max_n { + let n = i * 1000; + register_ok_neuron(netuid, U256::from(n), U256::from(n + i), 0); + } + + // Run some blocks to ensure stake weights are set + run_to_block((ImmunityPeriod::::get(netuid) + 1).into()); + + // Set owner immune uids (2 UIDs) by adding them to OwnedHotkeys + let owner_hotkey1 = U256::from(1000); + let owner_hotkey2 = U256::from(2000); + OwnedHotkeys::::insert(sn_owner, vec![owner_hotkey1, owner_hotkey2]); + Keys::::insert(netuid, 0, owner_hotkey1); + Uids::::insert(netuid, owner_hotkey1, 0); + Keys::::insert(netuid, 1, owner_hotkey2); + Uids::::insert(netuid, owner_hotkey2, 1); + + // Set temporally immune uids (2 UIDs) to make total immune count 4 out of 5 (80%) + // Set their registration block to current block to make them temporally immune + let current_block = frame_system::Pallet::::block_number(); + for uid in 2..4 { + let hotkey = U256::from(uid * 1000 + 1000); + Keys::::insert(netuid, uid, hotkey); + Uids::::insert(netuid, hotkey, uid); + BlockAtRegistration::::insert(netuid, uid, current_block); + } + + // Try to trim to 4 UIDs - this should fail because 4/4 = 100% immune (>= 80%) + assert_err!( + AdminUtils::sudo_trim_to_max_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + 4 + ), + 
pallet_subtensor::Error::::InvalidValue + ); + + // Try to trim to 3 UIDs - this should also fail because 4/3 > 80% immune (>= 80%) + assert_err!( + AdminUtils::sudo_trim_to_max_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + 3 + ), + pallet_subtensor::Error::::InvalidValue + ); + + // Now test a scenario where trimming should succeed + // Remove one immune UID to make it 3 immune out of 4 total + let uid_to_remove = 3; + let hotkey_to_remove = U256::from(uid_to_remove * 1000 + 1000); + #[allow(unknown_lints)] + Keys::::remove(netuid, uid_to_remove); + Uids::::remove(netuid, hotkey_to_remove); + BlockAtRegistration::::remove(netuid, uid_to_remove); + + // Now we have 3 immune out of 4 total UIDs + // Try to trim to 3 UIDs - this should succeed because 3/3 = 100% immune, but that's exactly 80% + // Wait, 100% is > 80%, so this should fail. Let me test with a scenario where we have fewer immune UIDs + + // Remove another immune UID to make it 2 immune out of 3 total + let uid_to_remove2 = 2; + let hotkey_to_remove2 = U256::from(uid_to_remove2 * 1000 + 1000); + #[allow(unknown_lints)] + Keys::::remove(netuid, uid_to_remove2); + Uids::::remove(netuid, hotkey_to_remove2); + BlockAtRegistration::::remove(netuid, uid_to_remove2); + + // Now we have 2 immune out of 2 total UIDs + // Try to trim to 1 UID - this should fail because 2/1 is impossible, but the check prevents it + assert_err!( + AdminUtils::sudo_trim_to_max_allowed_uids( + <::RuntimeOrigin>::root(), + netuid, + 1 + ), + pallet_subtensor::Error::::InvalidValue + ); + }); +} + #[test] fn test_sudo_set_min_allowed_uids() { new_test_ext().execute_with(|| { diff --git a/pallets/subtensor/src/macros/config.rs b/pallets/subtensor/src/macros/config.rs index 6bcb382631..cb6af29728 100644 --- a/pallets/subtensor/src/macros/config.rs +++ b/pallets/subtensor/src/macros/config.rs @@ -254,5 +254,8 @@ mod config { /// Number of blocks between dividends distribution. 
#[pallet::constant] type LeaseDividendsDistributionInterval: Get>; + /// Maximum percentage of immune UIDs. + #[pallet::constant] + type MaxImmuneUidsPercentage: Get; } } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 964d17a136..937d40abd1 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -1,5 +1,6 @@ use super::*; use frame_support::storage::IterableStorageDoubleMap; +use sp_runtime::Percent; use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use sp_std::{cmp, vec}; use subtensor_runtime_common::NetUid; @@ -131,6 +132,20 @@ impl Pallet { let current_n = Self::get_subnetwork_n(netuid); if current_n > max_n { + // Count the number of immune UIDs + let mut immune_count = 0; + for uid in 0..current_n { + if owner_uids.contains(&(uid as u16)) + || Self::get_neuron_is_immune(netuid, uid as u16) + { + immune_count += 1; + } + } + + // Ensure the number of immune UIDs is less than 80% + let immune_percentage = Percent::from_rational(immune_count, max_n); + ensure!(immune_percentage < T::MaxImmuneUidsPercentage::get(), Error::::InvalidValue); + // Get all emissions with their UIDs and sort by emission (descending) // This ensures we keep the highest emitters and remove the lowest ones let mut emissions = Emission::::get(netuid) diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 300a65a436..d91ee9c8af 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -223,6 +223,7 @@ parameter_types! 
{ pub const HotkeySwapOnSubnetInterval: u64 = 15; // 15 block, should be bigger than subnet number, then trigger clean up for all subnets pub const MaxContributorsPerLeaseToRemove: u32 = 3; pub const LeaseDividendsDistributionInterval: u32 = 100; + pub const MaxImmuneUidsPercentage: Percent = Percent::from_percent(80); } // Configure collective pallet for council @@ -459,6 +460,7 @@ impl crate::Config for Test { type ProxyInterface = FakeProxier; type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval; type GetCommitments = (); + type MaxImmuneUidsPercentage = MaxImmuneUidsPercentage; } // Swap-related parameter types diff --git a/pallets/transaction-fee/src/tests/mock.rs b/pallets/transaction-fee/src/tests/mock.rs index c16fc87d82..ee3da7f0a4 100644 --- a/pallets/transaction-fee/src/tests/mock.rs +++ b/pallets/transaction-fee/src/tests/mock.rs @@ -216,6 +216,7 @@ parameter_types! { pub const InitialKeySwapOnSubnetCost: u64 = 10_000_000; pub const HotkeySwapOnSubnetInterval: u64 = 7 * 24 * 60 * 60 / 12; // 7 days pub const LeaseDividendsDistributionInterval: u32 = 100; // 100 blocks + pub const MaxImmuneUidsPercentage: Percent = Percent::from_percent(80); } impl pallet_subtensor::Config for Test { @@ -293,6 +294,7 @@ impl pallet_subtensor::Config for Test { type ProxyInterface = (); type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval; type GetCommitments = (); + type MaxImmuneUidsPercentage = MaxImmuneUidsPercentage; } parameter_types! { diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index ef470c048c..bdf97c29ca 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1169,6 +1169,7 @@ parameter_types! 
{ pub const SubtensorInitialKeySwapOnSubnetCost: u64 = 1_000_000; // 0.001 TAO pub const HotkeySwapOnSubnetInterval : BlockNumber = 5 * 24 * 60 * 60 / 12; // 5 days pub const LeaseDividendsDistributionInterval: BlockNumber = 100; // 100 blocks + pub const MaxImmuneUidsPercentage: Percent = Percent::from_percent(80); } impl pallet_subtensor::Config for Runtime { @@ -1246,6 +1247,7 @@ impl pallet_subtensor::Config for Runtime { type ProxyInterface = Proxier; type LeaseDividendsDistributionInterval = LeaseDividendsDistributionInterval; type GetCommitments = GetCommitmentsStruct; + type MaxImmuneUidsPercentage = MaxImmuneUidsPercentage; } parameter_types! { From 5a55def9bcaddb414e99ea04be912e1796c2875d Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 2025 20:52:43 -0300 Subject: [PATCH 178/379] fix imports --- pallets/subtensor/src/tests/mock.rs | 1 + pallets/transaction-fee/src/tests/mock.rs | 1 + runtime/src/lib.rs | 1 + 3 files changed, 3 insertions(+) diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index d91ee9c8af..01246afeba 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -21,6 +21,7 @@ use sp_runtime::Perbill; use sp_runtime::{ BuildStorage, traits::{BlakeTwo256, IdentityLookup}, + Percent, }; use sp_std::{cell::RefCell, cmp::Ordering}; use subtensor_runtime_common::{NetUid, TaoCurrency}; diff --git a/pallets/transaction-fee/src/tests/mock.rs b/pallets/transaction-fee/src/tests/mock.rs index ee3da7f0a4..99840b789f 100644 --- a/pallets/transaction-fee/src/tests/mock.rs +++ b/pallets/transaction-fee/src/tests/mock.rs @@ -18,6 +18,7 @@ use sp_runtime::{ BuildStorage, KeyTypeId, Perbill, testing::TestXt, traits::{BlakeTwo256, ConstU32, IdentityLookup, One}, + Percent, }; use sp_std::cmp::Ordering; use sp_weights::Weight; diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index bdf97c29ca..da21c582fe 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -50,6 
+50,7 @@ use sp_core::{ use sp_runtime::Cow; use sp_runtime::generic::Era; use sp_runtime::{ + Percent, AccountId32, ApplyExtrinsicResult, ConsensusEngineId, generic, impl_opaque_keys, traits::{ AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf, Dispatchable, One, From 0a033b2866a5ccbb2850291252ab03e6d83762be Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 2025 20:54:31 -0300 Subject: [PATCH 179/379] cargo clippy --- pallets/subtensor/src/subnets/uids.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 937d40abd1..c477b6e7d5 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -135,8 +135,8 @@ impl Pallet { // Count the number of immune UIDs let mut immune_count = 0; for uid in 0..current_n { - if owner_uids.contains(&(uid as u16)) - || Self::get_neuron_is_immune(netuid, uid as u16) + if owner_uids.contains(&{ uid }) + || Self::get_neuron_is_immune(netuid, uid) { immune_count += 1; } From 246344131e7e5a99a9bf3b2b1870f0e2021332db Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 2025 20:55:49 -0300 Subject: [PATCH 180/379] commit Cargo.lock --- pallets/subtensor/src/subnets/uids.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index c477b6e7d5..f37e6224b0 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -133,12 +133,12 @@ impl Pallet { if current_n > max_n { // Count the number of immune UIDs - let mut immune_count = 0; + let mut immune_count: u16 = 0; for uid in 0..current_n { if owner_uids.contains(&{ uid }) || Self::get_neuron_is_immune(netuid, uid) { - immune_count += 1; + immune_count = immune_count.saturating_add(1); } } From 9e27dd0fbbdc1e04e5a5bc5eadec52bc18d8d5a5 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 9 Sep 
2025 20:58:02 -0300 Subject: [PATCH 181/379] cargo fmt --- pallets/admin-utils/src/tests/mock.rs | 3 +-- pallets/subtensor/src/subnets/uids.rs | 9 +++++---- pallets/subtensor/src/tests/mock.rs | 3 +-- pallets/transaction-fee/src/tests/mock.rs | 3 +-- runtime/src/lib.rs | 3 +-- 5 files changed, 9 insertions(+), 12 deletions(-) diff --git a/pallets/admin-utils/src/tests/mock.rs b/pallets/admin-utils/src/tests/mock.rs index 9a0ffb6896..e9a274470a 100644 --- a/pallets/admin-utils/src/tests/mock.rs +++ b/pallets/admin-utils/src/tests/mock.rs @@ -13,10 +13,9 @@ use sp_consensus_grandpa::AuthorityList as GrandpaAuthorityList; use sp_core::U256; use sp_core::{ConstU64, H256}; use sp_runtime::{ - BuildStorage, KeyTypeId, Perbill, + BuildStorage, KeyTypeId, Perbill, Percent, testing::TestXt, traits::{BlakeTwo256, ConstU32, IdentityLookup}, - Percent, }; use sp_std::cmp::Ordering; use sp_weights::Weight; diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index f37e6224b0..aaafaf8edc 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -135,16 +135,17 @@ impl Pallet { // Count the number of immune UIDs let mut immune_count: u16 = 0; for uid in 0..current_n { - if owner_uids.contains(&{ uid }) - || Self::get_neuron_is_immune(netuid, uid) - { + if owner_uids.contains(&{ uid }) || Self::get_neuron_is_immune(netuid, uid) { immune_count = immune_count.saturating_add(1); } } // Ensure the number of immune UIDs is less than 80% let immune_percentage = Percent::from_rational(immune_count, max_n); - ensure!(immune_percentage < T::MaxImmuneUidsPercentage::get(), Error::::InvalidValue); + ensure!( + immune_percentage < T::MaxImmuneUidsPercentage::get(), + Error::::InvalidValue + ); // Get all emissions with their UIDs and sort by emission (descending) // This ensures we keep the highest emitters and remove the lowest ones diff --git a/pallets/subtensor/src/tests/mock.rs 
b/pallets/subtensor/src/tests/mock.rs index 01246afeba..f45b36dbc4 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -19,9 +19,8 @@ use pallet_collective::MemberCount; use sp_core::{ConstU64, Get, H256, U256, offchain::KeyTypeId}; use sp_runtime::Perbill; use sp_runtime::{ - BuildStorage, + BuildStorage, Percent, traits::{BlakeTwo256, IdentityLookup}, - Percent, }; use sp_std::{cell::RefCell, cmp::Ordering}; use subtensor_runtime_common::{NetUid, TaoCurrency}; diff --git a/pallets/transaction-fee/src/tests/mock.rs b/pallets/transaction-fee/src/tests/mock.rs index 99840b789f..8c6a064ef6 100644 --- a/pallets/transaction-fee/src/tests/mock.rs +++ b/pallets/transaction-fee/src/tests/mock.rs @@ -15,10 +15,9 @@ pub use pallet_subtensor::*; pub use sp_core::U256; use sp_core::{ConstU64, H256}; use sp_runtime::{ - BuildStorage, KeyTypeId, Perbill, + BuildStorage, KeyTypeId, Perbill, Percent, testing::TestXt, traits::{BlakeTwo256, ConstU32, IdentityLookup, One}, - Percent, }; use sp_std::cmp::Ordering; use sp_weights::Weight; diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index da21c582fe..0c17e9b11b 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -50,8 +50,7 @@ use sp_core::{ use sp_runtime::Cow; use sp_runtime::generic::Era; use sp_runtime::{ - Percent, - AccountId32, ApplyExtrinsicResult, ConsensusEngineId, generic, impl_opaque_keys, + AccountId32, ApplyExtrinsicResult, ConsensusEngineId, Percent, generic, impl_opaque_keys, traits::{ AccountIdLookup, BlakeTwo256, Block as BlockT, DispatchInfoOf, Dispatchable, One, PostDispatchInfoOf, UniqueSaturatedInto, Verify, From 7626e0caa40b443fe2f2da9256be86225da9cc44 Mon Sep 17 00:00:00 2001 From: open-junius Date: Wed, 10 Sep 2025 13:06:32 +0800 Subject: [PATCH 182/379] commit Cargo.lock --- pallets/subtensor/src/macros/dispatches.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs 
b/pallets/subtensor/src/macros/dispatches.rs index 526ecca278..f069ff0aa5 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -120,15 +120,9 @@ mod dispatches { /// - On failure for each failed item in the batch. /// #[pallet::call_index(80)] -<<<<<<< HEAD - #[pallet::weight((Weight::from_parts(18_930_000, 0) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(0_u64)), DispatchClass::Normal, Pays::No))] -======= #[pallet::weight((Weight::from_parts(95_460_000, 0) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] ->>>>>>> devnet-ready pub fn batch_set_weights( origin: OriginFor, netuids: Vec>, From 0a489b098aa8c8d350bea7f2b2584fd770fd36ef Mon Sep 17 00:00:00 2001 From: open-junius Date: Wed, 10 Sep 2025 16:14:58 +0800 Subject: [PATCH 183/379] refactor alpha test --- evm-tests/test/alpha.precompile.test.ts | 63 +++++-------------------- 1 file changed, 11 insertions(+), 52 deletions(-) diff --git a/evm-tests/test/alpha.precompile.test.ts b/evm-tests/test/alpha.precompile.test.ts index 1ca3c755af..9c1a5daa8e 100644 --- a/evm-tests/test/alpha.precompile.test.ts +++ b/evm-tests/test/alpha.precompile.test.ts @@ -1,15 +1,14 @@ import * as assert from "assert"; -import { getAliceSigner, getDevnetApi, waitForTransactionCompletion, convertPublicKeyToMultiAddress, getRandomSubstrateKeypair, getSignerFromKeypair } from "../src/substrate" +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" import { getPublicClient } from "../src/utils"; -import { ETH_LOCAL_URL, SUB_LOCAL_URL } from "../src/config"; +import { ETH_LOCAL_URL } from "../src/config"; import { devnet } from "@polkadot-api/descriptors" import { PublicClient } from "viem"; -import { PolkadotSigner, TypedApi } from "polkadot-api"; +import { TypedApi } from "polkadot-api"; import { toViemAddress, 
convertPublicKeyToSs58 } from "../src/address-utils" import { IAlphaABI, IALPHA_ADDRESS } from "../src/contracts/alpha" -import { u64 } from "@polkadot-api/substrate-bindings"; - +import { forceSetBalanceToSs58Address, addNewSubnetwork, startCall } from "../src/subtensor"; describe("Test Alpha Precompile", () => { // init substrate part const hotkey = getRandomSubstrateKeypair(); @@ -18,9 +17,6 @@ describe("Test Alpha Precompile", () => { let api: TypedApi; - // sudo account alice as signer - let alice: PolkadotSigner; - // init other variable let subnetId = 0; @@ -28,50 +24,13 @@ describe("Test Alpha Precompile", () => { // init variables got from await and async publicClient = await getPublicClient(ETH_LOCAL_URL) api = await getDevnetApi() - alice = await getAliceSigner(); - - // Fund the hotkey account - { - const multiAddress = convertPublicKeyToMultiAddress(hotkey.publicKey) - const internalCall = api.tx.Balances.force_set_balance({ who: multiAddress, new_free: BigInt(1e12) }) - const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - } - - // Fund the coldkey account - { - const multiAddress = convertPublicKeyToMultiAddress(coldkey.publicKey) - const internalCall = api.tx.Balances.force_set_balance({ who: multiAddress, new_free: BigInt(1e12) }) - const tx = api.tx.Sudo.sudo({ call: internalCall.decodedCall }) - - await waitForTransactionCompletion(api, tx, alice) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - } - - // Register a new subnet - const signer = getSignerFromKeypair(coldkey) - const registerNetworkTx = api.tx.SubtensorModule.register_network({ hotkey: convertPublicKeyToSs58(hotkey.publicKey) }) - await waitForTransactionCompletion(api, registerNetworkTx, signer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - - // Get 
the newly created subnet ID - let totalNetworks = await api.query.SubtensorModule.TotalNetworks.getValue() - assert.ok(totalNetworks > 1) - subnetId = totalNetworks - 1 - - // Register a neuron on the subnet if needed - let uid_count = await api.query.SubtensorModule.SubnetworkN.getValue(subnetId) - if (uid_count === 0) { - const tx = api.tx.SubtensorModule.burned_register({ hotkey: convertPublicKeyToSs58(hotkey.publicKey), netuid: subnetId }) - await waitForTransactionCompletion(api, tx, signer) - .then(() => { }) - .catch((error) => { console.log(`transaction error ${error}`) }); - } + + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey.publicKey)) + await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(coldkey.publicKey)) + + let netuid = await addNewSubnetwork(api, hotkey, coldkey) + await startCall(api, netuid, coldkey) + }) describe("Alpha Price Functions", () => { From e6fd948d3a156969aae018a813fe0d51b71c154a Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Wed, 10 Sep 2025 09:55:22 -0300 Subject: [PATCH 184/379] restore clear_neuron --- pallets/subtensor/src/subnets/uids.rs | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index aaafaf8edc..36e3abaaa0 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -67,6 +67,17 @@ impl Pallet { Axons::::remove(netuid, old_hotkey); } + /// Resets the trust, emission, consensus, incentive, dividends of the neuron to default + pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { + let neuron_index: usize = neuron_uid.into(); + Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); + Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); 
+ Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. + } + /// Appends the uid to the network. pub fn append_neuron(netuid: NetUid, new_hotkey: &T::AccountId, block_number: u64) { // 1. Get the next uid. This is always equal to subnetwork_n. @@ -98,18 +109,6 @@ impl Pallet { IsNetworkMember::::insert(new_hotkey.clone(), netuid, true); // Fill network is member. } - /// Clears (sets to default) the neuron map values fot a neuron when it is - /// removed from the subnet - pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { - let neuron_index: usize = neuron_uid.into(); - Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); - Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. 
- } - pub fn trim_to_max_allowed_uids(netuid: NetUid, max_n: u16) -> DispatchResult { // Reasonable limits ensure!( From f0b71885c8bd8fb0a93360f8b910430424af492c Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Wed, 10 Sep 2025 10:01:32 -0300 Subject: [PATCH 185/379] restore clear_neuron 2 --- pallets/subtensor/src/subnets/uids.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 36e3abaaa0..5c5de832bd 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -18,6 +18,17 @@ impl Pallet { } } + /// Resets the trust, emission, consensus, incentive, dividends of the neuron to default + pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { + let neuron_index: usize = neuron_uid.into(); + Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); + Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. + } + /// Replace the neuron under this uid. 
pub fn replace_neuron( netuid: NetUid, @@ -67,17 +78,6 @@ impl Pallet { Axons::::remove(netuid, old_hotkey); } - /// Resets the trust, emission, consensus, incentive, dividends of the neuron to default - pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { - let neuron_index: usize = neuron_uid.into(); - Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); - Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. - } - /// Appends the uid to the network. pub fn append_neuron(netuid: NetUid, new_hotkey: &T::AccountId, block_number: u64) { // 1. Get the next uid. This is always equal to subnetwork_n. From 4eaf67a3561175ba68ed56da1a9de6be6b4e4a10 Mon Sep 17 00:00:00 2001 From: open-junius Date: Wed, 10 Sep 2025 21:34:01 +0800 Subject: [PATCH 186/379] bump version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index aee1e04895..6f3929de00 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 315, + spec_version: 316, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 15ccf67abcdf47f2572aaff1fc411f691cc52c97 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Wed, 10 Sep 2025 10:36:47 -0300 Subject: [PATCH 187/379] cargo fmt --- pallets/subtensor/src/macros/events.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index dc52e624b8..9d44fc58ef 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -435,7 +435,7 @@ mod events { /// UID-indexed array of miner incentive alpha; index equals UID. emissions: Vec, }, - + /// The minimum allowed UIDs for a subnet have been set. MinAllowedUidsSet(NetUid, u16), } From 67e71910340d0a347b3517d1d35406916b49bf9d Mon Sep 17 00:00:00 2001 From: bdhimes Date: Wed, 10 Sep 2025 15:39:26 +0200 Subject: [PATCH 188/379] Renamed 430 --- pallets/subtensor/src/subnets/symbols.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/symbols.rs b/pallets/subtensor/src/subnets/symbols.rs index a07798535a..21a3652fb4 100644 --- a/pallets/subtensor/src/subnets/symbols.rs +++ b/pallets/subtensor/src/subnets/symbols.rs @@ -456,7 +456,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE0\xB6\xA7", // ට (Ta, 427) b"\xE0\xB6\xA5", // ඥ (Tha, 428) b"\xE0\xB6\xA6", // ඦ (Da, 429) - b"\xE0\xB6\xA7", // ට (Dha, 430) + b"\xE0\xB6\xA9", // ඩ (Dha, 430) b"\xE0\xB6\xA8", // ඨ (Na, 431) b"\xE0\xB6\xAA", // ඪ (Pa, 432) b"\xE0\xB6\xAB", // ණ (Pha, 433) From f6f3fd51639b4d1437262abbecfa5a4ad5d3bace Mon Sep 17 00:00:00 2001 From: bdhimes Date: Wed, 10 Sep 2025 15:53:48 +0200 Subject: [PATCH 189/379] Bump spec version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index aee1e04895..6f3929de00 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 
+220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 315, + spec_version: 316, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From fe056c6647b41ed7111710d74cfec3294bf3d456 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 10 Sep 2025 11:38:46 -0400 Subject: [PATCH 190/379] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index e85cc4bb6e..5832b53206 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 314, + spec_version: 315, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 2f43928645080115b612c7ea133c99ecdb276a00 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 10 Sep 2025 11:39:19 -0400 Subject: [PATCH 191/379] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index f938105bc6..aee1e04895 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 314, + spec_version: 315, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From d55e68c49e906d44bad8d78f7f08ebf61e20873f Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 10 Sep 2025 12:34:46 -0400 Subject: [PATCH 192/379] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 5832b53206..d49d5147e9 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 315, + spec_version: 316, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 02cf4eb3f364483ba58e1b336ffcbfdd868a7303 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 10 Sep 2025 13:45:11 -0400 Subject: [PATCH 193/379] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index aee1e04895..6f3929de00 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 315, + spec_version: 316, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From a027ba7060a769648c6b9bcafd193324dcb1042d Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Wed, 10 Sep 2025 16:48:29 -0300 Subject: [PATCH 194/379] fix rate limit + call_index --- pallets/admin-utils/src/lib.rs | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 07d54eee7e..9a0230d32e 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1908,7 +1908,7 @@ pub mod pallet { /// The trimming is done by sorting the UIDs by emission descending and then trimming /// the lowest emitters while preserving temporally and owner immune UIDs. The UIDs are /// then compressed to the left and storage is migrated to the new compressed UIDs. - #[pallet::call_index(74)] + #[pallet::call_index(78)] #[pallet::weight(Weight::from_parts(15_000_000, 0) .saturating_add(::DbWeight::get().reads(1_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] @@ -1917,24 +1917,25 @@ pub mod pallet { netuid: NetUid, max_n: u16, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin.clone(), netuid)?; - if let Ok(RawOrigin::Signed(who)) = origin.into() { - ensure!( - pallet_subtensor::Pallet::::passes_rate_limit_on_subnet( - &TransactionType::SetMaxAllowedUIDS, - &who, - netuid, - ), - pallet_subtensor::Error::::TxRateLimitExceeded - ); - } + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin.clone(), + netuid, + &[TransactionType::SetMaxAllowedUIDS], + )?; + pallet_subtensor::Pallet::::trim_to_max_allowed_uids(netuid, max_n)?; + + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::SetMaxAllowedUIDS], + ); Ok(()) } /// The extrinsic sets the minimum allowed UIDs for a subnet. /// It is only callable by the root account. 
- #[pallet::call_index(75)] + #[pallet::call_index(79)] #[pallet::weight(Weight::from_parts(18_800_000, 0) .saturating_add(::DbWeight::get().reads(2_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] @@ -1956,7 +1957,9 @@ pub mod pallet { min_allowed_uids < pallet_subtensor::Pallet::::get_subnetwork_n(netuid), Error::::MinAllowedUidsGreaterThanCurrentUids ); + pallet_subtensor::Pallet::::set_min_allowed_uids(netuid, min_allowed_uids); + log::debug!( "MinAllowedUidsSet( netuid: {netuid:?} min_allowed_uids: {min_allowed_uids:?} ) " ); From b2b96368f01bb0f15c97c006fb2f0a229a47ca49 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Wed, 10 Sep 2025 16:48:44 -0300 Subject: [PATCH 195/379] refacto to handle subsubnet when trimming uids --- pallets/subtensor/src/subnets/uids.rs | 162 +++++++++++++++----------- 1 file changed, 94 insertions(+), 68 deletions(-) diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 7a7b6f67b2..c910c17336 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -148,11 +148,11 @@ impl Pallet { MaxAllowedUids::::insert(netuid, max_n); - let owner = SubnetOwner::::get(netuid); - let owner_uids = BTreeSet::from_iter(Self::get_immune_owner_uids(netuid, &owner)); let current_n = Self::get_subnetwork_n(netuid); - if current_n > max_n { + let owner = SubnetOwner::::get(netuid); + let owner_uids = BTreeSet::from_iter(Self::get_immune_owner_uids(netuid, &owner)); + // Count the number of immune UIDs let mut immune_count: u16 = 0; for uid in 0..current_n { @@ -167,7 +167,8 @@ impl Pallet { immune_percentage < T::MaxImmuneUidsPercentage::get(), Error::::InvalidValue ); - + + // Get all emissions with their UIDs and sort by emission (descending) // This ensures we keep the highest emitters and remove the lowest ones let mut emissions = Emission::::get(netuid) @@ -176,9 +177,9 @@ impl Pallet { .collect::>(); emissions.sort_by_key(|(_, emission)| 
cmp::Reverse(*emission)); - // Remove uids from the end (lowest emitters) until we reach the new maximum let mut removed_uids = BTreeSet::new(); let mut uids_left_to_process = current_n; + let subsubnets_count = SubsubnetCountCurrent::::get(netuid).into(); // Iterate from the end (lowest emitters) to the beginning for i in (0..current_n).rev() { @@ -187,15 +188,17 @@ impl Pallet { } if let Some((uid, _)) = emissions.get(i as usize).cloned() { + let neuron_uid = uid as u16; + // Skip subnet owner's or temporally immune uids - if owner_uids.contains(&(uid as u16)) - || Self::get_neuron_is_immune(netuid, uid as u16) + if owner_uids.contains(&neuron_uid) + || Self::get_neuron_is_immune(netuid, neuron_uid) { continue; } // Remove hotkey related storage items if hotkey exists - if let Ok(hotkey) = Keys::::try_get(netuid, uid as u16) { + if let Ok(hotkey) = Keys::::try_get(netuid, neuron_uid) { Uids::::remove(netuid, &hotkey); IsNetworkMember::::remove(&hotkey, netuid); LastHotkeyEmissionOnNetuid::::remove(&hotkey, netuid); @@ -208,10 +211,13 @@ impl Pallet { // Remove all storage items associated with this uid #[allow(unknown_lints)] - Keys::::remove(netuid, uid as u16); - BlockAtRegistration::::remove(netuid, uid as u16); - Weights::::remove(netuid, uid as u16); - Bonds::::remove(netuid, uid as u16); + Keys::::remove(netuid, neuron_uid); + BlockAtRegistration::::remove(netuid, neuron_uid); + for subid in 0..subsubnets_count { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + Weights::::remove(netuid_index, neuron_uid); + Bonds::::remove(netuid_index, neuron_uid); + } // Remove from emissions array and track as removed emissions.remove(i.into()); @@ -233,9 +239,7 @@ impl Pallet { let trust = Trust::::get(netuid); let active = Active::::get(netuid); let consensus = Consensus::::get(netuid); - let incentive = Incentive::::get(netuid); let dividends = Dividends::::get(netuid); - let lastupdate = LastUpdate::::get(netuid); let pruning_scores = 
PruningScores::::get(netuid); let vtrust = ValidatorTrust::::get(netuid); let vpermit = ValidatorPermit::::get(netuid); @@ -243,32 +247,28 @@ impl Pallet { // Create trimmed arrays by extracting values for kept uids only // Pre-allocate vectors with exact capacity for efficiency - let mut trimmed_ranks = Vec::with_capacity(trimmed_uids.len()); - let mut trimmed_trust = Vec::with_capacity(trimmed_uids.len()); - let mut trimmed_active = Vec::with_capacity(trimmed_uids.len()); - let mut trimmed_consensus = Vec::with_capacity(trimmed_uids.len()); - let mut trimmed_incentive = Vec::with_capacity(trimmed_uids.len()); - let mut trimmed_dividends = Vec::with_capacity(trimmed_uids.len()); - let mut trimmed_lastupdate = Vec::with_capacity(trimmed_uids.len()); - let mut trimmed_pruning_scores = Vec::with_capacity(trimmed_uids.len()); - let mut trimmed_vtrust = Vec::with_capacity(trimmed_uids.len()); - let mut trimmed_vpermit = Vec::with_capacity(trimmed_uids.len()); - let mut trimmed_stake_weight = Vec::with_capacity(trimmed_uids.len()); + let len = trimmed_uids.len(); + let mut trimmed_ranks = Vec::with_capacity(len); + let mut trimmed_trust = Vec::with_capacity(len); + let mut trimmed_active = Vec::with_capacity(len); + let mut trimmed_consensus = Vec::with_capacity(len); + let mut trimmed_dividends = Vec::with_capacity(len); + let mut trimmed_pruning_scores = Vec::with_capacity(len); + let mut trimmed_vtrust = Vec::with_capacity(len); + let mut trimmed_vpermit = Vec::with_capacity(len); + let mut trimmed_stake_weight = Vec::with_capacity(len); // Single iteration to extract values for all kept uids - for &old_uid in &trimmed_uids { - trimmed_ranks.push(ranks.get(old_uid).cloned().unwrap_or_default()); - trimmed_trust.push(trust.get(old_uid).cloned().unwrap_or_default()); - trimmed_active.push(active.get(old_uid).cloned().unwrap_or_default()); - trimmed_consensus.push(consensus.get(old_uid).cloned().unwrap_or_default()); - 
trimmed_incentive.push(incentive.get(old_uid).cloned().unwrap_or_default()); - trimmed_dividends.push(dividends.get(old_uid).cloned().unwrap_or_default()); - trimmed_lastupdate.push(lastupdate.get(old_uid).cloned().unwrap_or_default()); - trimmed_pruning_scores - .push(pruning_scores.get(old_uid).cloned().unwrap_or_default()); - trimmed_vtrust.push(vtrust.get(old_uid).cloned().unwrap_or_default()); - trimmed_vpermit.push(vpermit.get(old_uid).cloned().unwrap_or_default()); - trimmed_stake_weight.push(stake_weight.get(old_uid).cloned().unwrap_or_default()); + for &uid in &trimmed_uids { + trimmed_ranks.push(ranks.get(uid).cloned().unwrap_or_default()); + trimmed_trust.push(trust.get(uid).cloned().unwrap_or_default()); + trimmed_active.push(active.get(uid).cloned().unwrap_or_default()); + trimmed_consensus.push(consensus.get(uid).cloned().unwrap_or_default()); + trimmed_dividends.push(dividends.get(uid).cloned().unwrap_or_default()); + trimmed_pruning_scores.push(pruning_scores.get(uid).cloned().unwrap_or_default()); + trimmed_vtrust.push(vtrust.get(uid).cloned().unwrap_or_default()); + trimmed_vpermit.push(vpermit.get(uid).cloned().unwrap_or_default()); + trimmed_stake_weight.push(stake_weight.get(uid).cloned().unwrap_or_default()); } // Update storage with trimmed arrays @@ -277,14 +277,29 @@ impl Pallet { Trust::::insert(netuid, trimmed_trust); Active::::insert(netuid, trimmed_active); Consensus::::insert(netuid, trimmed_consensus); - Incentive::::insert(netuid, trimmed_incentive); Dividends::::insert(netuid, trimmed_dividends); - LastUpdate::::insert(netuid, trimmed_lastupdate); PruningScores::::insert(netuid, trimmed_pruning_scores); ValidatorTrust::::insert(netuid, trimmed_vtrust); ValidatorPermit::::insert(netuid, trimmed_vpermit); StakeWeight::::insert(netuid, trimmed_stake_weight); + // Update incentives/lastupdates for subsubnets + for subid in 0..subsubnets_count { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + let incentive 
= Incentive::::get(netuid_index); + let lastupdate = LastUpdate::::get(netuid_index); + let mut trimmed_incentive = Vec::with_capacity(trimmed_uids.len()); + let mut trimmed_lastupdate = Vec::with_capacity(trimmed_uids.len()); + + for uid in &trimmed_uids { + trimmed_incentive.push(incentive.get(*uid).cloned().unwrap_or_default()); + trimmed_lastupdate.push(lastupdate.get(*uid).cloned().unwrap_or_default()); + } + + Incentive::::insert(netuid_index, trimmed_incentive); + LastUpdate::::insert(netuid_index, trimmed_lastupdate); + } + // Create mapping from old uid to new compressed uid // This is needed to update connections (weights and bonds) with correct uid references let old_to_new_uid: BTreeMap = trimmed_uids @@ -299,35 +314,46 @@ impl Pallet { // 2. Update all connections to reference the new compressed uids // 3. Clear the connections to the trimmed uids for (old_uid, new_uid) in &old_to_new_uid { - // Swap uid specific storage items to new compressed positions - Keys::::swap(netuid, *old_uid as u16, netuid, *new_uid as u16); - BlockAtRegistration::::swap(netuid, *old_uid as u16, netuid, *new_uid as u16); - - // Swap to new position and remap all target uids - Weights::::swap(netuid, *old_uid as u16, netuid, *new_uid as u16); - Weights::::mutate(netuid, *new_uid as u16, |weights| { - weights.retain_mut(|(target_uid, _weight)| { - if let Some(new_target_uid) = old_to_new_uid.get(&(*target_uid as usize)) { - *target_uid = *new_target_uid as u16; - true - } else { - false - } - }) - }); + let old_neuron_uid = *old_uid as u16; + let new_neuron_uid = *new_uid as u16; - // Swap to new position and remap all target uids - Bonds::::swap(netuid, *old_uid as u16, netuid, *new_uid as u16); - Bonds::::mutate(netuid, *new_uid as u16, |bonds| { - bonds.retain_mut(|(target_uid, _bond)| { - if let Some(new_target_uid) = old_to_new_uid.get(&(*target_uid as usize)) { - *target_uid = *new_target_uid as u16; - true - } else { - false - } - }) - }); + // Swap uid specific storage 
items to new compressed positions + Keys::::swap(netuid, old_neuron_uid, netuid, new_neuron_uid); + BlockAtRegistration::::swap(netuid, old_neuron_uid, netuid, new_neuron_uid); + + for subid in 0..subsubnets_count { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + + // Swap to new position and remap all target uids + Weights::::swap(netuid_index, old_neuron_uid, netuid_index, new_neuron_uid); + Weights::::mutate(netuid_index, new_neuron_uid, |weights| { + weights.retain_mut(|(target_uid, _weight)| { + if let Some(new_target_uid) = + old_to_new_uid.get(&(*target_uid as usize)) + { + *target_uid = *new_target_uid as u16; + true + } else { + false + } + }) + }); + + // Swap to new position and remap all target uids + Bonds::::swap(netuid_index, old_neuron_uid, netuid_index, new_neuron_uid); + Bonds::::mutate(netuid_index, new_neuron_uid, |bonds| { + bonds.retain_mut(|(target_uid, _bond)| { + if let Some(new_target_uid) = + old_to_new_uid.get(&(*target_uid as usize)) + { + *target_uid = *new_target_uid as u16; + true + } else { + false + } + }) + }); + } } // Update the subnet's uid count to reflect the new maximum From 20ef94a394a805b05d072359e0bda6815098ce5f Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Wed, 10 Sep 2025 17:07:38 -0300 Subject: [PATCH 196/379] fixed tests --- pallets/admin-utils/src/tests/mod.rs | 73 +++++++++++++++++---------- pallets/subtensor/src/subnets/uids.rs | 3 +- 2 files changed, 47 insertions(+), 29 deletions(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index b727c499ac..bbf66fb1bd 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2288,6 +2288,9 @@ fn test_trim_to_max_allowed_uids() { ImmuneOwnerUidsLimit::::insert(netuid, 2); // We set a low value here to make testing easier MinAllowedUids::::set(netuid, 4); + // We define 4 subsubnets + let subsubnet_count = SubId::from(4); + 
SubsubnetCountCurrent::::insert(netuid, subsubnet_count); // Add some neurons let max_n = 16; @@ -2322,15 +2325,20 @@ fn test_trim_to_max_allowed_uids() { Rank::::insert(netuid, values.clone()); Trust::::insert(netuid, values.clone()); Consensus::::insert(netuid, values.clone()); - Incentive::::insert(netuid, values.clone()); Dividends::::insert(netuid, values.clone()); - LastUpdate::::insert(netuid, u64_values); PruningScores::::insert(netuid, values.clone()); ValidatorTrust::::insert(netuid, values.clone()); - StakeWeight::::insert(netuid, values); + StakeWeight::::insert(netuid, values.clone()); ValidatorPermit::::insert(netuid, bool_values.clone()); Active::::insert(netuid, bool_values); + for subid in 0..subsubnet_count.into() { + let netuid_index = + SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(subid)); + Incentive::::insert(netuid_index, values.clone()); + LastUpdate::::insert(netuid_index, u64_values.clone()); + } + // We set some owner immune uids let now = frame_system::Pallet::::block_number(); BlockAtRegistration::::set(netuid, 6, now); @@ -2359,8 +2367,12 @@ fn test_trim_to_max_allowed_uids() { } } - Weights::::insert(netuid, uid, weights); - Bonds::::insert(netuid, uid, bonds); + for subid in 0..subsubnet_count.into() { + let netuid_index = + SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(subid)); + Weights::::insert(netuid_index, uid, weights.clone()); + Bonds::::insert(netuid_index, uid, bonds.clone()); + } } // Normal case @@ -2397,20 +2409,29 @@ fn test_trim_to_max_allowed_uids() { assert_eq!(Trust::::get(netuid), expected_values); assert_eq!(Active::::get(netuid), expected_bools); assert_eq!(Consensus::::get(netuid), expected_values); - assert_eq!(Incentive::::get(netuid), expected_values); assert_eq!(Dividends::::get(netuid), expected_values); - assert_eq!(LastUpdate::::get(netuid), expected_u64_values); assert_eq!(PruningScores::::get(netuid), expected_values); assert_eq!(ValidatorTrust::::get(netuid), 
expected_values); assert_eq!(ValidatorPermit::::get(netuid), expected_bools); assert_eq!(StakeWeight::::get(netuid), expected_values); + for subid in 0..subsubnet_count.into() { + let netuid_index = + SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(subid)); + assert_eq!(Incentive::::get(netuid_index), expected_values); + assert_eq!(LastUpdate::::get(netuid_index), expected_u64_values); + } + // Ensure trimmed uids related storage has been cleared for uid in new_max_n..max_n { assert!(!Keys::::contains_key(netuid, uid)); assert!(!BlockAtRegistration::::contains_key(netuid, uid)); - assert!(!Weights::::contains_key(netuid, uid)); - assert!(!Bonds::::contains_key(netuid, uid)); + for subid in 0..subsubnet_count.into() { + let netuid_index = + SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(subid)); + assert!(!Weights::::contains_key(netuid_index, uid)); + assert!(!Bonds::::contains_key(netuid_index, uid)); + } } // Ensure trimmed uids hotkey related storage has been cleared @@ -2439,26 +2460,24 @@ fn test_trim_to_max_allowed_uids() { assert!(!Prometheus::::contains_key(netuid, hotkey)); } - // Ensure trimmed uids weights and bonds have been cleared - for uid in new_max_n..max_n { - assert!(!Weights::::contains_key(netuid, uid)); - assert!(!Bonds::::contains_key(netuid, uid)); - } - // Ensure trimmed uids weights and bonds connections have been trimmed correctly for uid in 0..new_max_n { - assert!( - Weights::::get(netuid, uid) - .iter() - .all(|(target_uid, _)| *target_uid < new_max_n), - "Found a weight with target_uid >= new_max_n" - ); - assert!( - Bonds::::get(netuid, uid) - .iter() - .all(|(target_uid, _)| *target_uid < new_max_n), - "Found a bond with target_uid >= new_max_n" - ); + for subid in 0..subsubnet_count.into() { + let netuid_index = + SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(subid)); + assert!( + Weights::::get(netuid_index, uid) + .iter() + .all(|(target_uid, _)| *target_uid < new_max_n), + 
"Found a weight with target_uid >= new_max_n" + ); + assert!( + Bonds::::get(netuid_index, uid) + .iter() + .all(|(target_uid, _)| *target_uid < new_max_n), + "Found a bond with target_uid >= new_max_n" + ); + } } // Actual number of neurons on the network updated after trimming diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index c910c17336..e01f17cad6 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -167,8 +167,7 @@ impl Pallet { immune_percentage < T::MaxImmuneUidsPercentage::get(), Error::::InvalidValue ); - - + // Get all emissions with their UIDs and sort by emission (descending) // This ensures we keep the highest emitters and remove the lowest ones let mut emissions = Emission::::get(netuid) From a3acec860f4d3a1e9cee1b54bf1de0f3a083224c Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 10 Sep 2025 13:40:08 -0700 Subject: [PATCH 197/379] improve logic --- pallets/subtensor/src/coinbase/root.rs | 2 +- pallets/subtensor/src/staking/remove_stake.rs | 33 ++- pallets/subtensor/src/subnets/subnet.rs | 1 - pallets/subtensor/src/tests/networks.rs | 24 ++- pallets/swap/src/pallet/impls.rs | 202 ++---------------- 5 files changed, 58 insertions(+), 204 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 7b4a4e447c..fa16e13189 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -381,7 +381,7 @@ impl Pallet { Self::remove_network(netuid); // 4. 
--- Emit the NetworkRemoved event - log::debug!("NetworkRemoved( netuid:{netuid:?} )"); + log::info!("NetworkRemoved( netuid:{netuid:?} )"); Self::deposit_event(Event::NetworkRemoved(netuid)); Ok(()) diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index 1169f99406..d3834c283a 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -456,8 +456,7 @@ impl Pallet { // Emission:: is Vec. We: // - sum emitted α, // - apply owner fraction to get owner α, - // - convert owner α to τ using current price, - // - use that τ value for the refund formula. + // - price that α using a *simulated* AMM swap. let total_emitted_alpha_u128: u128 = Emission::::get(netuid) .into_iter() @@ -472,15 +471,27 @@ impl Pallet { .floor() .saturating_to_num::(); - // Current α→τ price (TAO per 1 α) for this subnet. - let cur_price: U96F32 = T::SwapInterface::current_alpha_price(netuid.into()); - - // Convert owner α to τ at current price; floor to integer τ. 
- let owner_emission_tao_u64: u64 = U96F32::from_num(owner_alpha_u64) - .saturating_mul(cur_price) - .floor() - .saturating_to_num::(); - let owner_emission_tao: TaoCurrency = owner_emission_tao_u64.into(); + let owner_emission_tao: TaoCurrency = if owner_alpha_u64 > 0 { + match T::SwapInterface::sim_swap(netuid.into(), OrderType::Sell, owner_alpha_u64) { + Ok(sim) => TaoCurrency::from(sim.amount_paid_out), + Err(e) => { + log::debug!( + "destroy_alpha_in_out_stakes: sim_swap owner α→τ failed (netuid={:?}, alpha={}, err={:?}); falling back to price multiply.", + netuid, + owner_alpha_u64, + e + ); + let cur_price: U96F32 = T::SwapInterface::current_alpha_price(netuid.into()); + let val_u64: u64 = U96F32::from_num(owner_alpha_u64) + .saturating_mul(cur_price) + .floor() + .saturating_to_num::(); + TaoCurrency::from(val_u64) + } + } + } else { + TaoCurrency::ZERO + }; // 4) Enumerate all α entries on this subnet to build distribution weights and cleanup lists. // - collect keys to remove, diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 3ce40cc366..abaa1e02c5 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -213,7 +213,6 @@ impl Pallet { SubnetAlphaIn::::insert(netuid_to_register, pool_initial_alpha); SubnetOwner::::insert(netuid_to_register, coldkey.clone()); SubnetOwnerHotkey::::insert(netuid_to_register, hotkey.clone()); - TransferToggle::::insert(netuid_to_register, true); SubnetLocked::::insert(netuid_to_register, pool_initial_tao); LargestLocked::::insert(netuid_to_register, pool_initial_tao.to_u64()); SubnetTaoProvided::::insert(netuid_to_register, TaoCurrency::ZERO); diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index ea1c236149..b70001c2b0 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -7,7 +7,7 @@ use sp_core::U256; use 
sp_std::collections::btree_map::BTreeMap; use substrate_fixed::types::{I96F32, U64F64, U96F32}; use subtensor_runtime_common::TaoCurrency; -use subtensor_swap_interface::SwapHandler; +use subtensor_swap_interface::{OrderType, SwapHandler}; #[test] fn test_registration_ok() { @@ -859,12 +859,22 @@ fn destroy_alpha_out_many_stakers_complex_distribution() { .floor() .saturating_to_num::(); - let price: U96F32 = - ::SwapInterface::current_alpha_price(netuid.into()); - let owner_emission_tao_u64: u64 = U96F32::from_num(owner_alpha_u64) - .saturating_mul(price) - .floor() - .saturating_to_num::(); + let owner_emission_tao_u64: u64 = ::SwapInterface::sim_swap( + netuid.into(), + OrderType::Sell, + owner_alpha_u64, + ) + .map(|res| res.amount_paid_out) + .unwrap_or_else(|_| { + // Fallback matches the pallet's fallback + let price: U96F32 = + ::SwapInterface::current_alpha_price(netuid.into()); + U96F32::from_num(owner_alpha_u64) + .saturating_mul(price) + .floor() + .saturating_to_num::() + }); + let expected_refund: u64 = lock.saturating_sub(owner_emission_tao_u64); // ── 6) run distribution (credits τ to coldkeys, wipes α state) ───── diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index 75f050632c..c3652b10c9 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1213,218 +1213,55 @@ impl Pallet { T::ProtocolId::get().into_account_truncating() } - /// Distribute `alpha_total` back to the coldkey's hotkeys for `netuid`. - /// - Pro‑rata by current α stake on this subnet; if all zero, split evenly. - /// - Deterministic "largest remainders" rounding to ensure exact conservation. - /// - Robust to partial deposit failures: retries across successes, final fallback to (cold, cold). 
- pub fn refund_alpha(netuid: NetUid, coldkey: &T::AccountId, alpha_total: AlphaCurrency) { - if alpha_total.is_zero() { - return; - } - - // 1) Recipient set - let mut hotkeys: sp_std::vec::Vec = T::SubnetInfo::get_owned_hotkeys(coldkey); - if hotkeys.is_empty() { - hotkeys.push(coldkey.clone()); - } - - // 2) Weights = current α stake per hotkey; if all zero -> even split - let weights: sp_std::vec::Vec = hotkeys - .iter() - .map(|hk| u128::from(T::BalanceOps::alpha_balance(netuid, coldkey, hk).to_u64())) - .collect(); - - let sum_weights: u128 = weights - .iter() - .copied() - .fold(0u128, |acc, w| acc.saturating_add(w)); - let total_u128: u128 = u128::from(alpha_total.to_u64()); - let n = hotkeys.len(); - - // (account, planned_amount_u64) - let mut shares: sp_std::vec::Vec<(T::AccountId, u64)> = sp_std::vec::Vec::with_capacity(n); - - if sum_weights > 0 { - // 3a) Pro‑rata base + largest remainders (deterministic) - let mut bases: sp_std::vec::Vec = sp_std::vec::Vec::with_capacity(n); - let mut remainders: sp_std::vec::Vec<(usize, u128)> = - sp_std::vec::Vec::with_capacity(n); - - let mut base_sum: u128 = 0; - for (i, (&w, hk)) in weights.iter().zip(hotkeys.iter()).enumerate() { - let numer = total_u128.saturating_mul(w); - let base = numer.checked_div(sum_weights).unwrap_or(0); - let rem = numer.checked_rem(sum_weights).unwrap_or(0); - bases.push(base); - remainders.push((i, rem)); - base_sum = base_sum.saturating_add(base); - shares.push((hk.clone(), u64::try_from(base).unwrap_or(u64::MAX))); - } - - // Distribute leftover ones to the largest remainders; tie‑break by index for determinism - let mut leftover = total_u128.saturating_sub(base_sum); - if leftover > 0 { - remainders.sort_by(|a, b| { - // Descending by remainder, then ascending by index - b.1.cmp(&a.1).then_with(|| a.0.cmp(&b.0)) - }); - let mut k = 0usize; - while leftover > 0 && k < remainders.len() { - if let Some((idx, _)) = remainders.get(k) { - if let Some((_, amt)) = shares.get_mut(*idx) { 
- *amt = amt.saturating_add(1); - } - } - leftover = leftover.saturating_sub(1); - k = k.saturating_add(1); - } - } - } else { - // 3b) Even split with deterministic round‑robin remainder - let base = total_u128.checked_div(n as u128).unwrap_or(0); - let mut rem = total_u128.checked_rem(n as u128).unwrap_or(0); - for hk in hotkeys.iter() { - let mut amt = u64::try_from(base).unwrap_or(u64::MAX); - if rem > 0 { - amt = amt.saturating_add(1); - rem = rem.saturating_sub(1); - } - shares.push((hk.clone(), amt)); - } - } - - // 4) Deposit to (coldkey, each hotkey). Track leftover if any deposit fails. - let mut leftover: u64 = 0; - let mut successes: sp_std::vec::Vec = sp_std::vec::Vec::new(); - - for (hk, amt_u64) in shares.iter() { - if *amt_u64 == 0 { - continue; - } - let amt: AlphaCurrency = (*amt_u64).into(); - match T::BalanceOps::increase_stake(coldkey, hk, netuid, amt) { - Ok(_) => successes.push(hk.clone()), - Err(e) => { - log::warn!( - "refund_alpha: increase_stake failed (cold={coldkey:?}, hot={hk:?}, netuid={netuid:?}, amt={amt_u64:?}): {e:?}" - ); - leftover = leftover.saturating_add(*amt_u64); - } - } - } - - // 5) Retry: spread any leftover across the hotkeys that succeeded in step 4. 
- if leftover > 0 && !successes.is_empty() { - let count = successes.len() as u64; - let base = leftover.checked_div(count).unwrap_or(0); - let mut rem = leftover.checked_rem(count).unwrap_or(0); - - let mut leftover_retry: u64 = 0; - for hk in successes.iter() { - let add: u64 = base.saturating_add(if rem > 0 { - rem = rem.saturating_sub(1); - 1 - } else { - 0 - }); - if add == 0 { - continue; - } - if let Err(e) = T::BalanceOps::increase_stake(coldkey, hk, netuid, add.into()) { - log::warn!( - "refund_alpha(retry): increase_stake failed (cold={coldkey:?}, hot={hk:?}, netuid={netuid:?}, amt={add:?}): {e:?}" - ); - leftover_retry = leftover_retry.saturating_add(add); - } - } - leftover = leftover_retry; - } - - // 6) Final fallback: deposit any remainder to (coldkey, coldkey). - if leftover > 0 { - let _ = T::BalanceOps::increase_stake(coldkey, coldkey, netuid, leftover.into()); - } - } - - /// Dissolve all LPs for `netuid`, refund providers, and reset all swap state. - /// - /// - **V3 path** (mechanism == 1 && SwapV3Initialized): - /// * Remove **all** positions via `do_remove_liquidity`. - /// * **Refund** each owner: - /// - TAO = Σ(position.tao + position.fee_tao) → credited to the owner's **coldkey** free balance. - /// - ALPHA = Σ(position.alpha + position.fee_alpha) → credited back via `refund_alpha`. - /// * Decrease "provided reserves" (principal only) for non‑protocol owners. - /// * Clear ActiveTickIndexManager entries, ticks, fee globals, price, tick, liquidity, - /// init flag, bitmap words, fee rate knob, and user LP flag. - /// - /// - **V2 / non‑V3 path**: - /// * No per‑position records exist; still defensively clear the same V3 storages (safe no‑ops). + /// Dissolve all LPs and clean state. 
pub fn do_dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult { let user_lp_enabled = >::is_user_liquidity_enabled(netuid); if SwapV3Initialized::::get(netuid) { - // -------- V3: close every position, aggregate refunds, clear state -------- - - // 1) Snapshot all (owner, position_id). + // 1) Snapshot (owner, position_id). struct CloseItem { owner: A, pos_id: PositionId, } let mut to_close: sp_std::vec::Vec> = sp_std::vec::Vec::new(); - for ((owner, pos_id), _pos) in Positions::::iter_prefix((netuid,)) { to_close.push(CloseItem { owner, pos_id }); } let protocol_account = Self::protocol_account_id(); - // 2) Aggregate refunds per owner while removing positions. - use sp_std::collections::btree_map::BTreeMap; - let mut refunds: BTreeMap = BTreeMap::new(); + // Non‑protocol first + to_close + .sort_by(|a, b| (a.owner == protocol_account).cmp(&(b.owner == protocol_account))); for CloseItem { owner, pos_id } in to_close.into_iter() { let rm = Self::do_remove_liquidity(netuid, &owner, pos_id)?; - // Accumulate (TAO, α) refund: principal + fees. - let tao_add = rm.tao.saturating_add(rm.fee_tao); - let alpha_add = rm.alpha.saturating_add(rm.fee_alpha); - - refunds - .entry(owner.clone()) - .and_modify(|(t, a)| { - *t = t.saturating_add(tao_add); - *a = a.saturating_add(alpha_add); - }) - .or_insert((tao_add, alpha_add)); + // τ: refund **principal only** (no τ fees). + if rm.tao > TaoCurrency::ZERO { + T::BalanceOps::increase_balance(&owner, rm.tao); + } if owner != protocol_account { + // Principal reserves decrease T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao); - T::BalanceOps::decrease_provided_alpha_reserve(netuid, rm.alpha); - } - } - // 3) Process refunds per owner. - for (owner, (tao_sum, alpha_sum)) in refunds.into_iter() { - // TAO → coldkey free balance - if tao_sum > TaoCurrency::ZERO { - T::BalanceOps::increase_balance(&owner, tao_sum); - } - - // α → split across all hotkeys owned by `owner`. 
- if !alpha_sum.is_zero() && owner != protocol_account { - Self::refund_alpha(netuid, &owner, alpha_sum); + // Burn α (principal + fees) from provided reserves; do not credit to users. + let alpha_burn = rm.alpha.saturating_add(rm.fee_alpha); + if alpha_burn > AlphaCurrency::ZERO { + T::BalanceOps::decrease_provided_alpha_reserve(netuid, alpha_burn); + } } } - // 4) Clear active tick index set by walking ticks we are about to clear. + // 3) Clear active tick index entries, then all swap state. let active_ticks: sp_std::vec::Vec = Ticks::::iter_prefix(netuid).map(|(ti, _)| ti).collect(); for ti in active_ticks { ActiveTickIndexManager::::remove(netuid, ti); } - // 5) Clear storage for this netuid. let _ = Positions::::clear_prefix((netuid,), u32::MAX, None); let _ = Ticks::::clear_prefix(netuid, u32::MAX, None); @@ -1440,22 +1277,19 @@ impl Pallet { EnabledUserLiquidity::::remove(netuid); log::debug!( - "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V3, user_lp_enabled={user_lp_enabled}, v3_state_cleared + refunds" + "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V3, user_lp_enabled={user_lp_enabled}, positions closed; τ principal refunded; α burned; state cleared" ); return Ok(()); } - // -------- V2 / non‑V3: no positions to close; still nuke any V3 residues -------- - + // V2 / non‑V3: ensure V3 residues are cleared (safe no‑ops). 
let _ = Positions::::clear_prefix((netuid,), u32::MAX, None); - let active_ticks: sp_std::vec::Vec = Ticks::::iter_prefix(netuid).map(|(ti, _)| ti).collect(); for ti in active_ticks { ActiveTickIndexManager::::remove(netuid, ti); } - let _ = Ticks::::clear_prefix(netuid, u32::MAX, None); FeeGlobalTao::::remove(netuid); From 16c59e72da61af74935891571f9682d2ae5b9f1d Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 10 Sep 2025 17:57:35 -0700 Subject: [PATCH 198/379] set toolchain version --- Dockerfile-localnet | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 70232b0553..e7e0047414 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -26,7 +26,8 @@ WORKDIR /build # Install Rust RUN set -o pipefail && curl https://sh.rustup.rs -sSf | sh -s -- -y ENV PATH="/root/.cargo/bin:${PATH}" -RUN rustup toolchain install +RUN rustup toolchain install 1.88.0 +RUN rustup default 1.88.0'' RUN rustup target add wasm32v1-none ## Build fast-runtime node From 2670fe844092048d2277024f6020eaacd45014c1 Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 10 Sep 2025 18:27:28 -0700 Subject: [PATCH 199/379] set prev runner SubtensorCI --- .github/workflows/docker-localnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index da9732f186..966a6ed38c 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -28,7 +28,7 @@ permissions: jobs: publish: - runs-on: [self-hosted, type-ccx53, type-ccx43, type-ccx33] + runs-on: SubtensorCI steps: - name: Determine Docker tag and ref From 8eb1a1b99e53a4d4a47b851d43f0804fcb0e4256 Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 10 Sep 2025 18:27:54 -0700 Subject: [PATCH 200/379] add --profile minimal to toolchain install --- Dockerfile-localnet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile-localnet 
b/Dockerfile-localnet index e7e0047414..491362e293 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -26,7 +26,7 @@ WORKDIR /build # Install Rust RUN set -o pipefail && curl https://sh.rustup.rs -sSf | sh -s -- -y ENV PATH="/root/.cargo/bin:${PATH}" -RUN rustup toolchain install 1.88.0 +RUN rustup toolchain install 1.88.0 --profile minimal RUN rustup default 1.88.0'' RUN rustup target add wasm32v1-none From c006192570296badaa2d556b44d203f679a9506a Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 10 Sep 2025 18:44:33 -0700 Subject: [PATCH 201/379] debug --- .github/workflows/docker-localnet.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 966a6ed38c..133f3dd800 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -51,6 +51,13 @@ jobs: with: ref: ${{ env.ref }} + - name: Show current Git branch + run: | + echo "===============================" + echo "Current Git branch:" + git rev-parse --abbrev-ref HEAD + echo "===============================" + - name: Set up QEMU uses: docker/setup-qemu-action@v3 From d77c2b14b35874a983fb001236d9eaef877871b2 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 11 Sep 2025 09:50:21 +0800 Subject: [PATCH 202/379] refactor the method --- evm-tests/src/subtensor.ts | 24 +++++++++++++++++++ .../neuron.precompile.reveal-weights.test.ts | 18 +++----------- .../neuron.precompile.set-weights.test.ts | 20 ++++------------ .../test/staking.precompile.reward.test.ts | 20 ++++------------ .../subnet.precompile.hyperparameter.test.ts | 19 +++------------ 5 files changed, 38 insertions(+), 63 deletions(-) diff --git a/evm-tests/src/subtensor.ts b/evm-tests/src/subtensor.ts index 9b351628d0..563437ae4a 100644 --- a/evm-tests/src/subtensor.ts +++ b/evm-tests/src/subtensor.ts @@ -377,4 +377,28 @@ export async function setTargetRegistrationsPerInterval( call: internal_tx.decodedCall, }); await 
waitForTransactionWithRetry(api, tx, alice); +} + +// Disable admin freeze window and owner hyperparam rate limiting for tests +export async function disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api: TypedApi) { + const alice = getAliceSigner() + + const currentAdminFreezeWindow = await api.query.SubtensorModule.AdminFreezeWindow.getValue() + if (currentAdminFreezeWindow !== 0) { + // Set AdminFreezeWindow to 0 + const setFreezeWindow = api.tx.AdminUtils.sudo_set_admin_freeze_window({ window: 0 }) + const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) + await waitForTransactionWithRetry(api, sudoFreezeTx, alice) + } + + const currentOwnerHyperparamRateLimit = await api.query.SubtensorModule.OwnerHyperparamRateLimit.getValue() + if (currentOwnerHyperparamRateLimit !== 0) { + // Set OwnerHyperparamRateLimit to 0 + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) + const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) + await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) + } + + assert.equal(0, await api.query.SubtensorModule.AdminFreezeWindow.getValue()) + assert.equal(0, await api.query.SubtensorModule.OwnerHyperparamRateLimit.getValue()) } \ No newline at end of file diff --git a/evm-tests/test/neuron.precompile.reveal-weights.test.ts b/evm-tests/test/neuron.precompile.reveal-weights.test.ts index 52ddc91967..99d608585d 100644 --- a/evm-tests/test/neuron.precompile.reveal-weights.test.ts +++ b/evm-tests/test/neuron.precompile.reveal-weights.test.ts @@ -1,5 +1,5 @@ import * as assert from "assert"; -import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair, waitForTransactionWithRetry } from "../src/substrate" +import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" import { devnet } from "@polkadot-api/descriptors" import { PolkadotSigner, TypedApi } from "polkadot-api"; import { convertPublicKeyToSs58, 
convertH160ToSS58 } from "../src/address-utils" @@ -14,6 +14,7 @@ import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address, addNewSubnetwork, setWeightsSetRateLimit, burnedRegister, setTempo, setCommitRevealWeightsInterval, startCall, + disableAdminFreezeWindowAndOwnerHyperparamRateLimit, } from "../src/subtensor" // hardcode some values for reveal hash @@ -70,20 +71,7 @@ describe("Test neuron precompile reveal weights", () => { await startCall(api, netuid, coldkey) console.log("test the case on subnet ", netuid) - // Disable admin freeze window and owner hyperparam rate limiting for tests - { - const alice = getAliceSigner() - - // Set AdminFreezeWindow to 0 - const setFreezeWindow = api.tx.AdminUtils.sudo_set_admin_freeze_window({ window: 0 }) - const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) - await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - - // Set OwnerHyperparamRateLimit to 0 - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) - const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) - await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) - } + await disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api) await setWeightsSetRateLimit(api, netuid, BigInt(0)) diff --git a/evm-tests/test/neuron.precompile.set-weights.test.ts b/evm-tests/test/neuron.precompile.set-weights.test.ts index 4ecc0b36db..8ff9258664 100644 --- a/evm-tests/test/neuron.precompile.set-weights.test.ts +++ b/evm-tests/test/neuron.precompile.set-weights.test.ts @@ -1,6 +1,6 @@ import * as assert from "assert"; -import { getAliceSigner, getDevnetApi, getRandomSubstrateKeypair, waitForTransactionWithRetry } from "../src/substrate" +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" import { devnet } from "@polkadot-api/descriptors" import { TypedApi } from "polkadot-api"; import { convertH160ToSS58, convertPublicKeyToSs58, } from 
"../src/address-utils" @@ -10,7 +10,8 @@ import { generateRandomEthersWallet } from "../src/utils" import { forceSetBalanceToSs58Address, forceSetBalanceToEthAddress, addNewSubnetwork, burnedRegister, setCommitRevealWeightsEnabled, setWeightsSetRateLimit, - startCall + startCall, + disableAdminFreezeWindowAndOwnerHyperparamRateLimit } from "../src/subtensor" describe("Test neuron precompile contract, set weights function", () => { @@ -38,20 +39,7 @@ describe("Test neuron precompile contract, set weights function", () => { await burnedRegister(api, netuid, convertH160ToSS58(wallet.address), coldkey) const uid = await api.query.SubtensorModule.Uids.getValue(netuid, convertH160ToSS58(wallet.address)) assert.notEqual(uid, undefined) - // Disable admin freeze window and owner hyperparam rate limiting for tests - { - const alice = getAliceSigner() - - // Set AdminFreezeWindow to 0 - const setFreezeWindow = api.tx.AdminUtils.sudo_set_admin_freeze_window({ window: 0 }) - const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) - await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - - // Set OwnerHyperparamRateLimit to 0 - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) - const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) - await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) - } + await disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api) // disable reveal and enable direct set weights await setCommitRevealWeightsEnabled(api, netuid, false) await setWeightsSetRateLimit(api, netuid, BigInt(0)) diff --git a/evm-tests/test/staking.precompile.reward.test.ts b/evm-tests/test/staking.precompile.reward.test.ts index 108e0ed88c..251fb41ea5 100644 --- a/evm-tests/test/staking.precompile.reward.test.ts +++ b/evm-tests/test/staking.precompile.reward.test.ts @@ -1,5 +1,5 @@ import * as assert from "assert"; -import { getAliceSigner, getDevnetApi, 
getRandomSubstrateKeypair, waitForTransactionWithRetry } from "../src/substrate" +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" import { devnet } from "@polkadot-api/descriptors" import { TypedApi } from "polkadot-api"; import { convertPublicKeyToSs58 } from "../src/address-utils" @@ -8,7 +8,8 @@ import { forceSetBalanceToSs58Address, addNewSubnetwork, burnedRegister, setTxRateLimit, setTempo, setWeightsSetRateLimit, setSubnetOwnerCut, setMaxAllowedUids, setMinDelegateTake, setActivityCutoff, addStake, setWeight, rootRegister, - startCall + startCall, + disableAdminFreezeWindowAndOwnerHyperparamRateLimit } from "../src/subtensor" describe("Test neuron precompile reward", () => { @@ -39,20 +40,7 @@ describe("Test neuron precompile reward", () => { await startCall(api, netuid, coldkey) console.log("test the case on subnet ", netuid) - // Disable admin freeze window and owner hyperparam rate limiting for tests - { - const alice = getAliceSigner() - - // Set AdminFreezeWindow to 0 - const setFreezeWindow = api.tx.AdminUtils.sudo_set_admin_freeze_window({ window: 0 }) - const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) - await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - - // Set OwnerHyperparamRateLimit to 0 - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) - const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) - await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) - } + await disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api) await setTxRateLimit(api, BigInt(0)) await setTempo(api, root_netuid, root_tempo) diff --git a/evm-tests/test/subnet.precompile.hyperparameter.test.ts b/evm-tests/test/subnet.precompile.hyperparameter.test.ts index 5d81049d41..e3b5708e50 100644 --- a/evm-tests/test/subnet.precompile.hyperparameter.test.ts +++ b/evm-tests/test/subnet.precompile.hyperparameter.test.ts @@ -1,13 +1,13 @@ 
import * as assert from "assert"; -import { getDevnetApi, getRandomSubstrateKeypair, getAliceSigner, waitForTransactionWithRetry } from "../src/substrate" +import { getDevnetApi, getRandomSubstrateKeypair } from "../src/substrate" import { devnet } from "@polkadot-api/descriptors" import { TypedApi } from "polkadot-api"; import { convertPublicKeyToSs58 } from "../src/address-utils" import { generateRandomEthersWallet } from "../src/utils"; import { ISubnetABI, ISUBNET_ADDRESS } from "../src/contracts/subnet" import { ethers } from "ethers" -import { forceSetBalanceToEthAddress, forceSetBalanceToSs58Address } from "../src/subtensor" +import { disableAdminFreezeWindowAndOwnerHyperparamRateLimit, forceSetBalanceToEthAddress, forceSetBalanceToSs58Address } from "../src/subtensor" describe("Test the Subnet precompile contract", () => { // init eth part @@ -26,20 +26,7 @@ describe("Test the Subnet precompile contract", () => { await forceSetBalanceToSs58Address(api, convertPublicKeyToSs58(hotkey2.publicKey)) await forceSetBalanceToEthAddress(api, wallet.address) - // Disable admin freeze window and owner hyperparam rate limiting for tests - { - const alice = getAliceSigner() - - // Set AdminFreezeWindow to 0 - const setFreezeWindow = api.tx.AdminUtils.sudo_set_admin_freeze_window({ window: 0 }) - const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) - await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - - // Set OwnerHyperparamRateLimit to 0 - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) - const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) - await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) - } + await disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api) }) it("Can register network without identity info", async () => { From 629cedb90f19daf04a926916fa32ea152e9ddc81 Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 11 Sep 2025 10:18:10 
+0800 Subject: [PATCH 203/379] commit Cargo.lock --- evm-tests/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evm-tests/package.json b/evm-tests/package.json index ae756ae55f..00658847b4 100644 --- a/evm-tests/package.json +++ b/evm-tests/package.json @@ -1,6 +1,6 @@ { "scripts": { - "test": "mocha --timeout 999999 --retries 3 --file src/setup.ts --require ts-node/register test/*test.ts" + "test": "mocha --timeout 999999 --retries 3 --file src/setup.ts --require ts-node/register test/neuron*reveal*test.ts" }, "keywords": [], "author": "", From 915b0023f5da380c280fa18ecef64193064b28bb Mon Sep 17 00:00:00 2001 From: open-junius Date: Thu, 11 Sep 2025 10:19:37 +0800 Subject: [PATCH 204/379] cargo fix --- evm-tests/src/subtensor.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/evm-tests/src/subtensor.ts b/evm-tests/src/subtensor.ts index 563437ae4a..e1091c1645 100644 --- a/evm-tests/src/subtensor.ts +++ b/evm-tests/src/subtensor.ts @@ -392,7 +392,7 @@ export async function disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api: T } const currentOwnerHyperparamRateLimit = await api.query.SubtensorModule.OwnerHyperparamRateLimit.getValue() - if (currentOwnerHyperparamRateLimit !== 0) { + if (currentOwnerHyperparamRateLimit !== BigInt(0)) { // Set OwnerHyperparamRateLimit to 0 const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) @@ -400,5 +400,5 @@ export async function disableAdminFreezeWindowAndOwnerHyperparamRateLimit(api: T } assert.equal(0, await api.query.SubtensorModule.AdminFreezeWindow.getValue()) - assert.equal(0, await api.query.SubtensorModule.OwnerHyperparamRateLimit.getValue()) + assert.equal(BigInt(0), await api.query.SubtensorModule.OwnerHyperparamRateLimit.getValue()) } \ No newline at end of file From d3b0299ff6fbcdf62de23ce58282b6868841f5b2 Mon Sep 17 00:00:00 2001 From: 
open-junius Date: Thu, 11 Sep 2025 10:21:41 +0800 Subject: [PATCH 205/379] fix type --- evm-tests/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evm-tests/package.json b/evm-tests/package.json index 00658847b4..ae756ae55f 100644 --- a/evm-tests/package.json +++ b/evm-tests/package.json @@ -1,6 +1,6 @@ { "scripts": { - "test": "mocha --timeout 999999 --retries 3 --file src/setup.ts --require ts-node/register test/neuron*reveal*test.ts" + "test": "mocha --timeout 999999 --retries 3 --file src/setup.ts --require ts-node/register test/*test.ts" }, "keywords": [], "author": "", From 5789b9e5c24cfc532dbd3ca239f967be2e9e0c31 Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 10 Sep 2025 21:13:07 -0700 Subject: [PATCH 206/379] extend workflow --- .github/workflows/check-bittensor-e2e-tests.yml.yml | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/.github/workflows/check-bittensor-e2e-tests.yml.yml b/.github/workflows/check-bittensor-e2e-tests.yml.yml index 902ea2cd39..721eb96994 100644 --- a/.github/workflows/check-bittensor-e2e-tests.yml.yml +++ b/.github/workflows/check-bittensor-e2e-tests.yml.yml @@ -110,6 +110,7 @@ jobs: build-image-with-current-branch: needs: check-label + if: needs.check-label.outputs.skip-bittensor-e2e-tests == 'false' runs-on: [self-hosted, type-ccx33] steps: - name: Checkout code @@ -211,12 +212,6 @@ jobs: - name: Retag Docker Image run: docker tag localnet ghcr.io/opentensor/subtensor-localnet:devnet-ready -# - name: Run tests -# working-directory: ${{ github.workspace }}/btcli -# run: | -# source ${{ github.workspace }}/venv/bin/activate -# uv run pytest ${{ matrix.test-file }} -s - - name: Run with retry working-directory: ${{ github.workspace }}/btcli run: | @@ -311,12 +306,6 @@ jobs: - name: Retag Docker Image run: docker tag localnet ghcr.io/opentensor/subtensor-localnet:devnet-ready -# - name: Run tests -# working-directory: ${{ github.workspace }}/bittensor -# run: | -# source ${{ 
github.workspace }}/venv/bin/activate -# uv run pytest ${{ matrix.test-file }} -s - - name: Run with retry working-directory: ${{ github.workspace }}/bittensor run: | From 70199d99ded569e74039491e57efe1ed233410d1 Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 10 Sep 2025 21:13:15 -0700 Subject: [PATCH 207/379] extend .dockerignore --- .dockerignore | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index 2886a059a9..e12ea8709b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,6 +1,26 @@ +# IDE/Editor configs .devcontainer -.github .vscode +.idea +*.swp +*.swo + +# Build artifacts target/ +*.pyc +*.pyo +*.pyd +__pycache__/ + +# Git-related +.git +.gitignore + +# CI/CD +.github .dockerignore +.gitattributes + +# Dockerfiles Dockerfile +Dockerfile-localnet \ No newline at end of file From fe6410948081cf71974ae9a98f6af115dace257b Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 10 Sep 2025 21:17:33 -0700 Subject: [PATCH 208/379] try to use cache for step `Build and push Docker image` --- .github/workflows/docker-localnet.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 133f3dd800..13068682c7 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -86,3 +86,5 @@ jobs: tags: | ghcr.io/${{ github.repository }}-localnet:${{ env.tag }} ${{ env.latest_tag == 'true' && format('ghcr.io/{0}-localnet:latest', github.repository) || '' }} + cache-from: type=gha + cache-to: type=gha,mode=max From 45bf58df70cef64071888014921935c8c512b07a Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Thu, 11 Sep 2025 10:38:29 -0300 Subject: [PATCH 209/379] trigger ci From 31fc0f248ba6daee10847ee8ef5e9db463b90962 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Thu, 11 Sep 2025 14:54:03 -0300 Subject: [PATCH 210/379] added umbrella crate package to workspace --- Cargo.lock | 15 +++++---------- 
Cargo.toml | 1 + 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b2048237da..bf296b01cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6913,7 +6913,7 @@ dependencies = [ "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-preimage", - "pallet-proxy 38.0.0", + "pallet-proxy 40.1.0", "pallet-registry", "pallet-safe-mode", "pallet-scheduler", @@ -7946,18 +7946,13 @@ dependencies = [ [[package]] name = "pallet-proxy" -version = "38.0.0" +version = "40.1.0" dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", "pallet-balances", "pallet-utility 38.0.0", "parity-scale-codec", + "polkadot-sdk-frame", "scale-info", - "sp-core", - "sp-io", - "sp-runtime", "subtensor-macros", ] @@ -8012,7 +8007,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "pallet-proxy 40.1.0", + "pallet-proxy 40.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6)", "pallet-utility 40.0.0", "parity-scale-codec", "scale-info", @@ -13702,7 +13697,7 @@ dependencies = [ "pallet-evm-precompile-modexp", "pallet-evm-precompile-sha3fips", "pallet-evm-precompile-simple", - "pallet-proxy 38.0.0", + "pallet-proxy 40.1.0", "pallet-subtensor", "pallet-subtensor-swap", "precompile-utils", diff --git a/Cargo.toml b/Cargo.toml index 3415b8624d..2fb9ea9644 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -113,6 +113,7 @@ expander = "2" ahash = { version = "0.8", default-features = false } regex = { version = "1.11.1", default-features = false } +frame = { package = "polkadot-sdk-frame", git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } frame-executive = 
{ git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-stable2503-6", default-features = false } From 442fa3def32be3f107bc1d09a1f565b34819be2b Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Thu, 11 Sep 2025 14:56:17 -0300 Subject: [PATCH 211/379] upgrade pallet-proxy --- pallets/proxy/Cargo.toml | 30 +- pallets/proxy/src/benchmarking.rs | 437 +++++++++++++++++++++++------- pallets/proxy/src/lib.rs | 302 ++++++++++++++++----- pallets/proxy/src/tests.rs | 346 ++++++++++++++++++++--- pallets/proxy/src/weights.rs | 295 +++++++++++--------- 5 files changed, 1038 insertions(+), 372 deletions(-) diff --git a/pallets/proxy/Cargo.toml b/pallets/proxy/Cargo.toml index 4f5dddfed1..ffca8ad8a7 100644 --- a/pallets/proxy/Cargo.toml +++ b/pallets/proxy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-proxy" -version = "38.0.0" +version = "40.1.0" authors = ["Bittensor Nucleus Team"] edition.workspace = true license = "Apache-2.0" @@ -15,43 +15,29 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { workspace = true, features = ["derive", "max-encoded-len"] } +codec = { workspace = true, features = ["max-encoded-len"] } +frame = { workspace = true, features = ["runtime"] } scale-info = { workspace = true, features = ["derive"] } -frame-benchmarking = { workspace = true, optional = true } -frame-support.workspace = true -frame-system.workspace = true -sp-io.workspace = true -sp-runtime.workspace = true subtensor-macros.workspace = true [dev-dependencies] -pallet-balances = { workspace = true, default-features = true } -pallet-utility = { workspace = true, default-features = true } -sp-core = { workspace = true, default-features = true } +pallet-balances = { default-features = true, workspace = true } +pallet-utility = { default-features = true, workspace = true } [features] default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", - "frame-support/std", - "frame-system/std", + "frame/std", "scale-info/std", - 
"sp-io/std", - "sp-runtime/std", ] runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", + "frame/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-utility/runtime-benchmarks", ] try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "sp-runtime/try-runtime", + "frame/try-runtime", "pallet-balances/try-runtime", "pallet-utility/try-runtime", ] diff --git a/pallets/proxy/src/benchmarking.rs b/pallets/proxy/src/benchmarking.rs index 0e0d89f03e..e24e877160 100644 --- a/pallets/proxy/src/benchmarking.rs +++ b/pallets/proxy/src/benchmarking.rs @@ -1,13 +1,13 @@ // This file is part of Substrate. -// + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 -// + // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0/ +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -22,9 +22,9 @@ use super::*; use crate::Pallet as Proxy; use alloc::{boxed::Box, vec}; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; -use frame_system::{RawOrigin, pallet_prelude::BlockNumberFor}; -use sp_runtime::traits::{Bounded, CheckedDiv}; +use frame::benchmarking::prelude::{ + BenchmarkError, RawOrigin, account, benchmarks, impl_test_function, whitelisted_caller, +}; const SEED: u32 = 0; @@ -32,15 +32,13 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -fn half_max_balance() -> BalanceOf { - BalanceOf::::max_value() - .checked_div(&BalanceOf::::from(2_u32)) - .unwrap_or_else(BalanceOf::::max_value) +fn 
assert_has_event(generic_event: ::RuntimeEvent) { + frame_system::Pallet::::assert_has_event(generic_event.into()); } fn add_proxies(n: u32, maybe_who: Option) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(whitelisted_caller); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); for i in 0..n { let real = T::Lookup::unlookup(account("target", i, SEED)); @@ -61,12 +59,12 @@ fn add_announcements( ) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); let caller_lookup = T::Lookup::unlookup(caller.clone()); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); let real = if let Some(real) = maybe_real { real } else { let real = account("real", 0, SEED); - T::Currency::make_free_balance_be(&real, half_max_balance::()); + T::Currency::make_free_balance_be(&real, BalanceOf::::max_value() / 2u32.into()); Proxy::::add_proxy( RawOrigin::Signed(real.clone()).into(), caller_lookup, @@ -86,157 +84,256 @@ fn add_announcements( Ok(()) } -benchmarks! { - proxy { - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; - let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn proxy(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); + // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); - }: _(RawOrigin::Signed(caller), real_lookup, Some(T::ProxyType::default()), Box::new(call)) - verify { - assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); + + #[extrinsic_call] + _( + RawOrigin::Signed(caller), + real_lookup, + Some(T::ProxyType::default()), + Box::new(call), + ); + + assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()); + + Ok(()) } - proxy_announced { - let a in 0 .. T::MaxPending::get().saturating_sub(1); - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + #[benchmark] + fn proxy_announced( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; + // In this case the caller is the "target" proxy let caller: T::AccountId = account("pure", 0, SEED); - let delegate: T::AccountId = account("target", p.saturating_sub(1), SEED); + let delegate: T::AccountId = account("target", p - 1, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); - T::Currency::make_free_balance_be(&delegate, half_max_balance::()); + T::Currency::make_free_balance_be(&delegate, BalanceOf::::max_value() / 2u32.into()); + // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(delegate.clone()).into(), real_lookup.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(delegate.clone()), None)?; - }: _(RawOrigin::Signed(caller), delegate_lookup, real_lookup, Some(T::ProxyType::default()), Box::new(call)) - verify { - assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) + + #[extrinsic_call] + _( + RawOrigin::Signed(caller), + delegate_lookup, + real_lookup, + Some(T::ProxyType::default()), + Box::new(call), + ); + + assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()); + + Ok(()) } - remove_announcement { - let a in 0 .. T::MaxPending::get().saturating_sub(1); - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; - let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); + #[benchmark] + fn remove_announcement( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); + // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real_lookup.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(caller.clone()), real_lookup, T::CallHasher::hash_of(&call)) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + real_lookup, + T::CallHasher::hash_of(&call), + ); + let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); + + Ok(()) } - reject_announcement { - let a in 0 .. T::MaxPending::get().saturating_sub(1); - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; - let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); + #[benchmark] + fn reject_announcement( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); let caller_lookup = T::Lookup::unlookup(caller.clone()); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); + // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real.clone()); - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real_lookup, T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(real), caller_lookup, T::CallHasher::hash_of(&call)) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(real), + caller_lookup, + T::CallHasher::hash_of(&call), + ); + let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); + + Ok(()) } - announce { - let a in 0 .. T::MaxPending::get().saturating_sub(1); - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; - let caller: T::AccountId = account("target", p.saturating_sub(1), SEED); - T::Currency::make_free_balance_be(&caller, half_max_balance::()); + #[benchmark] + fn announce( + a: Linear<0, { T::MaxPending::get() - 1 }>, + p: Linear<1, { T::MaxProxies::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; + // In this case the caller is the "target" proxy + let caller: T::AccountId = account("target", p - 1, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); + // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real.clone()); add_announcements::(a, Some(caller.clone()), None)?; - let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = + frame_system::Call::::remark { remark: vec![] }.into(); let call_hash = T::CallHasher::hash_of(&call); - }: _(RawOrigin::Signed(caller.clone()), real_lookup, call_hash) - verify { - assert_last_event::(Event::Announced { real, proxy: caller, call_hash }.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), real_lookup, call_hash); + + assert_last_event::( + Event::Announced { + real, + proxy: caller, + call_hash, + } + .into(), + ); + + Ok(()) } - add_proxy { - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + #[benchmark] + fn add_proxy(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); let real = T::Lookup::unlookup(account("target", T::MaxProxies::get(), SEED)); - }: _( - RawOrigin::Signed(caller.clone()), - real, - T::ProxyType::default(), - BlockNumberFor::::zero() - ) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + real, + T::ProxyType::default(), + BlockNumberFor::::zero(), + ); + let (proxies, _) = Proxies::::get(caller); - assert_eq!(proxies.len() as u32, p.saturating_add(1)); + assert_eq!(proxies.len() as u32, p + 1); + + Ok(()) } - remove_proxy { - let p in 1 .. 
(T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + #[benchmark] + fn remove_proxy(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); let delegate = T::Lookup::unlookup(account("target", 0, SEED)); - }: _( - RawOrigin::Signed(caller.clone()), - delegate, - T::ProxyType::default(), - BlockNumberFor::::zero() - ) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + delegate, + T::ProxyType::default(), + BlockNumberFor::::zero(), + ); + let (proxies, _) = Proxies::::get(caller); - assert_eq!(proxies.len() as u32, p.saturating_sub(1)); + assert_eq!(proxies.len() as u32, p - 1); + + Ok(()) } - remove_proxies { - let p in 1 .. (T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + #[benchmark] + fn remove_proxies(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller.clone())) - verify { + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone())); + let (proxies, _) = Proxies::::get(caller); assert_eq!(proxies.len() as u32, 0); + + Ok(()) } - create_pure { - let p in 1 .. 
(T::MaxProxies::get().saturating_sub(1)) => add_proxies::(p, None)?; + #[benchmark] + fn create_pure(p: Linear<1, { T::MaxProxies::get() - 1 }>) -> Result<(), BenchmarkError> { + add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); - }: _( - RawOrigin::Signed(caller.clone()), - T::ProxyType::default(), - BlockNumberFor::::zero(), - 0 - ) - verify { + + #[extrinsic_call] + _( + RawOrigin::Signed(caller.clone()), + T::ProxyType::default(), + BlockNumberFor::::zero(), + 0, + ); + let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); - assert_last_event::(Event::PureCreated { - pure: pure_account, - who: caller, - proxy_type: T::ProxyType::default(), - disambiguation_index: 0, - }.into()); - } + assert_last_event::( + Event::PureCreated { + pure: pure_account, + who: caller, + proxy_type: T::ProxyType::default(), + disambiguation_index: 0, + } + .into(), + ); - kill_pure { - let p in 0 .. (T::MaxProxies::get().saturating_sub(2)); + Ok(()) + } + #[benchmark] + fn kill_pure(p: Linear<0, { T::MaxProxies::get() - 2 }>) -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -244,17 +341,149 @@ benchmarks! 
{ RawOrigin::Signed(whitelisted_caller()).into(), T::ProxyType::default(), BlockNumberFor::::zero(), - 0 + 0, )?; - let height = system::Pallet::::block_number(); - let ext_index = system::Pallet::::extrinsic_index().unwrap_or(0); + let height = T::BlockNumberProvider::current_block_number(); + let ext_index = frame_system::Pallet::::extrinsic_index().unwrap_or(0); let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); add_proxies::(p, Some(pure_account.clone()))?; - ensure!(Proxies::::contains_key(&pure_account), "pure proxy not created"); - }: _(RawOrigin::Signed(pure_account.clone()), caller_lookup, T::ProxyType::default(), 0, height, ext_index) - verify { + ensure!( + Proxies::::contains_key(&pure_account), + "pure proxy not created" + ); + + #[extrinsic_call] + _( + RawOrigin::Signed(pure_account.clone()), + caller_lookup, + T::ProxyType::default(), + 0, + height, + ext_index, + ); + assert!(!Proxies::::contains_key(&pure_account)); + + Ok(()) + } + + #[benchmark] + fn poke_deposit() -> Result<(), BenchmarkError> { + // Create accounts using the same pattern as other benchmarks + let account_1: T::AccountId = account("account", 1, SEED); + let account_2: T::AccountId = account("account", 2, SEED); + let account_3: T::AccountId = account("account", 3, SEED); + + // Fund accounts + T::Currency::make_free_balance_be(&account_1, BalanceOf::::max_value() / 100u8.into()); + T::Currency::make_free_balance_be(&account_2, BalanceOf::::max_value() / 100u8.into()); + T::Currency::make_free_balance_be(&account_3, BalanceOf::::max_value() / 100u8.into()); + + // Add proxy relationships + Proxy::::add_proxy( + RawOrigin::Signed(account_1.clone()).into(), + T::Lookup::unlookup(account_2.clone()), + T::ProxyType::default(), + BlockNumberFor::::zero(), + )?; + Proxy::::add_proxy( + RawOrigin::Signed(account_2.clone()).into(), + T::Lookup::unlookup(account_3.clone()), + T::ProxyType::default(), + BlockNumberFor::::zero(), + )?; + let (proxies, 
initial_proxy_deposit) = Proxies::::get(&account_2); + assert!(!initial_proxy_deposit.is_zero()); + assert_eq!( + initial_proxy_deposit, + T::Currency::reserved_balance(&account_2) + ); + + // Create announcement + Proxy::::announce( + RawOrigin::Signed(account_2.clone()).into(), + T::Lookup::unlookup(account_1.clone()), + T::CallHasher::hash_of(&("add_announcement", 1)), + )?; + let (announcements, initial_announcement_deposit) = Announcements::::get(&account_2); + assert!(!initial_announcement_deposit.is_zero()); + assert_eq!( + initial_announcement_deposit.saturating_add(initial_proxy_deposit), + T::Currency::reserved_balance(&account_2) + ); + + // Artificially inflate deposits and reserve the extra amount + let extra_proxy_deposit = initial_proxy_deposit; // Double the deposit + let extra_announcement_deposit = initial_announcement_deposit; // Double the deposit + let total = extra_proxy_deposit.saturating_add(extra_announcement_deposit); + + T::Currency::reserve(&account_2, total)?; + + let initial_reserved = T::Currency::reserved_balance(&account_2); + assert_eq!(initial_reserved, total.saturating_add(total)); // Double + + // Update storage with increased deposits + Proxies::::insert( + &account_2, + ( + proxies, + initial_proxy_deposit.saturating_add(extra_proxy_deposit), + ), + ); + Announcements::::insert( + &account_2, + ( + announcements, + initial_announcement_deposit.saturating_add(extra_announcement_deposit), + ), + ); + + // Verify artificial state + let (_, inflated_proxy_deposit) = Proxies::::get(&account_2); + let (_, inflated_announcement_deposit) = Announcements::::get(&account_2); + assert_eq!( + inflated_proxy_deposit, + initial_proxy_deposit.saturating_add(extra_proxy_deposit) + ); + assert_eq!( + inflated_announcement_deposit, + initial_announcement_deposit.saturating_add(extra_announcement_deposit) + ); + + #[extrinsic_call] + _(RawOrigin::Signed(account_2.clone())); + + // Verify results + let (_, final_proxy_deposit) = 
Proxies::::get(&account_2); + let (_, final_announcement_deposit) = Announcements::::get(&account_2); + assert_eq!(final_proxy_deposit, initial_proxy_deposit); + assert_eq!(final_announcement_deposit, initial_announcement_deposit); + + let final_reserved = T::Currency::reserved_balance(&account_2); + assert_eq!(final_reserved, initial_reserved.saturating_sub(total)); + + // Verify events + assert_has_event::( + Event::DepositPoked { + who: account_2.clone(), + kind: DepositKind::Proxies, + old_deposit: inflated_proxy_deposit, + new_deposit: final_proxy_deposit, + } + .into(), + ); + assert_last_event::( + Event::DepositPoked { + who: account_2, + kind: DepositKind::Announcements, + old_deposit: inflated_announcement_deposit, + new_deposit: final_announcement_deposit, + } + .into(), + ); + + Ok(()) } impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/pallets/proxy/src/lib.rs b/pallets/proxy/src/lib.rs index 9a7aab857a..807bac088c 100644 --- a/pallets/proxy/src/lib.rs +++ b/pallets/proxy/src/lib.rs @@ -1,13 +1,13 @@ // This file is part of Substrate. -// + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 -// + // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0/ +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -34,24 +34,12 @@ mod tests; pub mod weights; extern crate alloc; - use alloc::{boxed::Box, vec}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::pallet_prelude::{Pays, Weight}; -use frame_support::{ - BoundedVec, - dispatch::GetDispatchInfo, - ensure, - traits::{Currency, Get, InstanceFilter, IsSubType, IsType, OriginTrait, ReservableCurrency}, +use frame::{ + prelude::*, + traits::{Currency, InstanceFilter, ReservableCurrency}, }; -use frame_system::{self as system, ensure_signed, pallet_prelude::BlockNumberFor}; pub use pallet::*; -use scale_info::{TypeInfo, prelude::cmp::Ordering}; -use sp_io::hashing::blake2_256; -use sp_runtime::{ - DispatchError, DispatchResult, RuntimeDebug, - traits::{Dispatchable, Hash, Saturating, StaticLookup, TrailingZeroInput, Zero}, -}; use subtensor_macros::freeze_struct; pub use weights::WeightInfo; @@ -60,6 +48,9 @@ type CallHashOf = <::CallHasher as Hash>::Output; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type BlockNumberFor = + <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// The parameters under which a particular account has a proxy relationship with some other @@ -100,11 +91,29 @@ pub struct Announcement { height: BlockNumber, } -#[frame_support::pallet] +/// The type of deposit +#[derive( + Encode, + Decode, + Clone, + Copy, + Eq, + PartialEq, + RuntimeDebug, + MaxEncodedLen, + TypeInfo, + DecodeWithMemTracking, +)] +pub enum DepositKind { + /// Proxy registration deposit + Proxies, + /// Announcement deposit + Announcements, +} + +#[frame::pallet] pub mod pallet { - use super::{DispatchResult, *}; - use 
frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; + use super::*; #[pallet::pallet] pub struct Pallet(_); @@ -134,7 +143,7 @@ pub mod pallet { + Member + Ord + PartialOrd - + InstanceFilter<::RuntimeCall> + + frame::traits::InstanceFilter<::RuntimeCall> + Default + MaxEncodedLen; @@ -180,6 +189,30 @@ pub mod pallet { /// into a pre-existing storage value. #[pallet::constant] type AnnouncementDepositFactor: Get>; + + /// Query the current block number. + /// + /// Must return monotonically increasing values when called from consecutive blocks. + /// Can be configured to return either: + /// - the local block number of the runtime via `frame_system::Pallet` + /// - a remote block number, eg from the relay chain through `RelaychainDataProvider` + /// - an arbitrary value through a custom implementation of the trait + /// + /// There is currently no migration provided to "hot-swap" block number providers and it may + /// result in undefined behavior when doing so. Parachains are therefore best off setting + /// this to their local block number provider if they have the pallet already deployed. + /// + /// Suggested values: + /// - Solo- and Relay-chains: `frame_system::Pallet` + /// - Parachains that may produce blocks sparingly or only when needed (on-demand): + /// - already have the pallet deployed: `frame_system::Pallet` + /// - are freshly deploying this pallet: `RelaychainDataProvider` + /// - Parachains with a reliably block production rate (PLO or bulk-coretime): + /// - already have the pallet deployed: `frame_system::Pallet` + /// - are freshly deploying this pallet: no strong recommendation. Both local and remote + /// providers can be used. Relay provider can be a bit better in cases where the + /// parachain is lagging its block production to avoid clock skew. 
+ type BlockNumberProvider: BlockNumberProvider; } #[pallet::call] @@ -196,13 +229,11 @@ pub mod pallet { #[pallet::call_index(0)] #[pallet::weight({ let di = call.get_dispatch_info(); - let inner_call_weight = match di.pays_fee { - Pays::Yes => di.call_weight, - Pays::No => Weight::zero(), - }; - let base_weight = T::WeightInfo::proxy(T::MaxProxies::get()) - .saturating_add(T::DbWeight::get().reads_writes(1, 1)); - (base_weight.saturating_add(inner_call_weight), di.class) + (T::WeightInfo::proxy(T::MaxProxies::get()) + // AccountData for inner call origin accountdata. + .saturating_add(T::DbWeight::get().reads_writes(1, 1)) + .saturating_add(di.call_weight), + di.class, di.pays_fee) })] pub fn proxy( origin: OriginFor, @@ -283,12 +314,12 @@ pub mod pallet { /// /// - `proxy_type`: The type of the proxy that the sender will be registered as over the /// new account. This will almost always be the most permissive `ProxyType` possible to - /// allow for maximum flexibility. + /// allow for maximum flexibility. /// - `index`: A disambiguation index, in case this is called multiple times in the same - /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just - /// want to use `0`. + /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just + /// want to use `0`. /// - `delay`: The announcement period required of the initial proxy. Will generally be - /// zero. + /// zero. /// /// Fails with `Duplicate` if this has already been called in this transaction, from the /// same sender, with the same parameters. 
@@ -409,7 +440,7 @@ pub mod pallet { let announcement = Announcement { real: real.clone(), call_hash, - height: system::Pallet::::block_number(), + height: T::BlockNumberProvider::current_block_number(), }; Announcements::::try_mutate(&who, |(pending, deposit)| { @@ -526,7 +557,7 @@ pub mod pallet { let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; let call_hash = T::CallHasher::hash_of(&call); - let now = system::Pallet::::block_number(); + let now = T::BlockNumberProvider::current_block_number(); Self::edit_announcements(&delegate, |ann| { ann.real != real || ann.call_hash != call_hash @@ -538,6 +569,109 @@ pub mod pallet { Ok(()) } + + /// Poke / Adjust deposits made for proxies and announcements based on current values. + /// This can be used by accounts to possibly lower their locked amount. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// The transaction fee is waived if the deposit amount has changed. + /// + /// Emits `DepositPoked` if successful. 
+ #[pallet::call_index(10)] + #[pallet::weight(T::WeightInfo::poke_deposit())] + pub fn poke_deposit(origin: OriginFor) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + let mut deposit_updated = false; + + // Check and update proxy deposits + Proxies::::try_mutate_exists(&who, |maybe_proxies| -> DispatchResult { + let (proxies, old_deposit) = maybe_proxies.take().unwrap_or_default(); + let maybe_new_deposit = Self::rejig_deposit( + &who, + old_deposit, + T::ProxyDepositBase::get(), + T::ProxyDepositFactor::get(), + proxies.len(), + )?; + + match maybe_new_deposit { + Some(new_deposit) if new_deposit != old_deposit => { + *maybe_proxies = Some((proxies, new_deposit)); + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Proxies, + old_deposit, + new_deposit, + }); + } + Some(_) => { + *maybe_proxies = Some((proxies, old_deposit)); + } + None => { + *maybe_proxies = None; + if !old_deposit.is_zero() { + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Proxies, + old_deposit, + new_deposit: BalanceOf::::zero(), + }); + } + } + } + Ok(()) + })?; + + // Check and update announcement deposits + Announcements::::try_mutate_exists(&who, |maybe_announcements| -> DispatchResult { + let (announcements, old_deposit) = maybe_announcements.take().unwrap_or_default(); + let maybe_new_deposit = Self::rejig_deposit( + &who, + old_deposit, + T::AnnouncementDepositBase::get(), + T::AnnouncementDepositFactor::get(), + announcements.len(), + )?; + + match maybe_new_deposit { + Some(new_deposit) if new_deposit != old_deposit => { + *maybe_announcements = Some((announcements, new_deposit)); + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Announcements, + old_deposit, + new_deposit, + }); + } + Some(_) => { + *maybe_announcements = Some((announcements, old_deposit)); + } + None => { + 
*maybe_announcements = None; + if !old_deposit.is_zero() { + deposit_updated = true; + Self::deposit_event(Event::DepositPoked { + who: who.clone(), + kind: DepositKind::Announcements, + old_deposit, + new_deposit: BalanceOf::::zero(), + }); + } + } + } + Ok(()) + })?; + + Ok(if deposit_updated { + Pays::No.into() + } else { + Pays::Yes.into() + }) + } } #[pallet::event] @@ -584,6 +718,13 @@ pub mod pallet { // The index originally passed to `create_pure` when this pure proxy was created. disambiguation_index: u16, }, + /// A deposit stored for proxies or announcements was poked / updated. + DepositPoked { + who: T::AccountId, + kind: DepositKind, + old_deposit: BalanceOf, + new_deposit: BalanceOf, + }, } #[pallet::error] @@ -635,6 +776,22 @@ pub mod pallet { ), ValueQuery, >; + + #[pallet::view_functions_experimental] + impl Pallet { + /// Check if a `RuntimeCall` is allowed for a given `ProxyType`. + pub fn check_permissions( + call: ::RuntimeCall, + proxy_type: T::ProxyType, + ) -> bool { + proxy_type.filter(&call) + } + + /// Check if one `ProxyType` is a subset of another `ProxyType`. + pub fn is_superset(to_check: T::ProxyType, against: T::ProxyType) -> bool { + to_check.is_superset(&against) + } + } } impl Pallet { @@ -662,13 +819,13 @@ impl Pallet { /// /// - `who`: The spawner account. /// - `proxy_type`: The type of the proxy that the sender will be registered as over the - /// new account. This will almost always be the most permissive `ProxyType` possible to - /// allow for maximum flexibility. + /// new account. This will almost always be the most permissive `ProxyType` possible to + /// allow for maximum flexibility. /// - `index`: A disambiguation index, in case this is called multiple times in the same - /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just - /// want to use `0`. + /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just + /// want to use `0`. 
/// - `maybe_when`: The block height and extrinsic index of when the pure account was - /// created. None to use current block height and extrinsic index. + /// created. None to use current block height and extrinsic index. pub fn pure_account( who: &T::AccountId, proxy_type: &T::ProxyType, @@ -677,8 +834,8 @@ impl Pallet { ) -> T::AccountId { let (height, ext_index) = maybe_when.unwrap_or_else(|| { ( - system::Pallet::::block_number(), - system::Pallet::::extrinsic_index().unwrap_or_default(), + T::BlockNumberProvider::current_block_number(), + frame_system::Pallet::::extrinsic_index().unwrap_or_default(), ) }); let entropy = ( @@ -701,7 +858,7 @@ impl Pallet { /// - `delegatee`: The account that the `delegator` would like to make a proxy. /// - `proxy_type`: The permissions allowed for this proxy account. /// - `delay`: The announcement period required of the initial proxy. Will generally be - /// zero. + /// zero. pub fn add_proxy_delegate( delegator: &T::AccountId, delegatee: T::AccountId, @@ -723,14 +880,10 @@ impl Pallet { .try_insert(i, proxy_def) .map_err(|_| Error::::TooMany)?; let new_deposit = Self::deposit(proxies.len() as u32); - match new_deposit.cmp(deposit) { - Ordering::Greater => { - T::Currency::reserve(delegator, new_deposit.saturating_sub(*deposit))?; - } - Ordering::Less => { - T::Currency::unreserve(delegator, deposit.saturating_sub(new_deposit)); - } - Ordering::Equal => (), + if new_deposit > *deposit { + T::Currency::reserve(delegator, new_deposit - *deposit)?; + } else if new_deposit < *deposit { + T::Currency::unreserve(delegator, *deposit - new_deposit); } *deposit = new_deposit; Self::deposit_event(Event::::ProxyAdded { @@ -750,7 +903,7 @@ impl Pallet { /// - `delegatee`: The account that the `delegator` would like to make a proxy. /// - `proxy_type`: The permissions allowed for this proxy account. /// - `delay`: The announcement period required of the initial proxy. Will generally be - /// zero. + /// zero. 
pub fn remove_proxy_delegate( delegator: &T::AccountId, delegatee: T::AccountId, @@ -770,14 +923,10 @@ impl Pallet { .ok_or(Error::::NotFound)?; proxies.remove(i); let new_deposit = Self::deposit(proxies.len() as u32); - match new_deposit.cmp(&old_deposit) { - Ordering::Greater => { - T::Currency::reserve(delegator, new_deposit.saturating_sub(old_deposit))?; - } - Ordering::Less => { - T::Currency::unreserve(delegator, old_deposit.saturating_sub(new_deposit)); - } - Ordering::Equal => (), + if new_deposit > old_deposit { + T::Currency::reserve(delegator, new_deposit - old_deposit)?; + } else if new_deposit < old_deposit { + T::Currency::unreserve(delegator, old_deposit - new_deposit); } if !proxies.is_empty() { *x = Some((proxies, new_deposit)) @@ -813,14 +962,17 @@ impl Pallet { } else { base.saturating_add(factor.saturating_mul((len as u32).into())) }; - match new_deposit.cmp(&old_deposit) { - Ordering::Greater => { - T::Currency::reserve(who, new_deposit.saturating_sub(old_deposit))?; - } - Ordering::Less => { - T::Currency::unreserve(who, old_deposit.saturating_sub(new_deposit)); + if new_deposit > old_deposit { + T::Currency::reserve(who, new_deposit.saturating_sub(old_deposit))?; + } else if new_deposit < old_deposit { + let excess = old_deposit.saturating_sub(new_deposit); + let remaining_unreserved = T::Currency::unreserve(who, excess); + if !remaining_unreserved.is_zero() { + defensive!( + "Failed to unreserve full amount. 
(Requested, Actual)", + (excess, excess.saturating_sub(remaining_unreserved)) + ); } - Ordering::Equal => (), } Ok(if len == 0 { None } else { Some(new_deposit) }) } @@ -829,12 +981,12 @@ impl Pallet { F: FnMut(&Announcement, BlockNumberFor>) -> bool, >( delegate: &T::AccountId, - mut f: F, + f: F, ) -> DispatchResult { Announcements::::try_mutate_exists(delegate, |x| { let (mut pending, old_deposit) = x.take().ok_or(Error::::NotFound)?; let orig_pending_len = pending.len(); - pending.retain(&mut f); + pending.retain(f); ensure!(orig_pending_len > pending.len(), Error::::NotFound); *x = Self::rejig_deposit( delegate, @@ -854,7 +1006,10 @@ impl Pallet { force_proxy_type: Option, ) -> Result>, DispatchError> { let f = |x: &ProxyDefinition>| -> bool { - &x.delegate == delegate && force_proxy_type.as_ref().is_none_or(|y| &x.proxy_type == y) + &x.delegate == delegate + && force_proxy_type + .as_ref() + .map_or(true, |y| &x.proxy_type == y) }; Ok(Proxies::::get(real) .0 @@ -868,6 +1023,7 @@ impl Pallet { real: T::AccountId, call: ::RuntimeCall, ) { + use frame::traits::{InstanceFilter as _, OriginTrait as _}; // This is a freshly authenticated new account, the origin restrictions doesn't apply. let mut origin: T::RuntimeOrigin = frame_system::RawOrigin::Signed(real).into(); origin.add_filter(move |c: &::RuntimeCall| { @@ -903,7 +1059,7 @@ impl Pallet { /// Parameters: /// - `delegator`: The delegator account. 
pub fn remove_all_proxy_delegates(delegator: &T::AccountId) { - let (_, old_deposit) = Proxies::::take(delegator); - T::Currency::unreserve(delegator, old_deposit); + let (_, old_deposit) = Proxies::::take(&delegator); + T::Currency::unreserve(&delegator, old_deposit); } } diff --git a/pallets/proxy/src/tests.rs b/pallets/proxy/src/tests.rs index e350386164..b6734b80a8 100644 --- a/pallets/proxy/src/tests.rs +++ b/pallets/proxy/src/tests.rs @@ -20,26 +20,18 @@ #![cfg(test)] use super::*; - use crate as proxy; use alloc::{vec, vec::Vec}; -use codec::{Decode, DecodeWithMemTracking, Encode}; -use frame_support::{ - assert_noop, assert_ok, derive_impl, - traits::{ConstU32, ConstU64, Contains}, -}; -use sp_core::H256; -use sp_runtime::{BuildStorage, DispatchError, RuntimeDebug, traits::BlakeTwo256}; +use frame::testing_prelude::*; type Block = frame_system::mocking::MockBlock; -frame_support::construct_runtime!( - pub enum Test - { - System: frame_system = 1, - Balances: pallet_balances = 2, - Proxy: proxy = 3, - Utility: pallet_utility = 4, +construct_runtime!( + pub struct Test { + System: frame_system, + Balances: pallet_balances, + Proxy: proxy, + Utility: pallet_utility, } ); @@ -87,7 +79,7 @@ impl Default for ProxyType { Self::Any } } -impl InstanceFilter for ProxyType { +impl frame::traits::InstanceFilter for ProxyType { fn filter(&self, c: &RuntimeCall) -> bool { match self { ProxyType::Any => true, @@ -115,45 +107,54 @@ impl Contains for BaseFilter { } } } + +parameter_types! 
{ + pub static ProxyDepositBase: u64 = 1; + pub static ProxyDepositFactor: u64 = 1; + pub static AnnouncementDepositBase: u64 = 1; + pub static AnnouncementDepositFactor: u64 = 1; +} + impl Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; type Currency = Balances; type ProxyType = ProxyType; - type ProxyDepositBase = ConstU64<1>; - type ProxyDepositFactor = ConstU64<1>; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; type MaxProxies = ConstU32<4>; type WeightInfo = (); type CallHasher = BlakeTwo256; type MaxPending = ConstU32<2>; - type AnnouncementDepositBase = ConstU64<1>; - type AnnouncementDepositFactor = ConstU64<1>; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; } use super::{Call as ProxyCall, Event as ProxyEvent}; use frame_system::Call as SystemCall; -use pallet_balances::{Call as BalancesCall, Event as BalancesEvent}; +use pallet_balances::{Call as BalancesCall, Error as BalancesError, Event as BalancesEvent}; use pallet_utility::{Call as UtilityCall, Event as UtilityEvent}; type SystemError = frame_system::Error; -pub fn new_test_ext() -> sp_io::TestExternalities { +pub fn new_test_ext() -> TestState { let mut t = frame_system::GenesisConfig::::default() .build_storage() - .expect("Expected to not panic"); + .unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 3)], - dev_accounts: None, + ..Default::default() } .assimilate_storage(&mut t) - .expect("Expected to not panic"); - let mut ext = sp_io::TestExternalities::new(t); + .unwrap(); + let mut ext = TestState::new(t); ext.execute_with(|| System::set_block_number(1)); ext } fn last_events(n: usize) -> Vec { - system::Pallet::::events() + frame_system::Pallet::::events() .into_iter() .rev() .take(n) @@ -380,7 +381,7 @@ fn delayed_requires_pre_announcement() 
{ ); let call_hash = BlakeTwo256::hash_of(&call); assert_ok!(Proxy::announce(RuntimeOrigin::signed(2), 1, call_hash)); - system::Pallet::::set_block_number(2); + frame_system::Pallet::::set_block_number(2); assert_ok!(Proxy::proxy_announced( RuntimeOrigin::signed(0), 2, @@ -417,7 +418,7 @@ fn proxy_announced_removes_announcement_and_returns_deposit() { e ); - system::Pallet::::set_block_number(2); + frame_system::Pallet::::set_block_number(2); assert_ok!(Proxy::proxy_announced( RuntimeOrigin::signed(0), 3, @@ -928,7 +929,6 @@ fn pure_works() { anon, 5 )); - assert_eq!(Balances::free_balance(6), 0); assert_ok!(Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call)); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); @@ -946,7 +946,7 @@ fn pure_works() { None, call.clone() )); - let de: DispatchError = DispatchError::from(Error::::NoPermission).stripped(); + let de = DispatchError::from(Error::::NoPermission).stripped(); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Err(de) }.into()); assert_noop!( Proxy::kill_pure(RuntimeOrigin::signed(1), 1, ProxyType::Any, 0, 1, 0), @@ -964,24 +964,286 @@ fn pure_works() { Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call.clone()), Error::::NotProxy ); + }); +} - // Actually kill the pure proxy. 
- assert_ok!(Proxy::kill_pure( - RuntimeOrigin::signed(anon), - 1, +#[test] +fn poke_deposit_works_for_proxy_deposits() { + new_test_ext().execute_with(|| { + // Add a proxy and check initial deposit + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 2, ProxyType::Any, - 0, - 1, 0 )); + assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1 + + // Change the proxy deposit base to trigger deposit update + ProxyDepositBase::set(2); + let result = Proxy::poke_deposit(RuntimeOrigin::signed(1)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::No); + assert_eq!(Balances::reserved_balance(1), 3); // New Base(2) + Factor(1) * 1 System::assert_last_event( - ProxyEvent::PureKilled { - pure: anon, - spawner: 1, - proxy_type: ProxyType::Any, - disambiguation_index: 0, + ProxyEvent::DepositPoked { + who: 1, + kind: DepositKind::Proxies, + old_deposit: 2, + new_deposit: 3, + } + .into(), + ); + assert!(System::events().iter().any(|record| matches!( + record.event, + RuntimeEvent::Proxy(Event::DepositPoked { .. 
}) + ))); + }); +} + +#[test] +fn poke_deposit_works_for_announcement_deposits() { + new_test_ext().execute_with(|| { + // Setup proxy and make announcement + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::Any, + 1 + )); + assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1 + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1 + }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); + let initial_deposit = Balances::reserved_balance(3); + + // Change announcement deposit base to trigger update + AnnouncementDepositBase::set(2); + let result = Proxy::poke_deposit(RuntimeOrigin::signed(3)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::No); + let new_deposit = initial_deposit.saturating_add(1); // Base increased by 1 + assert_eq!(Balances::reserved_balance(3), new_deposit); + System::assert_last_event( + ProxyEvent::DepositPoked { + who: 3, + kind: DepositKind::Announcements, + old_deposit: initial_deposit, + new_deposit, } .into(), ); + assert!(System::events().iter().any(|record| matches!( + record.event, + RuntimeEvent::Proxy(Event::DepositPoked { .. 
}) + ))); + }); +} + +#[test] +fn poke_deposit_charges_fee_when_deposit_unchanged() { + new_test_ext().execute_with(|| { + // Add a proxy and check initial deposit + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 3, + ProxyType::Any, + 0 + )); + assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1 + + // Poke the deposit without changing deposit required and check fee + let result = Proxy::poke_deposit(RuntimeOrigin::signed(1)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::Yes); // Pays fee + assert_eq!(Balances::reserved_balance(1), 2); // No change + + // No event emitted + assert!(!System::events().iter().any(|record| matches!( + record.event, + RuntimeEvent::Proxy(Event::DepositPoked { .. }) + ))); + + // Add an announcement and check initial deposit + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + let announcements = Announcements::::get(3); + assert_eq!( + announcements.0, + vec![Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1 + }] + ); + assert_eq!(Balances::reserved_balance(3), announcements.1); + let initial_deposit = Balances::reserved_balance(3); + + // Poke the deposit without changing deposit required and check fee + let result = Proxy::poke_deposit(RuntimeOrigin::signed(3)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::Yes); // Pays fee + assert_eq!(Balances::reserved_balance(3), initial_deposit); // No change + + // No event emitted + assert!(!System::events().iter().any(|record| matches!( + record.event, + RuntimeEvent::Proxy(Event::DepositPoked { .. 
}) + ))); + }); +} + +#[test] +fn poke_deposit_handles_insufficient_balance() { + new_test_ext().execute_with(|| { + // Setup with account that has minimal balance + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(5), + 3, + ProxyType::Any, + 0 + )); + let initial_deposit = Balances::reserved_balance(5); + + // Change deposit base to require more than available balance + ProxyDepositBase::set(10); + + // Poking should fail due to insufficient balance + assert_noop!( + Proxy::poke_deposit(RuntimeOrigin::signed(5)), + BalancesError::::InsufficientBalance, + ); + + // Original deposit should remain unchanged + assert_eq!(Balances::reserved_balance(5), initial_deposit); + }); +} + +#[test] +fn poke_deposit_updates_both_proxy_and_announcement_deposits() { + new_test_ext().execute_with(|| { + // Setup both proxy and announcement for the same account + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(1), + 2, + ProxyType::Any, + 0 + )); + assert_eq!(Balances::reserved_balance(1), 2); // Base(1) + Factor(1) * 1 + assert_ok!(Proxy::add_proxy( + RuntimeOrigin::signed(2), + 3, + ProxyType::Any, + 1 + )); + assert_eq!(Balances::reserved_balance(2), 2); // Base(1) + Factor(1) * 1 + assert_ok!(Proxy::announce(RuntimeOrigin::signed(2), 1, [1; 32].into())); + let announcements = Announcements::::get(2); + assert_eq!( + announcements.0, + vec![Announcement { + real: 1, + call_hash: [1; 32].into(), + height: 1 + }] + ); + assert_eq!(announcements.1, 2); // Base(1) + Factor(1) * 1 + + // Record initial deposits + let initial_proxy_deposit = Proxies::::get(2).1; + let initial_announcement_deposit = Announcements::::get(2).1; + + // Total reserved = deposit for proxy + deposit for announcement + assert_eq!( + Balances::reserved_balance(2), + initial_proxy_deposit.saturating_add(initial_announcement_deposit) + ); + + // Change both deposit requirements + ProxyDepositBase::set(2); + AnnouncementDepositBase::set(2); + + // Poke deposits - should update both deposits and emit two 
events + let result = Proxy::poke_deposit(RuntimeOrigin::signed(2)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::No); + + // Check both deposits were updated + let (_, new_proxy_deposit) = Proxies::::get(2); + let (_, new_announcement_deposit) = Announcements::::get(2); + assert_eq!(new_proxy_deposit, 3); // Base(2) + Factor(1) * 1 + assert_eq!(new_announcement_deposit, 3); // Base(2) + Factor(1) * 1 + assert_eq!( + Balances::reserved_balance(2), + new_proxy_deposit.saturating_add(new_announcement_deposit) + ); + + // Verify both events were emitted in the correct order + let events = System::events(); + let relevant_events: Vec<_> = events + .iter() + .filter(|record| { + matches!( + record.event, + RuntimeEvent::Proxy(ProxyEvent::DepositPoked { .. }) + ) + }) + .collect(); + + assert_eq!(relevant_events.len(), 2); + + // First event should be for Proxies + assert_eq!( + relevant_events[0].event, + ProxyEvent::DepositPoked { + who: 2, + kind: DepositKind::Proxies, + old_deposit: initial_proxy_deposit, + new_deposit: new_proxy_deposit, + } + .into() + ); + + // Second event should be for Announcements + assert_eq!( + relevant_events[1].event, + ProxyEvent::DepositPoked { + who: 2, + kind: DepositKind::Announcements, + old_deposit: initial_announcement_deposit, + new_deposit: new_announcement_deposit, + } + .into() + ); + + // Poking again should charge fee as nothing changes + let result = Proxy::poke_deposit(RuntimeOrigin::signed(2)); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap().pays_fee, Pays::Yes); + + // Verify deposits remained the same + assert_eq!(Proxies::::get(2).1, new_proxy_deposit); + assert_eq!(Announcements::::get(2).1, new_announcement_deposit); + assert_eq!( + Balances::reserved_balance(2), + new_proxy_deposit.saturating_add(new_announcement_deposit) + ); + }); +} + +#[test] +fn poke_deposit_fails_for_unsigned_origin() { + new_test_ext().execute_with(|| { + assert_noop!( + 
Proxy::poke_deposit(RuntimeOrigin::none()), + DispatchError::BadOrigin, + ); }); } diff --git a/pallets/proxy/src/weights.rs b/pallets/proxy/src/weights.rs index 3093298e3e..bb51872b2e 100644 --- a/pallets/proxy/src/weights.rs +++ b/pallets/proxy/src/weights.rs @@ -18,36 +18,38 @@ //! Autogenerated weights for `pallet_proxy` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-03-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `99fc4dfa9c86`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain=dev +// --extrinsic=* +// --runtime=target/production/wbuild/kitchensink-runtime/kitchensink_runtime.wasm +// --pallet=pallet_proxy +// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 +// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/proxy/src/weights.rs +// --wasm-execution=compiled // --steps=50 // --repeat=20 -// --pallet=pallet_proxy +// --heap-pages=4096 +// --template=substrate/.maintain/frame-umbrella-weight-template.hbs // --no-storage-info -// --no-median-slopes // --no-min-squares -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --output=./substrate/frame/proxy/src/weights.rs -// --header=./substrate/HEADER-APACHE2 -// --template=./substrate/.maintain/frame-weight-template.hbs +// --no-median-slopes +// 
--exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage,pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] #![allow(missing_docs)] +#![allow(dead_code)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; +use frame::weights_prelude::*; /// Weight functions needed for `pallet_proxy`. pub trait WeightInfo { @@ -61,6 +63,7 @@ pub trait WeightInfo { fn remove_proxies(p: u32, ) -> Weight; fn create_pure(p: u32, ) -> Weight; fn kill_pure(p: u32, ) -> Weight; + fn poke_deposit() -> Weight; } /// Weights for `pallet_proxy` using the Substrate node and recommended hardware. @@ -75,12 +78,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `306 + p * (37 ±0)` + // Measured: `339 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 18_280_000 picoseconds. - Weight::from_parts(19_655_145, 4706) - // Standard Error: 2_345 - .saturating_add(Weight::from_parts(36_306, 0).saturating_mul(p.into())) + // Minimum execution time: 23_353_000 picoseconds. + Weight::from_parts(25_084_085, 4706) + // Standard Error: 2_569 + .saturating_add(Weight::from_parts(33_574, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -97,14 +100,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. 
fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `633 + a * (68 ±0) + p * (37 ±0)` + // Measured: `666 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 41_789_000 picoseconds. - Weight::from_parts(41_812_078, 5698) - // Standard Error: 3_694 - .saturating_add(Weight::from_parts(163_029, 0).saturating_mul(a.into())) - // Standard Error: 3_817 - .saturating_add(Weight::from_parts(79_539, 0).saturating_mul(p.into())) + // Minimum execution time: 47_196_000 picoseconds. + Weight::from_parts(48_686_812, 5698) + // Standard Error: 3_711 + .saturating_add(Weight::from_parts(171_107, 0).saturating_mul(a.into())) + // Standard Error: 3_834 + .saturating_add(Weight::from_parts(34_523, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -116,14 +119,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_475_000 picoseconds. - Weight::from_parts(22_666_821, 5698) - // Standard Error: 1_797 - .saturating_add(Weight::from_parts(170_629, 0).saturating_mul(a.into())) - // Standard Error: 1_857 - .saturating_add(Weight::from_parts(18_799, 0).saturating_mul(p.into())) + // Minimum execution time: 29_341_000 picoseconds. + Weight::from_parts(30_320_504, 5698) + // Standard Error: 1_821 + .saturating_add(Weight::from_parts(158_572, 0).saturating_mul(a.into())) + // Standard Error: 1_881 + .saturating_add(Weight::from_parts(8_433, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -135,14 +138,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. 
fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_326_000 picoseconds. - Weight::from_parts(22_654_227, 5698) - // Standard Error: 1_859 - .saturating_add(Weight::from_parts(168_822, 0).saturating_mul(a.into())) - // Standard Error: 1_921 - .saturating_add(Weight::from_parts(21_839, 0).saturating_mul(p.into())) + // Minimum execution time: 28_422_000 picoseconds. + Weight::from_parts(29_754_384, 5698) + // Standard Error: 1_840 + .saturating_add(Weight::from_parts(176_827, 0).saturating_mul(a.into())) + // Standard Error: 1_901 + .saturating_add(Weight::from_parts(9_607, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -156,14 +159,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `420 + a * (68 ±0) + p * (37 ±0)` + // Measured: `453 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 31_551_000 picoseconds. - Weight::from_parts(32_205_445, 5698) - // Standard Error: 4_089 - .saturating_add(Weight::from_parts(167_596, 0).saturating_mul(a.into())) - // Standard Error: 4_225 - .saturating_add(Weight::from_parts(67_833, 0).saturating_mul(p.into())) + // Minimum execution time: 36_885_000 picoseconds. + Weight::from_parts(38_080_636, 5698) + // Standard Error: 2_642 + .saturating_add(Weight::from_parts(157_335, 0).saturating_mul(a.into())) + // Standard Error: 2_730 + .saturating_add(Weight::from_parts(28_872, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -172,12 +175,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. 
fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_495_000 picoseconds. - Weight::from_parts(22_358_457, 4706) - // Standard Error: 1_606 - .saturating_add(Weight::from_parts(64_322, 0).saturating_mul(p.into())) + // Minimum execution time: 27_016_000 picoseconds. + Weight::from_parts(28_296_216, 4706) + // Standard Error: 1_643 + .saturating_add(Weight::from_parts(50_271, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -186,12 +189,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_495_000 picoseconds. - Weight::from_parts(22_579_308, 4706) - // Standard Error: 2_571 - .saturating_add(Weight::from_parts(62_404, 0).saturating_mul(p.into())) + // Minimum execution time: 26_955_000 picoseconds. + Weight::from_parts(28_379_566, 4706) + // Standard Error: 1_547 + .saturating_add(Weight::from_parts(45_784, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -200,12 +203,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_541_000 picoseconds. - Weight::from_parts(21_456_750, 4706) - // Standard Error: 1_697 - .saturating_add(Weight::from_parts(45_387, 0).saturating_mul(p.into())) + // Minimum execution time: 24_656_000 picoseconds. 
+ Weight::from_parts(25_821_878, 4706) + // Standard Error: 2_300 + .saturating_add(Weight::from_parts(33_972, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -214,12 +217,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `206` // Estimated: `4706` - // Minimum execution time: 22_809_000 picoseconds. - Weight::from_parts(23_878_644, 4706) - // Standard Error: 1_600 - .saturating_add(Weight::from_parts(10_149, 0).saturating_mul(p.into())) + // Minimum execution time: 28_416_000 picoseconds. + Weight::from_parts(29_662_728, 4706) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(29_928, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -228,15 +231,30 @@ impl WeightInfo for SubstrateWeight { /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `198 + p * (37 ±0)` + // Measured: `231 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_993_000 picoseconds. - Weight::from_parts(22_067_418, 4706) - // Standard Error: 1_673 - .saturating_add(Weight::from_parts(52_703, 0).saturating_mul(p.into())) + // Minimum execution time: 25_505_000 picoseconds. 
+ Weight::from_parts(26_780_627, 4706) + // Standard Error: 1_581 + .saturating_add(Weight::from_parts(33_085, 0).saturating_mul(p.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `519` + // Estimated: `5698` + // Minimum execution time: 46_733_000 picoseconds. + Weight::from_parts(47_972_000, 5698) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } } // For backwards compatibility and tests. @@ -250,12 +268,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `306 + p * (37 ±0)` + // Measured: `339 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 18_280_000 picoseconds. - Weight::from_parts(19_655_145, 4706) - // Standard Error: 2_345 - .saturating_add(Weight::from_parts(36_306, 0).saturating_mul(p.into())) + // Minimum execution time: 23_353_000 picoseconds. + Weight::from_parts(25_084_085, 4706) + // Standard Error: 2_569 + .saturating_add(Weight::from_parts(33_574, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) } /// Storage: `Proxy::Proxies` (r:1 w:0) @@ -272,14 +290,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. 
fn proxy_announced(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `633 + a * (68 ±0) + p * (37 ±0)` + // Measured: `666 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 41_789_000 picoseconds. - Weight::from_parts(41_812_078, 5698) - // Standard Error: 3_694 - .saturating_add(Weight::from_parts(163_029, 0).saturating_mul(a.into())) - // Standard Error: 3_817 - .saturating_add(Weight::from_parts(79_539, 0).saturating_mul(p.into())) + // Minimum execution time: 47_196_000 picoseconds. + Weight::from_parts(48_686_812, 5698) + // Standard Error: 3_711 + .saturating_add(Weight::from_parts(171_107, 0).saturating_mul(a.into())) + // Standard Error: 3_834 + .saturating_add(Weight::from_parts(34_523, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -291,14 +309,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_475_000 picoseconds. - Weight::from_parts(22_666_821, 5698) - // Standard Error: 1_797 - .saturating_add(Weight::from_parts(170_629, 0).saturating_mul(a.into())) - // Standard Error: 1_857 - .saturating_add(Weight::from_parts(18_799, 0).saturating_mul(p.into())) + // Minimum execution time: 29_341_000 picoseconds. + Weight::from_parts(30_320_504, 5698) + // Standard Error: 1_821 + .saturating_add(Weight::from_parts(158_572, 0).saturating_mul(a.into())) + // Standard Error: 1_881 + .saturating_add(Weight::from_parts(8_433, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -310,14 +328,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. 
fn reject_announcement(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `403 + a * (68 ±0)` + // Measured: `436 + a * (68 ±0)` // Estimated: `5698` - // Minimum execution time: 22_326_000 picoseconds. - Weight::from_parts(22_654_227, 5698) - // Standard Error: 1_859 - .saturating_add(Weight::from_parts(168_822, 0).saturating_mul(a.into())) - // Standard Error: 1_921 - .saturating_add(Weight::from_parts(21_839, 0).saturating_mul(p.into())) + // Minimum execution time: 28_422_000 picoseconds. + Weight::from_parts(29_754_384, 5698) + // Standard Error: 1_840 + .saturating_add(Weight::from_parts(176_827, 0).saturating_mul(a.into())) + // Standard Error: 1_901 + .saturating_add(Weight::from_parts(9_607, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -331,14 +349,14 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `420 + a * (68 ±0) + p * (37 ±0)` + // Measured: `453 + a * (68 ±0) + p * (37 ±0)` // Estimated: `5698` - // Minimum execution time: 31_551_000 picoseconds. - Weight::from_parts(32_205_445, 5698) - // Standard Error: 4_089 - .saturating_add(Weight::from_parts(167_596, 0).saturating_mul(a.into())) - // Standard Error: 4_225 - .saturating_add(Weight::from_parts(67_833, 0).saturating_mul(p.into())) + // Minimum execution time: 36_885_000 picoseconds. + Weight::from_parts(38_080_636, 5698) + // Standard Error: 2_642 + .saturating_add(Weight::from_parts(157_335, 0).saturating_mul(a.into())) + // Standard Error: 2_730 + .saturating_add(Weight::from_parts(28_872, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -347,12 +365,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. 
fn add_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_495_000 picoseconds. - Weight::from_parts(22_358_457, 4706) - // Standard Error: 1_606 - .saturating_add(Weight::from_parts(64_322, 0).saturating_mul(p.into())) + // Minimum execution time: 27_016_000 picoseconds. + Weight::from_parts(28_296_216, 4706) + // Standard Error: 1_643 + .saturating_add(Weight::from_parts(50_271, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -361,12 +379,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 21_495_000 picoseconds. - Weight::from_parts(22_579_308, 4706) - // Standard Error: 2_571 - .saturating_add(Weight::from_parts(62_404, 0).saturating_mul(p.into())) + // Minimum execution time: 26_955_000 picoseconds. + Weight::from_parts(28_379_566, 4706) + // Standard Error: 1_547 + .saturating_add(Weight::from_parts(45_784, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -375,12 +393,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `161 + p * (37 ±0)` + // Measured: `194 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_541_000 picoseconds. - Weight::from_parts(21_456_750, 4706) - // Standard Error: 1_697 - .saturating_add(Weight::from_parts(45_387, 0).saturating_mul(p.into())) + // Minimum execution time: 24_656_000 picoseconds. 
+ Weight::from_parts(25_821_878, 4706) + // Standard Error: 2_300 + .saturating_add(Weight::from_parts(33_972, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -389,12 +407,12 @@ impl WeightInfo for () { /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `173` + // Measured: `206` // Estimated: `4706` - // Minimum execution time: 22_809_000 picoseconds. - Weight::from_parts(23_878_644, 4706) - // Standard Error: 1_600 - .saturating_add(Weight::from_parts(10_149, 0).saturating_mul(p.into())) + // Minimum execution time: 28_416_000 picoseconds. + Weight::from_parts(29_662_728, 4706) + // Standard Error: 1_851 + .saturating_add(Weight::from_parts(29_928, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -403,13 +421,28 @@ impl WeightInfo for () { /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `198 + p * (37 ±0)` + // Measured: `231 + p * (37 ±0)` // Estimated: `4706` - // Minimum execution time: 20_993_000 picoseconds. - Weight::from_parts(22_067_418, 4706) - // Standard Error: 1_673 - .saturating_add(Weight::from_parts(52_703, 0).saturating_mul(p.into())) + // Minimum execution time: 25_505_000 picoseconds. 
+ Weight::from_parts(26_780_627, 4706) + // Standard Error: 1_581 + .saturating_add(Weight::from_parts(33_085, 0).saturating_mul(p.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `519` + // Estimated: `5698` + // Minimum execution time: 46_733_000 picoseconds. + Weight::from_parts(47_972_000, 5698) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } } From 42913575067015a9d7f96e108a8d7ad15d40368b Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Thu, 11 Sep 2025 15:24:32 -0300 Subject: [PATCH 212/379] upgrade pallet-utility --- Cargo.lock | 10 +- pallets/utility/Cargo.toml | 18 ++- pallets/utility/src/benchmarking.rs | 122 ++++++++++------ pallets/utility/src/lib.rs | 197 +++++++++++++++++++++---- pallets/utility/src/tests.rs | 216 ++++++++++++++++++++++++++-- pallets/utility/src/weights.rs | 150 ++++++++++++------- 6 files changed, 557 insertions(+), 156 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bf296b01cc..407336be13 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6929,7 +6929,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility 38.0.0", + "pallet-utility 40.0.0", "parity-scale-codec", "polkadot-runtime-common", "precompile-utils", @@ -7949,7 +7949,7 @@ name = "pallet-proxy" version = "40.1.0" dependencies 
= [ "pallet-balances", - "pallet-utility 38.0.0", + "pallet-utility 40.0.0", "parity-scale-codec", "polkadot-sdk-frame", "scale-info", @@ -8008,7 +8008,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-proxy 40.1.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6)", - "pallet-utility 40.0.0", + "pallet-utility 40.0.0 (git+https://github.com/paritytech/polkadot-sdk.git?tag=polkadot-stable2503-6)", "parity-scale-codec", "scale-info", "sp-arithmetic", @@ -8128,7 +8128,7 @@ dependencies = [ "pallet-preimage", "pallet-scheduler", "pallet-subtensor-swap", - "pallet-utility 38.0.0", + "pallet-utility 40.0.0", "parity-scale-codec", "parity-util-mem", "polkadot-runtime-common", @@ -8308,7 +8308,7 @@ dependencies = [ [[package]] name = "pallet-utility" -version = "38.0.0" +version = "40.0.0" dependencies = [ "frame-benchmarking", "frame-support", diff --git a/pallets/utility/Cargo.toml b/pallets/utility/Cargo.toml index 01ecd42166..08df4734c0 100644 --- a/pallets/utility/Cargo.toml +++ b/pallets/utility/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-utility" -version = "38.0.0" +version = "40.0.0" edition.workspace = true license = "Apache-2.0" description = "FRAME utilities pallet" @@ -13,20 +13,20 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-benchmarking = { workspace = true, optional = true } +codec = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } frame-support.workspace = true frame-system.workspace = true +scale-info = { features = ["derive"], workspace = true } sp-core.workspace = true sp-io.workspace = true sp-runtime.workspace = true -codec = { workspace = true, features = ["derive"] } -scale-info = { workspace = true, features = ["derive"] } subtensor-macros.workspace = true [dev-dependencies] -pallet-collective.workspace = true -pallet-root-testing.workspace = true pallet-balances = { workspace = true, default-features = true } +pallet-collective = 
{ workspace = true, default-features = true } +pallet-root-testing = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } @@ -41,24 +41,22 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", - "pallet-collective/std", - "pallet-root-testing/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-collective/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", - "sp-runtime/try-runtime", "pallet-balances/try-runtime", "pallet-collective/try-runtime", "pallet-root-testing/try-runtime", "pallet-timestamp/try-runtime", + "sp-runtime/try-runtime", ] diff --git a/pallets/utility/src/benchmarking.rs b/pallets/utility/src/benchmarking.rs index 6980552c36..4a9e0ca306 100644 --- a/pallets/utility/src/benchmarking.rs +++ b/pallets/utility/src/benchmarking.rs @@ -19,73 +19,105 @@ #![cfg(feature = "runtime-benchmarks")] -use super::*; -use alloc::{vec, vec::Vec}; -use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller}; +use alloc::vec; +use frame_benchmarking::{benchmarking::add_to_whitelist, v2::*}; use frame_system::RawOrigin; +use crate::*; + const SEED: u32 = 0; fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -benchmarks! { - where_clause { where ::PalletsOrigin: Clone } - batch { - let c in 0 .. 1000; - let mut calls: Vec<::RuntimeCall> = Vec::new(); - for i in 0 .. 
c { - let call = frame_system::Call::remark { remark: vec![] }.into(); - calls.push(call); - } +#[benchmarks] +mod benchmark { + use super::*; + + #[benchmark] + fn batch(c: Linear<0, 1000>) { + let calls = vec![frame_system::Call::remark { remark: vec![] }.into(); c as usize]; let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), calls) - verify { - assert_last_event::(Event::BatchCompleted.into()) + + #[extrinsic_call] + _(RawOrigin::Signed(caller), calls); + + assert_last_event::(Event::BatchCompleted.into()); } - as_derivative { + #[benchmark] + fn as_derivative() { let caller = account("caller", SEED, SEED); let call = Box::new(frame_system::Call::remark { remark: vec![] }.into()); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), SEED as u16, call) - - batch_all { - let c in 0 .. 1000; - let mut calls: Vec<::RuntimeCall> = Vec::new(); - for i in 0 .. 
c { - let call = frame_system::Call::remark { remark: vec![] }.into(); - calls.push(call); - } + add_to_whitelist(caller_key.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), SEED as u16, call); + } + + #[benchmark] + fn batch_all(c: Linear<0, 1000>) { + let calls = vec![frame_system::Call::remark { remark: vec![] }.into(); c as usize]; + let caller = whitelisted_caller(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), calls); + + assert_last_event::(Event::BatchCompleted.into()); + } + + #[benchmark] + fn dispatch_as() { + let caller = account("caller", SEED, SEED); + let call = Box::new(frame_system::Call::remark { remark: vec![] }.into()); + let origin = T::RuntimeOrigin::from(RawOrigin::Signed(caller)); + let pallets_origin = origin.caller().clone(); + let pallets_origin = T::PalletsOrigin::from(pallets_origin); + + #[extrinsic_call] + _(RawOrigin::Root, Box::new(pallets_origin), call); + } + + #[benchmark] + fn force_batch(c: Linear<0, 1000>) { + let calls = vec![frame_system::Call::remark { remark: vec![] }.into(); c as usize]; let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), calls) - verify { - assert_last_event::(Event::BatchCompleted.into()) + + #[extrinsic_call] + _(RawOrigin::Signed(caller), calls); + + assert_last_event::(Event::BatchCompleted.into()); } - dispatch_as { + #[benchmark] + fn dispatch_as_fallible() { let caller = account("caller", SEED, SEED); let call = Box::new(frame_system::Call::remark { remark: vec![] }.into()); let origin: T::RuntimeOrigin = RawOrigin::Signed(caller).into(); - let pallets_origin: ::PalletsOrigin = origin.caller().clone(); - let pallets_origin = Into::::into(pallets_origin); - }: _(RawOrigin::Root, Box::new(pallets_origin), call) - - force_batch { - let c in 0 .. 1000; - let mut calls: Vec<::RuntimeCall> = Vec::new(); - for i in 0 .. 
c { - let call = frame_system::Call::remark { remark: vec![] }.into(); - calls.push(call); - } + let pallets_origin = origin.caller().clone(); + let pallets_origin = T::PalletsOrigin::from(pallets_origin); + + #[extrinsic_call] + _(RawOrigin::Root, Box::new(pallets_origin), call); + } + + #[benchmark] + fn if_else() { + // Failing main call. + let main_call = Box::new(frame_system::Call::set_code { code: vec![1] }.into()); + let fallback_call = Box::new(frame_system::Call::remark { remark: vec![1] }.into()); let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), calls) - verify { - assert_last_event::(Event::BatchCompleted.into()) + + #[extrinsic_call] + _(RawOrigin::Signed(caller), main_call, fallback_call); } - impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite! { + Pallet, + tests::new_test_ext(), + tests::Test + } } diff --git a/pallets/utility/src/lib.rs b/pallets/utility/src/lib.rs index 8ee888889e..efcc434e8c 100644 --- a/pallets/utility/src/lib.rs +++ b/pallets/utility/src/lib.rs @@ -61,7 +61,10 @@ extern crate alloc; use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, Encode}; use frame_support::{ - dispatch::{GetDispatchInfo, PostDispatchInfo, extract_actual_weight}, + dispatch::{ + DispatchClass::{Normal, Operational}, + GetDispatchInfo, PostDispatchInfo, extract_actual_weight, + }, traits::{IsSubType, OriginTrait, UnfilteredDispatchable}, }; use sp_core::TypeId; @@ -122,6 +125,10 @@ pub mod pallet { ItemFailed { error: DispatchError }, /// A call was dispatched. DispatchedAs { result: DispatchResult }, + /// Main call was dispatched. + IfElseMainSuccess, + /// The fallback call was dispatched. + IfElseFallbackCalled { main_error: DispatchError }, } // Align the call size to 1KB. As we are currently compiling the runtime for native/wasm @@ -135,19 +142,13 @@ pub mod pallet { /// The limit on the number of batched calls. 
fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; - let size = core::mem::size_of::<::RuntimeCall>() as u32; + let call_size = (core::mem::size_of::<::RuntimeCall>() as u32) + .div_ceil(CALL_ALIGN) + * CALL_ALIGN; + // The margin to take into account vec doubling capacity. + let margin_factor = 3; - let align_up = size.saturating_add(CALL_ALIGN.saturating_sub(1)); - let call_size = align_up - .checked_div(CALL_ALIGN) - .unwrap_or(0) - .saturating_mul(CALL_ALIGN); - - let margin_factor: u32 = 3; - - let after_margin = allocator_limit.checked_div(margin_factor).unwrap_or(0); - - after_margin.checked_div(call_size).unwrap_or(0) + allocator_limit / margin_factor / call_size } } @@ -157,7 +158,8 @@ pub mod pallet { // If you hit this error, you need to try to `Box` big dispatchable parameters. assert!( core::mem::size_of::<::RuntimeCall>() as u32 <= CALL_ALIGN, - "Call enum size should be smaller than {CALL_ALIGN} bytes.", + "Call enum size should be smaller than {} bytes.", + CALL_ALIGN, ); } } @@ -190,9 +192,9 @@ pub mod pallet { /// event is deposited. #[pallet::call_index(0)] #[pallet::weight({ - let dispatch_weight = Pallet::::weight(calls); + let (dispatch_weight, dispatch_class, pays) = Pallet::::weight_and_dispatch_class(&calls); let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch(calls.len() as u32)); - (dispatch_weight, DispatchClass::Normal) + (dispatch_weight, dispatch_class, pays) })] pub fn batch( origin: OriginFor, @@ -302,9 +304,9 @@ pub mod pallet { /// - O(C) where C is the number of calls to be batched. 
#[pallet::call_index(2)] #[pallet::weight({ - let dispatch_weight = Pallet::::weight(calls); + let (dispatch_weight, dispatch_class, pays) = Pallet::::weight_and_dispatch_class(&calls); let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); - (dispatch_weight, DispatchClass::Normal) + (dispatch_weight, dispatch_class, pays) })] pub fn batch_all( origin: OriginFor, @@ -401,9 +403,9 @@ pub mod pallet { /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(4)] #[pallet::weight({ - let dispatch_weight = Pallet::::weight(calls); + let (dispatch_weight, dispatch_class, pays) = Pallet::::weight_and_dispatch_class(&calls); let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); - (dispatch_weight, DispatchClass::Normal) + (dispatch_weight, dispatch_class, pays) })] pub fn force_batch( origin: OriginFor, @@ -470,19 +472,158 @@ pub mod pallet { let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); res.map(|_| ()).map_err(|e| e.error) } + + /// Dispatch a fallback call in the event the main call fails to execute. + /// May be called from any origin except `None`. + /// + /// This function first attempts to dispatch the `main` call. + /// If the `main` call fails, the `fallback` is attemted. + /// if the fallback is successfully dispatched, the weights of both calls + /// are accumulated and an event containing the main call error is deposited. + /// + /// In the event of a fallback failure the whole call fails + /// with the weights returned. + /// + /// - `main`: The main call to be dispatched. This is the primary action to execute. + /// - `fallback`: The fallback call to be dispatched in case the `main` call fails. + /// + /// ## Dispatch Logic + /// - If the origin is `root`, both the main and fallback calls are executed without + /// applying any origin filters. 
+ /// - If the origin is not `root`, the origin filter is applied to both the `main` and + /// `fallback` calls. + /// + /// ## Use Case + /// - Some use cases might involve submitting a `batch` type call in either main, fallback + /// or both. + #[pallet::call_index(6)] + #[pallet::weight({ + let main = main.get_dispatch_info(); + let fallback = fallback.get_dispatch_info(); + ( + T::WeightInfo::if_else() + .saturating_add(main.call_weight) + .saturating_add(fallback.call_weight), + if main.class == Operational && fallback.class == Operational { Operational } else { Normal }, + ) + })] + pub fn if_else( + origin: OriginFor, + main: Box<::RuntimeCall>, + fallback: Box<::RuntimeCall>, + ) -> DispatchResultWithPostInfo { + // Do not allow the `None` origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()); + } + + let is_root = ensure_root(origin.clone()).is_ok(); + + // Track the weights + let mut weight = T::WeightInfo::if_else(); + + let main_info = main.get_dispatch_info(); + + // Execute the main call first + let main_result = if is_root { + main.dispatch_bypass_filter(origin.clone()) + } else { + main.dispatch(origin.clone()) + }; + + // Add weight of the main call + weight = weight.saturating_add(extract_actual_weight(&main_result, &main_info)); + + let Err(main_error) = main_result else { + // If the main result is Ok, we skip the fallback logic entirely + Self::deposit_event(Event::IfElseMainSuccess); + return Ok(Some(weight).into()); + }; + + // If the main call failed, execute the fallback call + let fallback_info = fallback.get_dispatch_info(); + + let fallback_result = if is_root { + fallback.dispatch_bypass_filter(origin.clone()) + } else { + fallback.dispatch(origin) + }; + + // Add weight of the fallback call + weight = weight.saturating_add(extract_actual_weight(&fallback_result, &fallback_info)); + + let Err(fallback_error) = fallback_result else { + // Fallback succeeded. 
+ Self::deposit_event(Event::IfElseFallbackCalled { + main_error: main_error.error, + }); + return Ok(Some(weight).into()); + }; + + // Both calls have failed, return fallback error + Err(sp_runtime::DispatchErrorWithPostInfo { + error: fallback_error.error, + post_info: Some(weight).into(), + }) + } + + /// Dispatches a function call with a provided origin. + /// + /// Almost the same as [`Pallet::dispatch_as`] but forwards any error of the inner call. + /// + /// The dispatch origin for this call must be _Root_. + #[pallet::call_index(7)] + #[pallet::weight({ + let dispatch_info = call.get_dispatch_info(); + ( + T::WeightInfo::dispatch_as_fallible() + .saturating_add(dispatch_info.call_weight), + dispatch_info.class, + ) + })] + pub fn dispatch_as_fallible( + origin: OriginFor, + as_origin: Box, + call: Box<::RuntimeCall>, + ) -> DispatchResult { + ensure_root(origin)?; + + call.dispatch_bypass_filter((*as_origin).into()) + .map_err(|e| e.error)?; + + Self::deposit_event(Event::DispatchedAs { result: Ok(()) }); + + Ok(()) + } } impl Pallet { /// Get the accumulated `weight` and the dispatch class for the given `calls`. - fn weight(calls: &[::RuntimeCall]) -> Weight { + fn weight_and_dispatch_class( + calls: &[::RuntimeCall], + ) -> (Weight, DispatchClass, Pays) { let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()); - dispatch_infos.fold(Weight::zero(), |total_weight, di| { - if di.pays_fee == Pays::Yes { - total_weight.saturating_add(di.call_weight) - } else { - total_weight - } - }) + let pays = if dispatch_infos.clone().any(|di| di.pays_fee == Pays::No) { + Pays::No + } else { + Pays::Yes + }; + let (dispatch_weight, dispatch_class) = dispatch_infos.fold( + (Weight::zero(), DispatchClass::Operational), + |(total_weight, dispatch_class): (Weight, DispatchClass), di| { + ( + total_weight.saturating_add(di.call_weight), + // If not all are `Operational`, we want to use `DispatchClass::Normal`. 
+ if di.class == DispatchClass::Normal { + di.class + } else { + dispatch_class + }, + ) + }, + ); + + (dispatch_weight, dispatch_class, pays) } } } diff --git a/pallets/utility/src/tests.rs b/pallets/utility/src/tests.rs index a883f1b690..5c0bd3568f 100644 --- a/pallets/utility/src/tests.rs +++ b/pallets/utility/src/tests.rs @@ -18,7 +18,6 @@ // Tests for Utility Pallet #![cfg(test)] -#![allow(clippy::arithmetic_side_effects)] use super::*; @@ -40,7 +39,6 @@ type BlockNumber = u64; // example module to test behaviors. #[frame_support::pallet(dev_mode)] -#[allow(clippy::large_enum_variant)] pub mod example { use frame_support::{dispatch::WithPostDispatchInfo, pallet_prelude::*}; use frame_system::pallet_prelude::*; @@ -129,14 +127,14 @@ type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( pub enum Test { - System: frame_system = 1, - Timestamp: pallet_timestamp = 2, - Balances: pallet_balances = 3, - RootTesting: pallet_root_testing = 4, - Council: pallet_collective:: = 5, - Utility: utility = 6, - Example: example = 7, - Democracy: mock_democracy = 8, + System: frame_system, + Timestamp: pallet_timestamp, + Balances: pallet_balances, + RootTesting: pallet_root_testing, + Council: pallet_collective::, + Utility: utility, + Example: example, + Democracy: mock_democracy, } ); @@ -257,20 +255,20 @@ use pallet_timestamp::Call as TimestampCall; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default() .build_storage() - .expect("Failed to build storage for test"); + .unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], - dev_accounts: None, + ..Default::default() } .assimilate_storage(&mut t) - .expect("Failed to build storage for test"); + .unwrap(); pallet_collective::GenesisConfig:: { members: vec![1, 2, 3], phantom: Default::default(), } .assimilate_storage(&mut t) - .expect("Failed to build storage for test"); + .unwrap(); let mut ext = 
sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); @@ -289,6 +287,20 @@ fn call_foobar(err: bool, start_weight: Weight, end_weight: Option) -> R }) } +fn utility_events() -> Vec { + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let RuntimeEvent::Utility(inner) = e { + Some(inner) + } else { + None + } + }) + .collect() +} + #[test] fn as_derivative_works() { new_test_ext().execute_with(|| { @@ -708,7 +720,7 @@ fn batch_all_handles_weight_refund() { assert_eq!( extract_actual_weight(&result, &info), // Real weight is 2 calls at end_weight - ::WeightInfo::batch_all(2).saturating_add(end_weight.saturating_mul(2)), + ::WeightInfo::batch_all(2) + end_weight * 2, ); }); } @@ -1006,3 +1018,177 @@ fn with_weight_works() { ); }) } + +#[test] +fn dispatch_as_works() { + new_test_ext().execute_with(|| { + Balances::force_set_balance(RuntimeOrigin::root(), 666, 100).unwrap(); + assert_eq!(Balances::free_balance(666), 100); + assert_eq!(Balances::free_balance(777), 0); + assert_ok!(Utility::dispatch_as( + RuntimeOrigin::root(), + Box::new(OriginCaller::system(frame_system::RawOrigin::Signed(666))), + Box::new(call_transfer(777, 100)) + )); + assert_eq!(Balances::free_balance(666), 0); + assert_eq!(Balances::free_balance(777), 100); + + System::reset_events(); + assert_ok!(Utility::dispatch_as( + RuntimeOrigin::root(), + Box::new(OriginCaller::system(frame_system::RawOrigin::Signed(777))), + Box::new(RuntimeCall::Timestamp(TimestampCall::set { now: 0 })) + )); + assert_eq!( + utility_events(), + vec![Event::DispatchedAs { + result: Err(DispatchError::BadOrigin) + }] + ); + }) +} + +#[test] +fn if_else_with_root_works() { + new_test_ext().execute_with(|| { + let k = b"a".to_vec(); + let call = RuntimeCall::System(frame_system::Call::set_storage { + items: vec![(k.clone(), k.clone())], + }); + assert!(!TestBaseCallFilter::contains(&call)); + assert_eq!(Balances::free_balance(1), 10); + 
assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::if_else( + RuntimeOrigin::root(), + RuntimeCall::Balances(BalancesCall::force_transfer { + source: 1, + dest: 2, + value: 11 + }) + .into(), + call.into(), + )); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_eq!(storage::unhashed::get_raw(&k), Some(k)); + System::assert_last_event( + utility::Event::IfElseFallbackCalled { + main_error: TokenError::FundsUnavailable.into(), + } + .into(), + ); + }); +} + +#[test] +fn if_else_with_signed_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::if_else( + RuntimeOrigin::signed(1), + call_transfer(2, 11).into(), + call_transfer(2, 5).into() + )); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::free_balance(2), 15); + + System::assert_last_event( + utility::Event::IfElseFallbackCalled { + main_error: TokenError::FundsUnavailable.into(), + } + .into(), + ); + }); +} + +#[test] +fn if_else_successful_main_call() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::if_else( + RuntimeOrigin::signed(1), + call_transfer(2, 9).into(), + call_transfer(2, 1).into() + )); + assert_eq!(Balances::free_balance(1), 1); + assert_eq!(Balances::free_balance(2), 19); + + System::assert_last_event(utility::Event::IfElseMainSuccess.into()); + }) +} + +#[test] +fn dispatch_as_fallible_works() { + new_test_ext().execute_with(|| { + Balances::force_set_balance(RuntimeOrigin::root(), 666, 100).unwrap(); + assert_eq!(Balances::free_balance(666), 100); + assert_eq!(Balances::free_balance(777), 0); + assert_ok!(Utility::dispatch_as_fallible( + RuntimeOrigin::root(), + Box::new(OriginCaller::system(frame_system::RawOrigin::Signed(666))), + Box::new(call_transfer(777, 100)) + )); + assert_eq!(Balances::free_balance(666), 
0); + assert_eq!(Balances::free_balance(777), 100); + + assert_noop!( + Utility::dispatch_as_fallible( + RuntimeOrigin::root(), + Box::new(OriginCaller::system(frame_system::RawOrigin::Signed(777))), + Box::new(RuntimeCall::Timestamp(TimestampCall::set { now: 0 })) + ), + DispatchError::BadOrigin, + ); + }) +} + +#[test] +fn if_else_failing_fallback_call() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_err_ignore_postinfo!( + Utility::if_else( + RuntimeOrigin::signed(1), + call_transfer(2, 11).into(), + call_transfer(2, 11).into() + ), + TokenError::FundsUnavailable + ); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + }) +} + +#[test] +fn if_else_with_nested_if_else_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + + let main_call = call_transfer(2, 11).into(); + let fallback_call = call_transfer(2, 5).into(); + + let nested_if_else_call = RuntimeCall::Utility(UtilityCall::if_else { + main: main_call, + fallback: fallback_call, + }) + .into(); + + // Nested `if_else` call. + assert_ok!(Utility::if_else( + RuntimeOrigin::signed(1), + nested_if_else_call, + call_transfer(2, 7).into() + )); + + // inner if_else fallback is executed. + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::free_balance(2), 15); + + // Ensure the correct event was triggered for the main call(nested if_else). + System::assert_last_event(utility::Event::IfElseMainSuccess.into()); + }); +} diff --git a/pallets/utility/src/weights.rs b/pallets/utility/src/weights.rs index 502f85a3f1..eb1f036087 100644 --- a/pallets/utility/src/weights.rs +++ b/pallets/utility/src/weights.rs @@ -18,33 +18,37 @@ //! Autogenerated weights for `pallet_utility` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! 
DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `4563561839a5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain=dev +// --extrinsic=* +// --runtime=target/production/wbuild/kitchensink-runtime/kitchensink_runtime.wasm +// --pallet=pallet_utility +// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 +// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/utility/src/weights.rs +// --wasm-execution=compiled // --steps=50 // --repeat=20 -// --pallet=pallet_utility +// --heap-pages=4096 +// --template=substrate/.maintain/frame-weight-template.hbs // --no-storage-info -// --no-median-slopes // --no-min-squares -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --output=./substrate/frame/utility/src/weights.rs -// --header=./substrate/HEADER-APACHE2 -// --template=./substrate/.maintain/frame-weight-template.hbs +// --no-median-slopes +// --genesis-builder-policy=none +// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage,pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] #![allow(missing_docs)] +#![allow(dead_code)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use 
core::marker::PhantomData; @@ -56,6 +60,8 @@ pub trait WeightInfo { fn batch_all(c: u32, ) -> Weight; fn dispatch_as() -> Weight; fn force_batch(c: u32, ) -> Weight; + fn dispatch_as_fallible() -> Weight; + fn if_else() -> Weight; } /// Weights for `pallet_utility` using the Substrate node and recommended hardware. @@ -68,12 +74,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` + // Measured: `0` // Estimated: `3997` - // Minimum execution time: 5_312_000 picoseconds. - Weight::from_parts(2_694_370, 3997) - // Standard Error: 5_055 - .saturating_add(Weight::from_parts(5_005_941, 0).saturating_mul(c.into())) + // Minimum execution time: 3_972_000 picoseconds. + Weight::from_parts(4_034_000, 3997) + // Standard Error: 2_323 + .saturating_add(Weight::from_parts(4_914_560, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -82,10 +88,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_derivative() -> Weight { // Proof Size summary in bytes: - // Measured: `145` + // Measured: `0` // Estimated: `3997` - // Minimum execution time: 9_263_000 picoseconds. - Weight::from_parts(9_639_000, 3997) + // Minimum execution time: 5_866_000 picoseconds. + Weight::from_parts(6_097_000, 3997) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -95,20 +101,20 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` + // Measured: `0` // Estimated: `3997` - // Minimum execution time: 5_120_000 picoseconds. 
- Weight::from_parts(12_948_874, 3997) - // Standard Error: 4_643 - .saturating_add(Weight::from_parts(5_162_821, 0).saturating_mul(c.into())) + // Minimum execution time: 3_983_000 picoseconds. + Weight::from_parts(4_075_000, 3997) + // Standard Error: 2_176 + .saturating_add(Weight::from_parts(5_127_263, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_126_000 picoseconds. - Weight::from_parts(7_452_000, 0) + // Minimum execution time: 5_530_000 picoseconds. + Weight::from_parts(5_720_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -117,14 +123,33 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` + // Measured: `0` // Estimated: `3997` - // Minimum execution time: 5_254_000 picoseconds. - Weight::from_parts(4_879_712, 3997) - // Standard Error: 4_988 - .saturating_add(Weight::from_parts(4_955_816, 0).saturating_mul(c.into())) + // Minimum execution time: 3_880_000 picoseconds. + Weight::from_parts(4_035_000, 3997) + // Standard Error: 1_682 + .saturating_add(Weight::from_parts(4_902_729, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } + fn dispatch_as_fallible() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_417_000 picoseconds. 
+ Weight::from_parts(5_705_000, 0) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:2 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + fn if_else() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `7004` + // Minimum execution time: 11_273_000 picoseconds. + Weight::from_parts(11_571_000, 7004) + .saturating_add(T::DbWeight::get().reads(3_u64)) + } } // For backwards compatibility and tests. @@ -136,12 +161,12 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` + // Measured: `0` // Estimated: `3997` - // Minimum execution time: 5_312_000 picoseconds. - Weight::from_parts(2_694_370, 3997) - // Standard Error: 5_055 - .saturating_add(Weight::from_parts(5_005_941, 0).saturating_mul(c.into())) + // Minimum execution time: 3_972_000 picoseconds. + Weight::from_parts(4_034_000, 3997) + // Standard Error: 2_323 + .saturating_add(Weight::from_parts(4_914_560, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -150,10 +175,10 @@ impl WeightInfo for () { /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn as_derivative() -> Weight { // Proof Size summary in bytes: - // Measured: `145` + // Measured: `0` // Estimated: `3997` - // Minimum execution time: 9_263_000 picoseconds. - Weight::from_parts(9_639_000, 3997) + // Minimum execution time: 5_866_000 picoseconds. 
+ Weight::from_parts(6_097_000, 3997) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) @@ -163,20 +188,20 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` + // Measured: `0` // Estimated: `3997` - // Minimum execution time: 5_120_000 picoseconds. - Weight::from_parts(12_948_874, 3997) - // Standard Error: 4_643 - .saturating_add(Weight::from_parts(5_162_821, 0).saturating_mul(c.into())) + // Minimum execution time: 3_983_000 picoseconds. + Weight::from_parts(4_075_000, 3997) + // Standard Error: 2_176 + .saturating_add(Weight::from_parts(5_127_263, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn dispatch_as() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_126_000 picoseconds. - Weight::from_parts(7_452_000, 0) + // Minimum execution time: 5_530_000 picoseconds. + Weight::from_parts(5_720_000, 0) } /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -185,12 +210,31 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` + // Measured: `0` // Estimated: `3997` - // Minimum execution time: 5_254_000 picoseconds. - Weight::from_parts(4_879_712, 3997) - // Standard Error: 4_988 - .saturating_add(Weight::from_parts(4_955_816, 0).saturating_mul(c.into())) + // Minimum execution time: 3_880_000 picoseconds. 
+ Weight::from_parts(4_035_000, 3997) + // Standard Error: 1_682 + .saturating_add(Weight::from_parts(4_902_729, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } + fn dispatch_as_fallible() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_417_000 picoseconds. + Weight::from_parts(5_705_000, 0) + } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:2 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) + fn if_else() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `7004` + // Minimum execution time: 11_273_000 picoseconds. + Weight::from_parts(11_571_000, 7004) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + } } From 383956c5d9dc66cddd914dbb2d4e70b2496b44bc Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Thu, 11 Sep 2025 14:27:20 -0400 Subject: [PATCH 213/379] lower hyperparameter rate limit to 2 tempos --- pallets/admin-utils/src/tests/mod.rs | 41 +++++++++++++------- pallets/subtensor/src/tests/ensure.rs | 15 ++++--- pallets/subtensor/src/utils/rate_limiting.rs | 3 ++ 3 files changed, 40 insertions(+), 19 deletions(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 25b1b89607..711569726b 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2097,10 +2097,12 @@ fn test_owner_hyperparam_update_rate_limit_enforced() { let owner: U256 = U256::from(5); SubnetOwner::::insert(netuid, owner); - // Configure owner hyperparam RL to 2 blocks - assert_ok!(AdminUtils::sudo_set_owner_hparam_rate_limit( + // Set tempo to 1 so owner hyperparam RL = 2 tempos = 2 blocks + SubtensorModule::set_tempo(netuid, 1); + // Disable admin 
freeze window to avoid blocking on small tempo + assert_ok!(AdminUtils::sudo_set_admin_freeze_window( <::RuntimeOrigin>::root(), - 2 + 0 )); // First update succeeds @@ -2140,10 +2142,9 @@ fn test_owner_hyperparam_update_rate_limit_enforced() { }); } -// Verifies that when the owner hyperparameter rate limit is left at its default (0), hyperparameter -// updates are not blocked until a non-zero value is set. +// Verifies that owner hyperparameter rate limit is enforced based on tempo (2 tempos). #[test] -fn test_hyperparam_rate_limit_not_blocking_with_default() { +fn test_hyperparam_rate_limit_enforced_by_tempo() { new_test_ext().execute_with(|| { // Setup subnet and owner let netuid = NetUid::from(42); @@ -2151,23 +2152,37 @@ fn test_hyperparam_rate_limit_not_blocking_with_default() { let owner: U256 = U256::from(77); SubnetOwner::::insert(netuid, owner); - // Read the default (unset) owner hyperparam rate limit - let default_limit = pallet_subtensor::OwnerHyperparamRateLimit::::get(); - - assert_eq!(default_limit, 0); + // Set tempo to 1 so RL = 2 blocks + SubtensorModule::set_tempo(netuid, 1); + // Disable admin freeze window to avoid blocking on small tempo + assert_ok!(AdminUtils::sudo_set_admin_freeze_window( + <::RuntimeOrigin>::root(), + 0 + )); - // First owner update should always succeed + // First owner update should succeed assert_ok!(AdminUtils::sudo_set_kappa( <::RuntimeOrigin>::signed(owner), netuid, 1 )); - // With default == 0, second immediate update should also pass (no rate limiting) + // Immediate second update should fail due to tempo-based RL + assert_noop!( + AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 2 + ), + SubtensorError::::TxRateLimitExceeded + ); + + // Advance 2 blocks (2 tempos with tempo=1) then succeed + run_to_block(SubtensorModule::get_current_block_as_u64() + 2); assert_ok!(AdminUtils::sudo_set_kappa( <::RuntimeOrigin>::signed(owner), netuid, - 2 + 3 )); }); } diff --git 
a/pallets/subtensor/src/tests/ensure.rs b/pallets/subtensor/src/tests/ensure.rs index a8b3843fa3..e7bd70fb5a 100644 --- a/pallets/subtensor/src/tests/ensure.rs +++ b/pallets/subtensor/src/tests/ensure.rs @@ -65,11 +65,11 @@ fn ensure_subnet_owner_or_root_distinguishes_root_and_owner() { fn ensure_root_with_rate_limit_blocks_in_freeze_window() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(1); - let tempo = 10; + let tempo: u16 = 10; add_network(netuid, 10, 0); // Set freeze window to 3 - let freeze_window = 3; + let freeze_window: u16 = 3; crate::Pallet::::set_admin_freeze_window(freeze_window); run_to_block((tempo - freeze_window + 1).into()); @@ -94,12 +94,12 @@ fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { SubtokenEnabled::::insert(netuid, true); let owner: U256 = U256::from(5); SubnetOwner::::insert(netuid, owner); - // Set freeze window to 3 + // Set freeze window to 0 initially to avoid blocking when tempo is small let freeze_window = 3; - crate::Pallet::::set_admin_freeze_window(freeze_window); + crate::Pallet::::set_admin_freeze_window(0); - // Set owner RL to 2 blocks - crate::Pallet::::set_owner_hyperparam_rate_limit(2); + // Set tempo to 1 so owner hyperparam RL = 2 blocks + crate::Pallet::::set_tempo(netuid, 1); // Outside freeze window initially; should pass and return Some(owner) let res = crate::Pallet::::ensure_sn_owner_or_root_with_limits( @@ -135,6 +135,9 @@ fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { // Now advance into the freeze window; ensure blocks // (using loop for clarity, because epoch calculation function uses netuid) + // Restore tempo and configure freeze window for this part + crate::Pallet::::set_tempo(netuid, tempo); + crate::Pallet::::set_admin_freeze_window(freeze_window); let freeze_window = freeze_window as u64; loop { let cur = crate::Pallet::::get_current_block_as_u64(); diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index 
e9a8bb7b12..319f759411 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -69,6 +69,9 @@ impl Pallet { match tx_type { TransactionType::SetWeightsVersionKey => (Tempo::::get(netuid) as u64) .saturating_mul(WeightsVersionKeyRateLimit::::get()), + // Owner hyperparameter updates are now rate-limited by 2 tempos on the subnet + TransactionType::OwnerHyperparamUpdate => + (Tempo::::get(netuid) as u64).saturating_mul(2), TransactionType::SetSNOwnerHotkey => DefaultSetSNOwnerHotkeyRateLimit::::get(), _ => Self::get_rate_limit(tx_type), From 4fb2b0d7d3fcfae9055186e9f5c3edcb0d8b5ab9 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Thu, 11 Sep 2025 15:30:33 -0300 Subject: [PATCH 214/379] fix missing block provider --- runtime/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index d49d5147e9..7e44a3dc2d 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -884,6 +884,7 @@ impl pallet_proxy::Config for Runtime { type CallHasher = BlakeTwo256; type AnnouncementDepositBase = AnnouncementDepositBase; type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = System; } pub struct Proxier; From d01252741aac3e2b1bd45eefc5501699e54a49aa Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Thu, 11 Sep 2025 15:50:04 -0300 Subject: [PATCH 215/379] fix issues --- pallets/proxy/src/benchmarking.rs | 1 + pallets/proxy/src/lib.rs | 31 ++++++++++++++----------------- pallets/proxy/src/tests.rs | 10 +++++----- pallets/utility/src/lib.rs | 14 +++++++------- pallets/utility/src/tests.rs | 16 ++++++++-------- 5 files changed, 35 insertions(+), 37 deletions(-) diff --git a/pallets/proxy/src/benchmarking.rs b/pallets/proxy/src/benchmarking.rs index e24e877160..ff881912d1 100644 --- a/pallets/proxy/src/benchmarking.rs +++ b/pallets/proxy/src/benchmarking.rs @@ -18,6 +18,7 @@ // Benchmarks for Proxy Pallet #![cfg(feature = "runtime-benchmarks")] 
+#![allow(clippy::arithmetic_side_effects)] use super::*; use crate::Pallet as Proxy; diff --git a/pallets/proxy/src/lib.rs b/pallets/proxy/src/lib.rs index 807bac088c..a4325bd099 100644 --- a/pallets/proxy/src/lib.rs +++ b/pallets/proxy/src/lib.rs @@ -819,13 +819,13 @@ impl Pallet { /// /// - `who`: The spawner account. /// - `proxy_type`: The type of the proxy that the sender will be registered as over the - /// new account. This will almost always be the most permissive `ProxyType` possible to - /// allow for maximum flexibility. + /// new account. This will almost always be the most permissive `ProxyType` possible to + /// allow for maximum flexibility. /// - `index`: A disambiguation index, in case this is called multiple times in the same - /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just - /// want to use `0`. + /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just + /// want to use `0`. /// - `maybe_when`: The block height and extrinsic index of when the pure account was - /// created. None to use current block height and extrinsic index. + /// created. None to use current block height and extrinsic index. pub fn pure_account( who: &T::AccountId, proxy_type: &T::ProxyType, @@ -858,7 +858,7 @@ impl Pallet { /// - `delegatee`: The account that the `delegator` would like to make a proxy. /// - `proxy_type`: The permissions allowed for this proxy account. /// - `delay`: The announcement period required of the initial proxy. Will generally be - /// zero. + /// zero. 
pub fn add_proxy_delegate( delegator: &T::AccountId, delegatee: T::AccountId, @@ -881,9 +881,9 @@ impl Pallet { .map_err(|_| Error::::TooMany)?; let new_deposit = Self::deposit(proxies.len() as u32); if new_deposit > *deposit { - T::Currency::reserve(delegator, new_deposit - *deposit)?; + T::Currency::reserve(delegator, new_deposit.saturating_sub(*deposit))?; } else if new_deposit < *deposit { - T::Currency::unreserve(delegator, *deposit - new_deposit); + T::Currency::unreserve(delegator, (*deposit).saturating_sub(new_deposit)); } *deposit = new_deposit; Self::deposit_event(Event::::ProxyAdded { @@ -903,7 +903,7 @@ impl Pallet { /// - `delegatee`: The account that the `delegator` would like to make a proxy. /// - `proxy_type`: The permissions allowed for this proxy account. /// - `delay`: The announcement period required of the initial proxy. Will generally be - /// zero. + /// zero. pub fn remove_proxy_delegate( delegator: &T::AccountId, delegatee: T::AccountId, @@ -924,9 +924,9 @@ impl Pallet { proxies.remove(i); let new_deposit = Self::deposit(proxies.len() as u32); if new_deposit > old_deposit { - T::Currency::reserve(delegator, new_deposit - old_deposit)?; + T::Currency::reserve(delegator, new_deposit.saturating_sub(old_deposit))?; } else if new_deposit < old_deposit { - T::Currency::unreserve(delegator, old_deposit - new_deposit); + T::Currency::unreserve(delegator, old_deposit.saturating_sub(new_deposit)); } if !proxies.is_empty() { *x = Some((proxies, new_deposit)) @@ -1006,10 +1006,7 @@ impl Pallet { force_proxy_type: Option, ) -> Result>, DispatchError> { let f = |x: &ProxyDefinition>| -> bool { - &x.delegate == delegate - && force_proxy_type - .as_ref() - .map_or(true, |y| &x.proxy_type == y) + &x.delegate == delegate && force_proxy_type.as_ref().is_none_or(|y| &x.proxy_type == y) }; Ok(Proxies::::get(real) .0 @@ -1059,7 +1056,7 @@ impl Pallet { /// Parameters: /// - `delegator`: The delegator account. 
pub fn remove_all_proxy_delegates(delegator: &T::AccountId) { - let (_, old_deposit) = Proxies::::take(&delegator); - T::Currency::unreserve(&delegator, old_deposit); + let (_, old_deposit) = Proxies::::take(delegator); + T::Currency::unreserve(delegator, old_deposit); } } diff --git a/pallets/proxy/src/tests.rs b/pallets/proxy/src/tests.rs index b6734b80a8..87b56531c1 100644 --- a/pallets/proxy/src/tests.rs +++ b/pallets/proxy/src/tests.rs @@ -27,11 +27,11 @@ use frame::testing_prelude::*; type Block = frame_system::mocking::MockBlock; construct_runtime!( - pub struct Test { - System: frame_system, - Balances: pallet_balances, - Proxy: proxy, - Utility: pallet_utility, + pub enum Test { + System: frame_system = 1, + Balances: pallet_balances = 2, + Proxy: proxy = 3, + Utility: pallet_utility = 4, } ); diff --git a/pallets/utility/src/lib.rs b/pallets/utility/src/lib.rs index efcc434e8c..577ce8e941 100644 --- a/pallets/utility/src/lib.rs +++ b/pallets/utility/src/lib.rs @@ -144,11 +144,12 @@ pub mod pallet { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; let call_size = (core::mem::size_of::<::RuntimeCall>() as u32) .div_ceil(CALL_ALIGN) - * CALL_ALIGN; + .checked_mul(CALL_ALIGN) + .unwrap_or(u32::MAX); // The margin to take into account vec doubling capacity. let margin_factor = 3; - allocator_limit / margin_factor / call_size + allocator_limit.checked_div(margin_factor).map_or(0, |x| x.checked_div(call_size).unwrap_or(0)) } } @@ -158,8 +159,7 @@ pub mod pallet { // If you hit this error, you need to try to `Box` big dispatchable parameters. assert!( core::mem::size_of::<::RuntimeCall>() as u32 <= CALL_ALIGN, - "Call enum size should be smaller than {} bytes.", - CALL_ALIGN, + "Call enum size should be smaller than {CALL_ALIGN} bytes.", ); } } @@ -192,7 +192,7 @@ pub mod pallet { /// event is deposited. 
#[pallet::call_index(0)] #[pallet::weight({ - let (dispatch_weight, dispatch_class, pays) = Pallet::::weight_and_dispatch_class(&calls); + let (dispatch_weight, dispatch_class, pays) = Pallet::::weight_and_dispatch_class(calls); let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch(calls.len() as u32)); (dispatch_weight, dispatch_class, pays) })] @@ -304,7 +304,7 @@ pub mod pallet { /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(2)] #[pallet::weight({ - let (dispatch_weight, dispatch_class, pays) = Pallet::::weight_and_dispatch_class(&calls); + let (dispatch_weight, dispatch_class, pays) = Pallet::::weight_and_dispatch_class(calls); let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); (dispatch_weight, dispatch_class, pays) })] @@ -403,7 +403,7 @@ pub mod pallet { /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(4)] #[pallet::weight({ - let (dispatch_weight, dispatch_class, pays) = Pallet::::weight_and_dispatch_class(&calls); + let (dispatch_weight, dispatch_class, pays) = Pallet::::weight_and_dispatch_class(calls); let dispatch_weight = dispatch_weight.saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); (dispatch_weight, dispatch_class, pays) })] diff --git a/pallets/utility/src/tests.rs b/pallets/utility/src/tests.rs index 5c0bd3568f..42bd198c5e 100644 --- a/pallets/utility/src/tests.rs +++ b/pallets/utility/src/tests.rs @@ -127,14 +127,14 @@ type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( pub enum Test { - System: frame_system, - Timestamp: pallet_timestamp, - Balances: pallet_balances, - RootTesting: pallet_root_testing, - Council: pallet_collective::, - Utility: utility, - Example: example, - Democracy: mock_democracy, + System: frame_system = 1, + Timestamp: pallet_timestamp = 2, + Balances: pallet_balances = 3, + RootTesting: pallet_root_testing = 4, + Council: pallet_collective:: = 5, + 
Utility: utility = 6, + Example: example = 7, + Democracy: mock_democracy = 8, } ); From 59045c9236fc9fee97d5c9276e43143dc89a7a0a Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Thu, 11 Sep 2025 15:57:35 -0300 Subject: [PATCH 216/379] cargo fmt --- pallets/utility/src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pallets/utility/src/lib.rs b/pallets/utility/src/lib.rs index 577ce8e941..bafb7ce9d9 100644 --- a/pallets/utility/src/lib.rs +++ b/pallets/utility/src/lib.rs @@ -149,7 +149,9 @@ pub mod pallet { // The margin to take into account vec doubling capacity. let margin_factor = 3; - allocator_limit.checked_div(margin_factor).map_or(0, |x| x.checked_div(call_size).unwrap_or(0)) + allocator_limit + .checked_div(margin_factor) + .map_or(0, |x| x.checked_div(call_size).unwrap_or(0)) } } From 6eedf4b4413e101bb132c7170a10664bf0c58b44 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Thu, 11 Sep 2025 16:38:35 -0300 Subject: [PATCH 217/379] fix linting --- pallets/utility/src/tests.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pallets/utility/src/tests.rs b/pallets/utility/src/tests.rs index 42bd198c5e..17b5cd96ca 100644 --- a/pallets/utility/src/tests.rs +++ b/pallets/utility/src/tests.rs @@ -18,6 +18,7 @@ // Tests for Utility Pallet #![cfg(test)] +#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used)] use super::*; @@ -39,6 +40,7 @@ type BlockNumber = u64; // example module to test behaviors. 
#[frame_support::pallet(dev_mode)] +#[allow(clippy::large_enum_variant)] pub mod example { use frame_support::{dispatch::WithPostDispatchInfo, pallet_prelude::*}; use frame_system::pallet_prelude::*; From 44fc6bf2f66696e992a543c69810790e27f8738d Mon Sep 17 00:00:00 2001 From: bdhimes Date: Thu, 11 Sep 2025 21:51:03 +0200 Subject: [PATCH 218/379] Corrected #99 --- pallets/subtensor/src/subnets/symbols.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/symbols.rs b/pallets/subtensor/src/subnets/symbols.rs index 21a3652fb4..2546a84c56 100644 --- a/pallets/subtensor/src/subnets/symbols.rs +++ b/pallets/subtensor/src/subnets/symbols.rs @@ -96,7 +96,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xE1\x9A\xA6", // ᚦ (Thurisaz, giant, 83) b"\xE1\x9A\xA8", // ᚨ (Ansuz, god, 84) b"\xE1\x9A\xB1", // ᚱ (Raidho, ride, 85) - b"\xE1\x9A\xB2", // ᚲ (Kaunan, ulcer, 86) + b"\xE1\x9A\xB3", // ᚳ (Kaunan, ulcer, 86) b"\xD0\xAB", // Ы (Cyrillic Yeru, 87) b"\xE1\x9B\x89", // ᛉ (Algiz, protection, 88) b"\xE1\x9B\x92", // ᛒ (Berkanan, birch, 89) From 4b7731b64e922fd6aa1fdbb2a56846d3e4e64586 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Thu, 11 Sep 2025 17:01:48 -0300 Subject: [PATCH 219/379] fix linting 2 --- pallets/proxy/src/tests.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pallets/proxy/src/tests.rs b/pallets/proxy/src/tests.rs index 87b56531c1..9e54f52144 100644 --- a/pallets/proxy/src/tests.rs +++ b/pallets/proxy/src/tests.rs @@ -18,6 +18,7 @@ // Tests for Proxy Pallet #![cfg(test)] +#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used, clippy::indexing-slicing)] use super::*; use crate as proxy; From cb07050ac3aa29a661c702f904778ee4efc547c4 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Thu, 11 Sep 2025 17:13:06 -0300 Subject: [PATCH 220/379] fix linting 3 --- pallets/proxy/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/proxy/src/tests.rs b/pallets/proxy/src/tests.rs index 
9e54f52144..8449796af9 100644 --- a/pallets/proxy/src/tests.rs +++ b/pallets/proxy/src/tests.rs @@ -18,7 +18,7 @@ // Tests for Proxy Pallet #![cfg(test)] -#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used, clippy::indexing-slicing)] +#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used, clippy::indexing_slicing)] use super::*; use crate as proxy; From b23a2ac055adb31055a3bfcc330a52750f7e4c51 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Thu, 11 Sep 2025 17:20:18 -0300 Subject: [PATCH 221/379] fix linting 4 --- pallets/proxy/src/tests.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pallets/proxy/src/tests.rs b/pallets/proxy/src/tests.rs index 8449796af9..4e5e4722c3 100644 --- a/pallets/proxy/src/tests.rs +++ b/pallets/proxy/src/tests.rs @@ -18,7 +18,11 @@ // Tests for Proxy Pallet #![cfg(test)] -#![allow(clippy::arithmetic_side_effects, clippy::unwrap_used, clippy::indexing_slicing)] +#![allow( + clippy::arithmetic_side_effects, + clippy::unwrap_used, + clippy::indexing_slicing +)] use super::*; use crate as proxy; From 8d1dc418782c7e700e85792da0c99b3c1abb3c78 Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Thu, 11 Sep 2025 16:22:57 -0400 Subject: [PATCH 222/379] allow setting storage based on tempos --- .../neuron.precompile.reveal-weights.test.ts | 4 +- .../neuron.precompile.set-weights.test.ts | 4 +- .../test/staking.precompile.reward.test.ts | 4 +- .../subnet.precompile.hyperparameter.test.ts | 4 +- pallets/admin-utils/src/benchmarking.rs | 4 +- pallets/admin-utils/src/lib.rs | 10 ++--- pallets/admin-utils/src/tests/mod.rs | 8 ++-- pallets/subtensor/src/lib.rs | 18 ++++----- pallets/subtensor/src/macros/events.rs | 4 +- pallets/subtensor/src/macros/hooks.rs | 2 + .../migrate_owner_hparam_rl_to_tempos.rs | 39 +++++++++++++++++++ pallets/subtensor/src/migrations/mod.rs | 1 + pallets/subtensor/src/utils/misc.rs | 6 +-- pallets/subtensor/src/utils/rate_limiting.rs | 9 +++-- 14 files changed, 80 insertions(+), 37 
deletions(-) create mode 100644 pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_tempos.rs diff --git a/evm-tests/test/neuron.precompile.reveal-weights.test.ts b/evm-tests/test/neuron.precompile.reveal-weights.test.ts index 52ddc91967..ac9c598743 100644 --- a/evm-tests/test/neuron.precompile.reveal-weights.test.ts +++ b/evm-tests/test/neuron.precompile.reveal-weights.test.ts @@ -79,8 +79,8 @@ describe("Test neuron precompile reveal weights", () => { const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - // Set OwnerHyperparamRateLimit to 0 - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) + // Set OwnerHyperparamTempos to 0 (disable RL) + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_tempos({ tempos: 0 }) const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) } diff --git a/evm-tests/test/neuron.precompile.set-weights.test.ts b/evm-tests/test/neuron.precompile.set-weights.test.ts index 4ecc0b36db..1423a2d045 100644 --- a/evm-tests/test/neuron.precompile.set-weights.test.ts +++ b/evm-tests/test/neuron.precompile.set-weights.test.ts @@ -47,8 +47,8 @@ describe("Test neuron precompile contract, set weights function", () => { const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - // Set OwnerHyperparamRateLimit to 0 - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) + // Set OwnerHyperparamTempos to 0 (disable RL) + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_tempos({ tempos: 0 }) const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) } diff --git 
a/evm-tests/test/staking.precompile.reward.test.ts b/evm-tests/test/staking.precompile.reward.test.ts index 108e0ed88c..93e7010b53 100644 --- a/evm-tests/test/staking.precompile.reward.test.ts +++ b/evm-tests/test/staking.precompile.reward.test.ts @@ -48,8 +48,8 @@ describe("Test neuron precompile reward", () => { const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - // Set OwnerHyperparamRateLimit to 0 - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) + // Set OwnerHyperparamTempos to 0 (disable RL) + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_tempos({ tempos: 0 }) const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) } diff --git a/evm-tests/test/subnet.precompile.hyperparameter.test.ts b/evm-tests/test/subnet.precompile.hyperparameter.test.ts index 5d81049d41..2741b018f7 100644 --- a/evm-tests/test/subnet.precompile.hyperparameter.test.ts +++ b/evm-tests/test/subnet.precompile.hyperparameter.test.ts @@ -35,8 +35,8 @@ describe("Test the Subnet precompile contract", () => { const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - // Set OwnerHyperparamRateLimit to 0 - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ limit: BigInt(0) }) + // Set OwnerHyperparamTempos to 0 (disable RL) + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_tempos({ tempos: 0 }) const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) } diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index b8dafc0de2..085ee56b03 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ 
b/pallets/admin-utils/src/benchmarking.rs @@ -353,9 +353,9 @@ mod benchmarks { } #[benchmark] - fn sudo_set_owner_hparam_rate_limit() { + fn sudo_set_owner_hparam_tempos() { #[extrinsic_call] - _(RawOrigin::Root, 10u64/*limit*/)/*sudo_set_owner_hparam_rate_limit*/; + _(RawOrigin::Root, 2u16/*tempos*/)/*sudo_set_owner_hparam_tempos*/; } #[benchmark] diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 4af202132f..8d20fe3750 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1833,17 +1833,17 @@ pub mod pallet { Ok(()) } - /// Sets the owner hyperparameter rate limit (in blocks). + /// Sets the owner hyperparameter rate limit in tempos (global multiplier). /// Only callable by root. #[pallet::call_index(75)] #[pallet::weight((0, DispatchClass::Operational, Pays::No))] - pub fn sudo_set_owner_hparam_rate_limit( + pub fn sudo_set_owner_hparam_tempos( origin: OriginFor, - limit: u64, + tempos: u16, ) -> DispatchResult { ensure_root(origin)?; - pallet_subtensor::Pallet::::set_owner_hyperparam_rate_limit(limit); - log::debug!("OwnerHyperparamRateLimitSet( limit: {limit:?} ) "); + pallet_subtensor::Pallet::::set_owner_hyperparam_tempos(tempos); + log::debug!("OwnerHyperparamTemposSet( tempos: {tempos:?} ) "); Ok(()) } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 711569726b..79ea690706 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -1973,19 +1973,19 @@ fn test_sudo_set_admin_freeze_window_and_rate() { )); assert_eq!(pallet_subtensor::AdminFreezeWindow::::get(), 7); - // Owner hyperparam rate limit setter + // Owner hyperparam tempos setter assert_eq!( - AdminUtils::sudo_set_owner_hparam_rate_limit( + AdminUtils::sudo_set_owner_hparam_tempos( <::RuntimeOrigin>::signed(U256::from(1)), 5 ), Err(DispatchError::BadOrigin) ); - assert_ok!(AdminUtils::sudo_set_owner_hparam_rate_limit( + 
assert_ok!(AdminUtils::sudo_set_owner_hparam_tempos( <::RuntimeOrigin>::root(), 5 )); - assert_eq!(pallet_subtensor::OwnerHyperparamRateLimit::::get(), 5); + assert_eq!(pallet_subtensor::OwnerHyperparamTempos::::get(), 5); }); } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index dd12a9b76b..235e8bee76 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -860,18 +860,18 @@ pub mod pallet { 50400 } - #[pallet::type_value] - /// Default value for subnet owner hyperparameter update rate limit (in blocks) - pub fn DefaultOwnerHyperparamRateLimit() -> u64 { - 0 - } - #[pallet::type_value] /// Default number of terminal blocks in a tempo during which admin operations are prohibited pub fn DefaultAdminFreezeWindow() -> u16 { 10 } + #[pallet::type_value] + /// Default number of tempos for owner hyperparameter update rate limit + pub fn DefaultOwnerHyperparamTempos() -> u16 { + 2 + } + #[pallet::type_value] /// Default value for ck burn, 18%. 
pub fn DefaultCKBurn() -> u64 { @@ -888,9 +888,9 @@ pub mod pallet { StorageValue<_, u16, ValueQuery, DefaultAdminFreezeWindow>; #[pallet::storage] - /// Global rate limit (in blocks) for subnet owner hyperparameter updates - pub type OwnerHyperparamRateLimit = - StorageValue<_, u64, ValueQuery, DefaultOwnerHyperparamRateLimit>; + /// Global number of tempos used to rate limit subnet owner hyperparameter updates + pub type OwnerHyperparamTempos = + StorageValue<_, u16, ValueQuery, DefaultOwnerHyperparamTempos>; #[pallet::storage] pub type ColdkeySwapScheduleDuration = diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index a0779cd8b1..2400e3ef8c 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -116,8 +116,8 @@ mod events { TxChildKeyTakeRateLimitSet(u64), /// setting the admin freeze window length (last N blocks of tempo) AdminFreezeWindowSet(u16), - /// setting the owner hyperparameter rate limit (in blocks) - OwnerHyperparamRateLimitSet(u64), + /// setting the owner hyperparameter rate limit in tempos + OwnerHyperparamTemposSet(u16), /// minimum childkey take set MinChildKeyTakeSet(u16), /// maximum childkey take set diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index b43f9422df..d23d5884f2 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -138,6 +138,8 @@ mod hooks { .saturating_add(migrations::migrate_fix_root_tao_and_alpha_in::migrate_fix_root_tao_and_alpha_in::()) // Migrate last block rate limiting storage items .saturating_add(migrations::migrate_rate_limiting_last_blocks::migrate_obsolete_rate_limiting_last_blocks_storage::()) + // Remove deprecated OwnerHyperparamRateLimit storage item + .saturating_add(migrations::migrate_owner_hparam_rl_to_tempos::migrate_owner_hyperparam_rl_to_tempos::()) // Migrate remove network modality 
.saturating_add(migrations::migrate_remove_network_modality::migrate_remove_network_modality::()); weight diff --git a/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_tempos.rs b/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_tempos.rs new file mode 100644 index 0000000000..9a5d8b4842 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_tempos.rs @@ -0,0 +1,39 @@ +use super::*; +use crate::HasMigrationRun; +use codec::Decode; +use frame_support::weights::Weight; +use sp_io::hashing::twox_128; +use sp_io::storage::{clear, get}; + +/// Remove the deprecated OwnerHyperparamRateLimit storage item. +/// If the old value was 0 (disabled), preserve that by setting OwnerHyperparamTempos to 0. +/// Otherwise, leave the new storage at its default (2 tempos). +pub fn migrate_owner_hyperparam_rl_to_tempos() -> Weight { + let migration_name = b"migrate_owner_hyperparam_rl_to_tempos".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!("Migration '{:?}' already executed. 
Skipping.", migration_name); + return weight; + } + + let pallet_name = twox_128("SubtensorModule".as_bytes()); + let storage_name = twox_128("OwnerHyperparamRateLimit".as_bytes()); + let full_key = [pallet_name, storage_name].concat(); + + if let Some(value_bytes) = get(&full_key) { + if let Ok(old_limit_blocks) = ::decode(&mut &value_bytes[..]) { + if old_limit_blocks == 0u64 { + // Preserve disabled state + Pallet::::set_owner_hyperparam_tempos(0); + } + } + + clear(&full_key); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index b7265cc6d0..b5818add7b 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -23,6 +23,7 @@ pub mod migrate_orphaned_storage_items; pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; pub mod migrate_rate_limiting_last_blocks; +pub mod migrate_owner_hparam_rl_to_tempos; pub mod migrate_remove_commitments_rate_limit; pub mod migrate_remove_network_modality; pub mod migrate_remove_stake_map; diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 8703b1774b..28e3599368 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -116,9 +116,9 @@ impl Pallet { Self::deposit_event(Event::AdminFreezeWindowSet(window)); } - pub fn set_owner_hyperparam_rate_limit(limit: u64) { - OwnerHyperparamRateLimit::::set(limit); - Self::deposit_event(Event::OwnerHyperparamRateLimitSet(limit)); + pub fn set_owner_hyperparam_tempos(tempos: u16) { + OwnerHyperparamTempos::::set(tempos); + Self::deposit_event(Event::OwnerHyperparamTemposSet(tempos)); } /// If owner is `Some`, record last-blocks for the provided `TransactionType`s. 
diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index 319f759411..9ea504159b 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -56,7 +56,6 @@ impl Pallet { TransactionType::SetChildren => 150, // 30 minutes TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), TransactionType::RegisterNetwork => NetworkRateLimit::::get(), - TransactionType::OwnerHyperparamUpdate => OwnerHyperparamRateLimit::::get(), TransactionType::SubsubnetParameterUpdate => SubsubnetCountSetRateLimit::::get(), TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) @@ -69,9 +68,11 @@ impl Pallet { match tx_type { TransactionType::SetWeightsVersionKey => (Tempo::::get(netuid) as u64) .saturating_mul(WeightsVersionKeyRateLimit::::get()), - // Owner hyperparameter updates are now rate-limited by 2 tempos on the subnet - TransactionType::OwnerHyperparamUpdate => - (Tempo::::get(netuid) as u64).saturating_mul(2), + // Owner hyperparameter updates are rate-limited by N tempos on the subnet (sudo configurable) + TransactionType::OwnerHyperparamUpdate => { + let tempos = OwnerHyperparamTempos::::get() as u64; + (Tempo::::get(netuid) as u64).saturating_mul(tempos) + } TransactionType::SetSNOwnerHotkey => DefaultSetSNOwnerHotkeyRateLimit::::get(), _ => Self::get_rate_limit(tx_type), From 7d24770d988ec8cd10c81fbacbf9623770c06216 Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Thu, 11 Sep 2025 17:33:27 -0400 Subject: [PATCH 223/379] make hyperparameter specific --- pallets/admin-utils/src/lib.rs | 102 +++++++++---------- pallets/subtensor/src/tests/ensure.rs | 14 +-- pallets/subtensor/src/utils/rate_limiting.rs | 97 +++++++++++++++++- 3 files changed, 151 insertions(+), 62 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 8d20fe3750..4f2170599c 100644 --- a/pallets/admin-utils/src/lib.rs +++ 
b/pallets/admin-utils/src/lib.rs @@ -216,14 +216,14 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetServingRateLimit], )?; pallet_subtensor::Pallet::::set_serving_rate_limit(netuid, serving_rate_limit); log::debug!("ServingRateLimitSet( serving_rate_limit: {serving_rate_limit:?} ) "); pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetServingRateLimit], ); Ok(()) } @@ -268,7 +268,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetMaxDifficulty], )?; ensure!( @@ -282,7 +282,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetMaxDifficulty], ); Ok(()) } @@ -302,10 +302,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin.clone(), netuid, - &[ - TransactionType::OwnerHyperparamUpdate, - TransactionType::SetWeightsVersionKey, - ], + &[TransactionType::SetWeightsVersionKey], )?; ensure!( @@ -316,10 +313,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[ - TransactionType::OwnerHyperparamUpdate, - TransactionType::SetWeightsVersionKey, - ], + &[TransactionType::SetWeightsVersionKey], ); pallet_subtensor::Pallet::::set_weights_version_key(netuid, weights_version_key); @@ -399,7 +393,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetAdjustmentAlpha], )?; ensure!( @@ -410,7 +404,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( 
maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetAdjustmentAlpha], ); log::debug!("AdjustmentAlphaSet( adjustment_alpha: {adjustment_alpha:?} ) "); Ok(()) @@ -431,7 +425,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetMaxWeightLimit], )?; ensure!( @@ -442,7 +436,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetMaxWeightLimit], ); log::debug!( "MaxWeightLimitSet( netuid: {netuid:?} max_weight_limit: {max_weight_limit:?} ) " @@ -465,7 +459,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetImmunityPeriod], )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -476,7 +470,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetImmunityPeriod], ); log::debug!( "ImmunityPeriodSet( netuid: {netuid:?} immunity_period: {immunity_period:?} ) " @@ -499,7 +493,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetMinAllowedWeights], )?; ensure!( @@ -513,7 +507,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetMinAllowedWeights], ); Ok(()) } @@ -557,7 +551,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetKappa], )?; ensure!( 
@@ -569,7 +563,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetKappa], ); Ok(()) } @@ -585,7 +579,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetRho], )?; ensure!( @@ -597,7 +591,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetRho], ); Ok(()) } @@ -617,7 +611,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetActivityCutoff], )?; ensure!( @@ -637,7 +631,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetActivityCutoff], ); Ok(()) } @@ -685,7 +679,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetPowRegistrationAllowed], )?; pallet_subtensor::Pallet::::set_network_pow_registration_allowed( @@ -698,7 +692,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetPowRegistrationAllowed], ); Ok(()) } @@ -746,7 +740,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetMinBurn], )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -766,7 +760,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - 
&[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetMinBurn], ); Ok(()) } @@ -786,7 +780,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetMaxBurn], )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -806,7 +800,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetMaxBurn], ); Ok(()) } @@ -881,7 +875,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetBondsMovingAverage], )?; if maybe_owner.is_some() { ensure!( @@ -901,7 +895,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetBondsMovingAverage], ); Ok(()) } @@ -921,7 +915,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetBondsPenalty], )?; ensure!( @@ -933,7 +927,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetBondsPenalty], ); Ok(()) } @@ -1205,7 +1199,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerToggleCommitReveal], )?; ensure!( @@ -1218,7 +1212,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerToggleCommitReveal], ); Ok(()) } @@ -1242,14 +1236,14 @@ pub mod pallet { let 
maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerToggleLiquidAlphaEnabled], )?; pallet_subtensor::Pallet::::set_liquid_alpha_enabled(netuid, enabled); log::debug!("LiquidAlphaEnableToggled( netuid: {netuid:?}, Enabled: {enabled:?} ) "); pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerToggleLiquidAlphaEnabled], ); Ok(()) } @@ -1266,7 +1260,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin.clone(), netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetAlphaValues], )?; let res = pallet_subtensor::Pallet::::do_set_alpha_values( origin, netuid, alpha_low, alpha_high, @@ -1275,7 +1269,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetAlphaValues], ); } res @@ -1373,7 +1367,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetWeightCommitInterval], )?; ensure!( @@ -1387,7 +1381,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetWeightCommitInterval], ); Ok(()) @@ -1465,14 +1459,14 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerToggleTransfer], )?; let res = pallet_subtensor::Pallet::::toggle_transfer(netuid, toggle); if res.is_ok() { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + 
&[TransactionType::OwnerToggleTransfer], ); } res @@ -1607,7 +1601,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin.clone(), netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetAlphaSigmoidSteepness], )?; ensure!( @@ -1627,7 +1621,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetAlphaSigmoidSteepness], ); Ok(()) } @@ -1651,7 +1645,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerToggleYuma3Enabled], )?; pallet_subtensor::Pallet::::set_yuma3_enabled(netuid, enabled); @@ -1660,7 +1654,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerToggleYuma3Enabled], ); Ok(()) } @@ -1684,7 +1678,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerToggleBondsReset], )?; pallet_subtensor::Pallet::::set_bonds_reset(netuid, enabled); @@ -1693,7 +1687,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerToggleBondsReset], ); Ok(()) } @@ -1797,13 +1791,13 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetOwnerImmuneNeuronLimit], )?; pallet_subtensor::Pallet::::set_owner_immune_neuron_limit(netuid, immune_neurons)?; pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerHyperparamUpdate], + 
&[TransactionType::OwnerSetOwnerImmuneNeuronLimit], ); Ok(()) } diff --git a/pallets/subtensor/src/tests/ensure.rs b/pallets/subtensor/src/tests/ensure.rs index e7bd70fb5a..2298a7d889 100644 --- a/pallets/subtensor/src/tests/ensure.rs +++ b/pallets/subtensor/src/tests/ensure.rs @@ -105,22 +105,24 @@ fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { let res = crate::Pallet::::ensure_sn_owner_or_root_with_limits( <::RuntimeOrigin>::signed(owner), netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetKappa], ) .expect("should pass"); assert_eq!(res, Some(owner)); // Simulate previous update at current block -> next call should fail due to rate limit let now = crate::Pallet::::get_current_block_as_u64(); - crate::Pallet::::set_rate_limited_last_block( - &RateLimitKey::OwnerHyperparamUpdate(netuid), + crate::Pallet::::set_last_transaction_block_on_subnet( + &owner, + netuid, + &TransactionType::OwnerSetKappa, now, ); assert_noop!( crate::Pallet::::ensure_sn_owner_or_root_with_limits( <::RuntimeOrigin>::signed(owner), netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetKappa], ), crate::Error::::TxRateLimitExceeded ); @@ -130,7 +132,7 @@ fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { assert_ok!(crate::Pallet::::ensure_sn_owner_or_root_with_limits( <::RuntimeOrigin>::signed(owner), netuid, - &[TransactionType::OwnerHyperparamUpdate] + &[TransactionType::OwnerSetKappa] )); // Now advance into the freeze window; ensure blocks @@ -151,7 +153,7 @@ fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { crate::Pallet::::ensure_sn_owner_or_root_with_limits( <::RuntimeOrigin>::signed(owner), netuid, - &[TransactionType::OwnerHyperparamUpdate], + &[TransactionType::OwnerSetKappa], ), crate::Error::::AdminActionProhibitedDuringWeightsWindow ); diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index 9ea504159b..eea9fdeaf0 100644 --- 
a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -11,8 +11,32 @@ pub enum TransactionType { RegisterNetwork, SetWeightsVersionKey, SetSNOwnerHotkey, - OwnerHyperparamUpdate, + OwnerHyperparamUpdate, // Deprecated aggregate; keep for compatibility if referenced in tests SubsubnetParameterUpdate, + // Per-hyperparameter owner updates (rate-limited independently) + OwnerSetServingRateLimit, + OwnerSetMaxDifficulty, + OwnerSetAdjustmentAlpha, + OwnerSetMaxWeightLimit, + OwnerSetImmunityPeriod, + OwnerSetMinAllowedWeights, + OwnerSetKappa, + OwnerSetRho, + OwnerSetActivityCutoff, + OwnerSetPowRegistrationAllowed, + OwnerSetMinBurn, + OwnerSetMaxBurn, + OwnerSetBondsMovingAverage, + OwnerSetBondsPenalty, + OwnerToggleCommitReveal, + OwnerToggleLiquidAlphaEnabled, + OwnerSetAlphaValues, + OwnerSetWeightCommitInterval, + OwnerToggleTransfer, + OwnerSetAlphaSigmoidSteepness, + OwnerToggleYuma3Enabled, + OwnerToggleBondsReset, + OwnerSetOwnerImmuneNeuronLimit, } /// Implement conversion from TransactionType to u16 @@ -27,6 +51,29 @@ impl From for u16 { TransactionType::SetSNOwnerHotkey => 5, TransactionType::OwnerHyperparamUpdate => 6, TransactionType::SubsubnetParameterUpdate => 7, + TransactionType::OwnerSetServingRateLimit => 10, + TransactionType::OwnerSetMaxDifficulty => 11, + TransactionType::OwnerSetAdjustmentAlpha => 12, + TransactionType::OwnerSetMaxWeightLimit => 13, + TransactionType::OwnerSetImmunityPeriod => 14, + TransactionType::OwnerSetMinAllowedWeights => 15, + TransactionType::OwnerSetKappa => 16, + TransactionType::OwnerSetRho => 17, + TransactionType::OwnerSetActivityCutoff => 18, + TransactionType::OwnerSetPowRegistrationAllowed => 19, + TransactionType::OwnerSetMinBurn => 20, + TransactionType::OwnerSetMaxBurn => 21, + TransactionType::OwnerSetBondsMovingAverage => 22, + TransactionType::OwnerSetBondsPenalty => 23, + TransactionType::OwnerToggleCommitReveal => 24, + 
TransactionType::OwnerToggleLiquidAlphaEnabled => 25, + TransactionType::OwnerSetAlphaValues => 26, + TransactionType::OwnerSetWeightCommitInterval => 27, + TransactionType::OwnerToggleTransfer => 28, + TransactionType::OwnerSetAlphaSigmoidSteepness => 29, + TransactionType::OwnerToggleYuma3Enabled => 30, + TransactionType::OwnerToggleBondsReset => 31, + TransactionType::OwnerSetOwnerImmuneNeuronLimit => 32, } } } @@ -42,6 +89,29 @@ impl From for TransactionType { 5 => TransactionType::SetSNOwnerHotkey, 6 => TransactionType::OwnerHyperparamUpdate, 7 => TransactionType::SubsubnetParameterUpdate, + 10 => TransactionType::OwnerSetServingRateLimit, + 11 => TransactionType::OwnerSetMaxDifficulty, + 12 => TransactionType::OwnerSetAdjustmentAlpha, + 13 => TransactionType::OwnerSetMaxWeightLimit, + 14 => TransactionType::OwnerSetImmunityPeriod, + 15 => TransactionType::OwnerSetMinAllowedWeights, + 16 => TransactionType::OwnerSetKappa, + 17 => TransactionType::OwnerSetRho, + 18 => TransactionType::OwnerSetActivityCutoff, + 19 => TransactionType::OwnerSetPowRegistrationAllowed, + 20 => TransactionType::OwnerSetMinBurn, + 21 => TransactionType::OwnerSetMaxBurn, + 22 => TransactionType::OwnerSetBondsMovingAverage, + 23 => TransactionType::OwnerSetBondsPenalty, + 24 => TransactionType::OwnerToggleCommitReveal, + 25 => TransactionType::OwnerToggleLiquidAlphaEnabled, + 26 => TransactionType::OwnerSetAlphaValues, + 27 => TransactionType::OwnerSetWeightCommitInterval, + 28 => TransactionType::OwnerToggleTransfer, + 29 => TransactionType::OwnerSetAlphaSigmoidSteepness, + 30 => TransactionType::OwnerToggleYuma3Enabled, + 31 => TransactionType::OwnerToggleBondsReset, + 32 => TransactionType::OwnerSetOwnerImmuneNeuronLimit, _ => TransactionType::Unknown, } } @@ -69,7 +139,30 @@ impl Pallet { TransactionType::SetWeightsVersionKey => (Tempo::::get(netuid) as u64) .saturating_mul(WeightsVersionKeyRateLimit::::get()), // Owner hyperparameter updates are rate-limited by N tempos on the 
subnet (sudo configurable) - TransactionType::OwnerHyperparamUpdate => { + TransactionType::OwnerHyperparamUpdate + | TransactionType::OwnerSetServingRateLimit + | TransactionType::OwnerSetMaxDifficulty + | TransactionType::OwnerSetAdjustmentAlpha + | TransactionType::OwnerSetMaxWeightLimit + | TransactionType::OwnerSetImmunityPeriod + | TransactionType::OwnerSetMinAllowedWeights + | TransactionType::OwnerSetKappa + | TransactionType::OwnerSetRho + | TransactionType::OwnerSetActivityCutoff + | TransactionType::OwnerSetPowRegistrationAllowed + | TransactionType::OwnerSetMinBurn + | TransactionType::OwnerSetMaxBurn + | TransactionType::OwnerSetBondsMovingAverage + | TransactionType::OwnerSetBondsPenalty + | TransactionType::OwnerToggleCommitReveal + | TransactionType::OwnerToggleLiquidAlphaEnabled + | TransactionType::OwnerSetAlphaValues + | TransactionType::OwnerSetWeightCommitInterval + | TransactionType::OwnerToggleTransfer + | TransactionType::OwnerSetAlphaSigmoidSteepness + | TransactionType::OwnerToggleYuma3Enabled + | TransactionType::OwnerToggleBondsReset + | TransactionType::OwnerSetOwnerImmuneNeuronLimit => { let tempos = OwnerHyperparamTempos::::get() as u64; (Tempo::::get(netuid) as u64).saturating_mul(tempos) } From e1f7897e97259a87c144a38a03b3b15b540180c5 Mon Sep 17 00:00:00 2001 From: 0xcacti <0xcacti@gmail.com> Date: Thu, 11 Sep 2025 23:52:34 -0400 Subject: [PATCH 224/379] emissions fix --- pallets/subtensor/src/coinbase/run_coinbase.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index c11ed56584..1a01e7ea43 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -70,10 +70,10 @@ impl Pallet { .unwrap_or(asfloat!(0.0)); log::debug!("default_tao_in_i: {default_tao_in_i:?}"); // Get alpha_emission total - let alpha_emission_i: U96F32 = asfloat!( - 
Self::get_block_emission_for_issuance(Self::get_alpha_issuance(*netuid_i).into()) - .unwrap_or(0) - ); + let alpha_emission_i: U96F32 = asfloat!(Self::get_block_emission_for_issuance( + Self::get_alpha_issuance(*netuid_i).into() + ) + .unwrap_or(0)); log::debug!("alpha_emission_i: {alpha_emission_i:?}"); // Get initial alpha_in @@ -85,7 +85,7 @@ impl Pallet { ); if price_i < tao_in_ratio { tao_in_i = price_i.saturating_mul(U96F32::saturating_from_num(block_emission)); - alpha_in_i = alpha_emission_i; + alpha_in_i = block_emission; let difference_tao: U96F32 = default_tao_in_i.saturating_sub(tao_in_i); // Difference becomes buy. let buy_swap_result = Self::swap_tao_for_alpha( @@ -103,7 +103,7 @@ impl Pallet { is_subsidized.insert(*netuid_i, true); } else { tao_in_i = default_tao_in_i; - alpha_in_i = tao_in_i.safe_div_or(price_i, alpha_emission_i); + alpha_in_i = tao_in_i.safe_div_or(price_i, tao_in.safe_div(moving_price_i)); is_subsidized.insert(*netuid_i, false); } log::debug!("alpha_in_i: {alpha_in_i:?}"); @@ -209,7 +209,7 @@ impl Pallet { let root_alpha: U96F32 = root_proportion .saturating_mul(alpha_out_i) // Total alpha emission per block remaining. .saturating_mul(asfloat!(0.5)); // 50% to validators. - // Remove root alpha from alpha_out. + // Remove root alpha from alpha_out. log::debug!("root_alpha: {root_alpha:?}"); // Get pending alpha as original alpha_out - root_alpha. 
let pending_alpha: U96F32 = alpha_out_i.saturating_sub(root_alpha); From 4b1ad57001bf66ed6ac384829515a71299f4388f Mon Sep 17 00:00:00 2001 From: 0xcacti <0xcacti@gmail.com> Date: Thu, 11 Sep 2025 23:54:15 -0400 Subject: [PATCH 225/379] handle panic --- pallets/subtensor/src/coinbase/run_coinbase.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 1a01e7ea43..b2872f998a 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -70,10 +70,10 @@ impl Pallet { .unwrap_or(asfloat!(0.0)); log::debug!("default_tao_in_i: {default_tao_in_i:?}"); // Get alpha_emission total - let alpha_emission_i: U96F32 = asfloat!(Self::get_block_emission_for_issuance( - Self::get_alpha_issuance(*netuid_i).into() - ) - .unwrap_or(0)); + let alpha_emission_i: U96F32 = asfloat!( + Self::get_block_emission_for_issuance(Self::get_alpha_issuance(*netuid_i).into()) + .unwrap_or(0) + ); log::debug!("alpha_emission_i: {alpha_emission_i:?}"); // Get initial alpha_in @@ -103,7 +103,7 @@ impl Pallet { is_subsidized.insert(*netuid_i, true); } else { tao_in_i = default_tao_in_i; - alpha_in_i = tao_in_i.safe_div_or(price_i, tao_in.safe_div(moving_price_i)); + alpha_in_i = tao_in_i.safe_div(price_i); // Must panic if price_i is 0. is_subsidized.insert(*netuid_i, false); } log::debug!("alpha_in_i: {alpha_in_i:?}"); @@ -209,7 +209,7 @@ impl Pallet { let root_alpha: U96F32 = root_proportion .saturating_mul(alpha_out_i) // Total alpha emission per block remaining. .saturating_mul(asfloat!(0.5)); // 50% to validators. - // Remove root alpha from alpha_out. + // Remove root alpha from alpha_out. log::debug!("root_alpha: {root_alpha:?}"); // Get pending alpha as original alpha_out - root_alpha. 
let pending_alpha: U96F32 = alpha_out_i.saturating_sub(root_alpha); From 943c29cec19f68966aa4f791169215111a00359e Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Fri, 12 Sep 2025 15:17:57 +0300 Subject: [PATCH 226/379] Sign associate evm key extrinsic by coldkey --- .../test/evm-uid.precompile.lookup.test.ts | 3 +- pallets/subtensor/src/macros/dispatches.rs | 3 +- pallets/subtensor/src/tests/evm.rs | 49 +++++++++++++++++-- pallets/subtensor/src/utils/evm.rs | 20 +++++--- 4 files changed, 63 insertions(+), 12 deletions(-) diff --git a/evm-tests/test/evm-uid.precompile.lookup.test.ts b/evm-tests/test/evm-uid.precompile.lookup.test.ts index f6e22ce032..6e702d612e 100644 --- a/evm-tests/test/evm-uid.precompile.lookup.test.ts +++ b/evm-tests/test/evm-uid.precompile.lookup.test.ts @@ -55,11 +55,12 @@ describe("Test the UID Lookup precompile", () => { const signature = await evmWallet.signMessage(concatenatedArray); const associateEvmKeyTx = api.tx.SubtensorModule.associate_evm_key({ netuid: netuid, + hotkey: convertPublicKeyToSs58(hotkey.publicKey), evm_key: convertToFixedSizeBinary(evmWallet.address, 20), block_number: BigInt(blockNumber), signature: convertToFixedSizeBinary(signature, 65) }); - const signer = getSignerFromKeypair(hotkey); + const signer = getSignerFromKeypair(coldkey); await waitForTransactionCompletion(api, associateEvmKeyTx, signer) .then(() => { }) .catch((error) => { console.log(`transaction error ${error}`) }); diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 28f7ff384d..1325a6258c 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -2048,11 +2048,12 @@ mod dispatches { pub fn associate_evm_key( origin: T::RuntimeOrigin, netuid: NetUid, + hotkey: T::AccountId, evm_key: H160, block_number: u64, signature: Signature, ) -> DispatchResult { - Self::do_associate_evm_key(origin, netuid, evm_key, block_number, signature) + 
Self::do_associate_evm_key(origin, netuid, hotkey, evm_key, block_number, signature) } /// Recycles alpha from a cold/hot key pair, reducing AlphaOut on a subnet diff --git a/pallets/subtensor/src/tests/evm.rs b/pallets/subtensor/src/tests/evm.rs index a65e69c207..95d0c4e6db 100644 --- a/pallets/subtensor/src/tests/evm.rs +++ b/pallets/subtensor/src/tests/evm.rs @@ -58,8 +58,9 @@ fn test_associate_evm_key_success() { let signature = sign_evm_message(&pair, message); assert_ok!(SubtensorModule::associate_evm_key( - RuntimeOrigin::signed(hotkey), + RuntimeOrigin::signed(coldkey), netuid, + hotkey, evm_key, block_number, signature, @@ -104,8 +105,9 @@ fn test_associate_evm_key_different_block_number_success() { let signature = sign_evm_message(&pair, message); assert_ok!(SubtensorModule::associate_evm_key( - RuntimeOrigin::signed(hotkey), + RuntimeOrigin::signed(coldkey), netuid, + hotkey, evm_key, block_number, signature, @@ -123,6 +125,43 @@ fn test_associate_evm_key_different_block_number_success() { }); } +#[test] +fn test_associate_evm_key_coldkey_does_not_own_hotkey() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + + let tempo: u16 = 2; + let modality: u16 = 2; + + add_network(netuid, tempo, modality); + + let coldkey = U256::from(1); + let hotkey = U256::from(2); + + let pair = ecdsa::Pair::generate().0; + let public = pair.public(); + let evm_key = public_to_evm_key(&public); + let block_number = frame_system::Pallet::::block_number(); + let hashed_block_number = keccak_256(block_number.encode().as_ref()); + let hotkey_bytes = hotkey.encode(); + + let message = [hotkey_bytes.as_ref(), hashed_block_number.as_ref()].concat(); + let signature = sign_evm_message(&pair, message); + + assert_err!( + SubtensorModule::associate_evm_key( + RuntimeOrigin::signed(coldkey), + netuid, + hotkey, + evm_key, + block_number, + signature, + ), + Error::::NonAssociatedColdKey + ); + }); +} + #[test] fn 
test_associate_evm_key_hotkey_not_registered_in_subnet() { new_test_ext(1).execute_with(|| { @@ -149,8 +188,9 @@ fn test_associate_evm_key_hotkey_not_registered_in_subnet() { assert_err!( SubtensorModule::associate_evm_key( - RuntimeOrigin::signed(hotkey), + RuntimeOrigin::signed(coldkey), netuid, + hotkey, evm_key, block_number, signature, @@ -189,8 +229,9 @@ fn test_associate_evm_key_using_wrong_hash_function() { assert_err!( SubtensorModule::associate_evm_key( - RuntimeOrigin::signed(hotkey), + RuntimeOrigin::signed(coldkey), netuid, + hotkey, evm_key, block_number, signature, diff --git a/pallets/subtensor/src/utils/evm.rs b/pallets/subtensor/src/utils/evm.rs index 652fb8ea27..757c2ef91e 100644 --- a/pallets/subtensor/src/utils/evm.rs +++ b/pallets/subtensor/src/utils/evm.rs @@ -24,11 +24,12 @@ impl Pallet { /// Associate an EVM key with a hotkey. /// - /// This function accepts a Signature, which is a signed message containing the hotkey concatenated with - /// the hashed block number. It will then attempt to recover the EVM key from the signature and compare it - /// with the `evm_key` parameter, and ensures that they match. + /// This function accepts a Signature, which is a signed message containing the hotkey + /// concatenated with the hashed block number. It will then attempt to recover the EVM key from + /// the signature and compare it with the `evm_key` parameter, and ensures that they match. /// - /// The EVM key is expected to sign the message according to this formula to produce the signature: + /// The EVM key is expected to sign the message according to this formula to produce the + /// signature: /// ```text /// keccak_256(hotkey ++ keccak_256(block_number)) /// ``` @@ -40,15 +41,22 @@ impl Pallet { /// * `hotkey` - The hotkey associated with the `origin` coldkey. /// * `evm_key` - The EVM address to associate with the `hotkey`. /// * `block_number` - The block number used in the `signature`. 
- /// * `signature` - A signed message by the `evm_key` containing the `hotkey` and the hashed `block_number`. + /// * `signature` - A signed message by the `evm_key` containing the `hotkey` and the hashed + /// `block_number`. pub fn do_associate_evm_key( origin: T::RuntimeOrigin, netuid: NetUid, + hotkey: T::AccountId, evm_key: H160, block_number: u64, mut signature: Signature, ) -> dispatch::DispatchResult { - let hotkey = ensure_signed(origin)?; + let coldkey = ensure_signed(origin)?; + + ensure!( + Self::get_owning_coldkey_for_hotkey(&hotkey) == coldkey, + Error::::NonAssociatedColdKey + ); // Normalize the v value to 0 or 1 if signature.0[64] >= 27 { From 949fdcb1a8202488f8bbe464e00c9cc20bad22f4 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Fri, 12 Sep 2025 10:13:02 -0300 Subject: [PATCH 227/379] trigger ci From 59349c314a2f71e97faac3fc8388bca4f2612872 Mon Sep 17 00:00:00 2001 From: bdhimes Date: Fri, 12 Sep 2025 15:49:13 +0200 Subject: [PATCH 228/379] Symbols <= 128 restored --- pallets/subtensor/src/subnets/symbols.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/subnets/symbols.rs b/pallets/subtensor/src/subnets/symbols.rs index 2546a84c56..6fd3e82f56 100644 --- a/pallets/subtensor/src/subnets/symbols.rs +++ b/pallets/subtensor/src/subnets/symbols.rs @@ -130,7 +130,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xD0\x83", // Ѓ (Gje, 113) b"\xD0\x84", // Є (Ukrainian Ie, 114) b"\xD0\x85", // Ѕ (Dze, 115) - b"\xD0\xAA", // Ъ (Hard sign, 116) + b"\xD1\x8A", // ъ (Hard sign, 116) // Coptic Alphabet b"\xE2\xB2\x80", // Ⲁ (Alfa, 117) b"\xE2\xB2\x81", // ⲁ (Small Alfa, 118) @@ -145,7 +145,7 @@ pub static SYMBOLS: [&[u8]; 439] = [ b"\xF0\x91\x80\x83", // 𑀃 (Ii, 126) b"\xF0\x91\x80\x85", // 𑀅 (U, 127) // End of Sinhala Alphabet - b"\xE0\xB6\xB2", // ඲ (La, 128) + b"\xE0\xB6\xB1", // න (La, 128) b"\xE0\xB6\xB3", // ඳ (Va, 129) b"\xE0\xB6\xB4", // ප (Sha, 130) b"\xE0\xB6\xB5", // ඵ (Ssa, 131) From 
0d2dee41bc21135bbda67b24834b5dee2311b150 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Fri, 12 Sep 2025 16:52:44 +0300 Subject: [PATCH 229/379] Rename tempos -> epochs - return `rate_limit` naming --- .../neuron.precompile.reveal-weights.test.ts | 4 ++-- .../test/neuron.precompile.set-weights.test.ts | 4 ++-- evm-tests/test/staking.precompile.reward.test.ts | 4 ++-- .../subnet.precompile.hyperparameter.test.ts | 4 ++-- pallets/admin-utils/src/benchmarking.rs | 4 ++-- pallets/admin-utils/src/lib.rs | 10 +++++----- pallets/admin-utils/src/tests/mod.rs | 12 ++++-------- pallets/subtensor/src/lib.rs | 8 ++++---- pallets/subtensor/src/macros/events.rs | 4 ++-- pallets/subtensor/src/macros/hooks.rs | 2 +- ...s.rs => migrate_owner_hparam_rl_to_epochs.rs} | 16 ++++++++-------- pallets/subtensor/src/migrations/mod.rs | 2 +- pallets/subtensor/src/tests/ensure.rs | 9 ++++----- pallets/subtensor/src/utils/misc.rs | 6 +++--- pallets/subtensor/src/utils/rate_limiting.rs | 5 +++-- 15 files changed, 45 insertions(+), 49 deletions(-) rename pallets/subtensor/src/migrations/{migrate_owner_hparam_rl_to_tempos.rs => migrate_owner_hparam_rl_to_epochs.rs} (65%) diff --git a/evm-tests/test/neuron.precompile.reveal-weights.test.ts b/evm-tests/test/neuron.precompile.reveal-weights.test.ts index ac9c598743..98d1b177cc 100644 --- a/evm-tests/test/neuron.precompile.reveal-weights.test.ts +++ b/evm-tests/test/neuron.precompile.reveal-weights.test.ts @@ -79,8 +79,8 @@ describe("Test neuron precompile reveal weights", () => { const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - // Set OwnerHyperparamTempos to 0 (disable RL) - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_tempos({ tempos: 0 }) + // Set OwnerHyperparamRateLimit to 0 (disable RL) + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ epochs: 0 }) const sudoOwnerRateTx = api.tx.Sudo.sudo({ 
call: setOwnerRateLimit.decodedCall }) await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) } diff --git a/evm-tests/test/neuron.precompile.set-weights.test.ts b/evm-tests/test/neuron.precompile.set-weights.test.ts index 1423a2d045..fd4c5cefd9 100644 --- a/evm-tests/test/neuron.precompile.set-weights.test.ts +++ b/evm-tests/test/neuron.precompile.set-weights.test.ts @@ -47,8 +47,8 @@ describe("Test neuron precompile contract, set weights function", () => { const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - // Set OwnerHyperparamTempos to 0 (disable RL) - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_tempos({ tempos: 0 }) + // Set OwnerHyperparamRateLimit to 0 (disable RL) + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ epochs: 0 }) const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) } diff --git a/evm-tests/test/staking.precompile.reward.test.ts b/evm-tests/test/staking.precompile.reward.test.ts index 93e7010b53..cd02b6ae90 100644 --- a/evm-tests/test/staking.precompile.reward.test.ts +++ b/evm-tests/test/staking.precompile.reward.test.ts @@ -48,8 +48,8 @@ describe("Test neuron precompile reward", () => { const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - // Set OwnerHyperparamTempos to 0 (disable RL) - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_tempos({ tempos: 0 }) + // Set OwnerHyperparamRateLimit to 0 (disable RL) + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ epochs: 0 }) const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) } diff --git 
a/evm-tests/test/subnet.precompile.hyperparameter.test.ts b/evm-tests/test/subnet.precompile.hyperparameter.test.ts index 2741b018f7..b0e2983597 100644 --- a/evm-tests/test/subnet.precompile.hyperparameter.test.ts +++ b/evm-tests/test/subnet.precompile.hyperparameter.test.ts @@ -35,8 +35,8 @@ describe("Test the Subnet precompile contract", () => { const sudoFreezeTx = api.tx.Sudo.sudo({ call: setFreezeWindow.decodedCall }) await waitForTransactionWithRetry(api, sudoFreezeTx, alice) - // Set OwnerHyperparamTempos to 0 (disable RL) - const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_tempos({ tempos: 0 }) + // Set OwnerHyperparamRateLimit to 0 (disable RL) + const setOwnerRateLimit = api.tx.AdminUtils.sudo_set_owner_hparam_rate_limit({ epochs: 0 }) const sudoOwnerRateTx = api.tx.Sudo.sudo({ call: setOwnerRateLimit.decodedCall }) await waitForTransactionWithRetry(api, sudoOwnerRateTx, alice) } diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 085ee56b03..186973a6d0 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -353,9 +353,9 @@ mod benchmarks { } #[benchmark] - fn sudo_set_owner_hparam_tempos() { + fn sudo_set_owner_hparam_rate_limit() { #[extrinsic_call] - _(RawOrigin::Root, 2u16/*tempos*/)/*sudo_set_owner_hparam_tempos*/; + _(RawOrigin::Root, 2u16/*epochs*/)/*sudo_set_owner_hparam_rate_limit*/; } #[benchmark] diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 4f2170599c..2d66f9a3d4 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1827,17 +1827,17 @@ pub mod pallet { Ok(()) } - /// Sets the owner hyperparameter rate limit in tempos (global multiplier). + /// Sets the owner hyperparameter rate limit in epochs (global multiplier). /// Only callable by root. 
#[pallet::call_index(75)] #[pallet::weight((0, DispatchClass::Operational, Pays::No))] - pub fn sudo_set_owner_hparam_tempos( + pub fn sudo_set_owner_hparam_rate_limit( origin: OriginFor, - tempos: u16, + epochs: u16, ) -> DispatchResult { ensure_root(origin)?; - pallet_subtensor::Pallet::::set_owner_hyperparam_tempos(tempos); - log::debug!("OwnerHyperparamTemposSet( tempos: {tempos:?} ) "); + pallet_subtensor::Pallet::::set_owner_hyperparam_rate_limit(epochs); + log::debug!("OwnerHyperparamRateLimitSet( epochs: {epochs:?} ) "); Ok(()) } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 79ea690706..85d284c402 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -1975,17 +1975,17 @@ fn test_sudo_set_admin_freeze_window_and_rate() { // Owner hyperparam tempos setter assert_eq!( - AdminUtils::sudo_set_owner_hparam_tempos( + AdminUtils::sudo_set_owner_hparam_rate_limit( <::RuntimeOrigin>::signed(U256::from(1)), 5 ), Err(DispatchError::BadOrigin) ); - assert_ok!(AdminUtils::sudo_set_owner_hparam_tempos( + assert_ok!(AdminUtils::sudo_set_owner_hparam_rate_limit( <::RuntimeOrigin>::root(), 5 )); - assert_eq!(pallet_subtensor::OwnerHyperparamTempos::::get(), 5); + assert_eq!(pallet_subtensor::OwnerHyperparamRateLimit::::get(), 5); }); } @@ -2169,11 +2169,7 @@ fn test_hyperparam_rate_limit_enforced_by_tempo() { // Immediate second update should fail due to tempo-based RL assert_noop!( - AdminUtils::sudo_set_kappa( - <::RuntimeOrigin>::signed(owner), - netuid, - 2 - ), + AdminUtils::sudo_set_kappa(<::RuntimeOrigin>::signed(owner), netuid, 2), SubtensorError::::TxRateLimitExceeded ); diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 235e8bee76..382d0692e2 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -868,7 +868,7 @@ pub mod pallet { #[pallet::type_value] /// Default number of tempos for owner hyperparameter update rate limit - 
pub fn DefaultOwnerHyperparamTempos() -> u16 { + pub fn DefaultOwnerHyperparamRateLimit() -> u16 { 2 } @@ -888,9 +888,9 @@ pub mod pallet { StorageValue<_, u16, ValueQuery, DefaultAdminFreezeWindow>; #[pallet::storage] - /// Global number of tempos used to rate limit subnet owner hyperparameter updates - pub type OwnerHyperparamTempos = - StorageValue<_, u16, ValueQuery, DefaultOwnerHyperparamTempos>; + /// Global number of epochs used to rate limit subnet owner hyperparameter updates + pub type OwnerHyperparamRateLimit = + StorageValue<_, u16, ValueQuery, DefaultOwnerHyperparamRateLimit>; #[pallet::storage] pub type ColdkeySwapScheduleDuration = diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 2400e3ef8c..bb41c063ea 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -116,8 +116,8 @@ mod events { TxChildKeyTakeRateLimitSet(u64), /// setting the admin freeze window length (last N blocks of tempo) AdminFreezeWindowSet(u16), - /// setting the owner hyperparameter rate limit in tempos - OwnerHyperparamTemposSet(u16), + /// setting the owner hyperparameter rate limit in epochs + OwnerHyperparamRateLimitSet(u16), /// minimum childkey take set MinChildKeyTakeSet(u16), /// maximum childkey take set diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index d23d5884f2..cbb72eed1f 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -139,7 +139,7 @@ mod hooks { // Migrate last block rate limiting storage items .saturating_add(migrations::migrate_rate_limiting_last_blocks::migrate_obsolete_rate_limiting_last_blocks_storage::()) // Remove deprecated OwnerHyperparamRateLimit storage item - .saturating_add(migrations::migrate_owner_hparam_rl_to_tempos::migrate_owner_hyperparam_rl_to_tempos::()) + .saturating_add(migrations::migrate_owner_hparam_rl_to_epochs::migrate_owner_hyperparam_rl_to_epochs::()) 
// Migrate remove network modality .saturating_add(migrations::migrate_remove_network_modality::migrate_remove_network_modality::()); weight diff --git a/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_tempos.rs b/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs similarity index 65% rename from pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_tempos.rs rename to pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs index 9a5d8b4842..501c00c4ad 100644 --- a/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_tempos.rs +++ b/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs @@ -5,15 +5,16 @@ use frame_support::weights::Weight; use sp_io::hashing::twox_128; use sp_io::storage::{clear, get}; -/// Remove the deprecated OwnerHyperparamRateLimit storage item. -/// If the old value was 0 (disabled), preserve that by setting OwnerHyperparamTempos to 0. -/// Otherwise, leave the new storage at its default (2 tempos). -pub fn migrate_owner_hyperparam_rl_to_tempos() -> Weight { - let migration_name = b"migrate_owner_hyperparam_rl_to_tempos".to_vec(); +/// Migrate u64 to u16 in OwnerHyperparamRateLimit and new default +pub fn migrate_owner_hyperparam_rl_to_epochs() -> Weight { + let migration_name = b"migrate_owner_hyperparam_rl_to_epochs".to_vec(); let mut weight = T::DbWeight::get().reads(1); if HasMigrationRun::::get(&migration_name) { - log::info!("Migration '{:?}' already executed. Skipping.", migration_name); + log::info!( + "Migration '{:?}' already executed. 
Skipping.", + migration_name + ); return weight; } @@ -25,11 +26,10 @@ pub fn migrate_owner_hyperparam_rl_to_tempos() -> Weight { if let Ok(old_limit_blocks) = ::decode(&mut &value_bytes[..]) { if old_limit_blocks == 0u64 { // Preserve disabled state - Pallet::::set_owner_hyperparam_tempos(0); + Pallet::::set_owner_hyperparam_rate_limit(0); } } - clear(&full_key); weight = weight.saturating_add(T::DbWeight::get().writes(1)); } diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index b5818add7b..9f0a6bce7f 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -20,10 +20,10 @@ pub mod migrate_fix_root_tao_and_alpha_in; pub mod migrate_identities_v2; pub mod migrate_init_total_issuance; pub mod migrate_orphaned_storage_items; +pub mod migrate_owner_hparam_rl_to_epochs; pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; pub mod migrate_rate_limiting_last_blocks; -pub mod migrate_owner_hparam_rl_to_tempos; pub mod migrate_remove_commitments_rate_limit; pub mod migrate_remove_network_modality; pub mod migrate_remove_stake_map; diff --git a/pallets/subtensor/src/tests/ensure.rs b/pallets/subtensor/src/tests/ensure.rs index 2298a7d889..22a8dbd029 100644 --- a/pallets/subtensor/src/tests/ensure.rs +++ b/pallets/subtensor/src/tests/ensure.rs @@ -65,11 +65,11 @@ fn ensure_subnet_owner_or_root_distinguishes_root_and_owner() { fn ensure_root_with_rate_limit_blocks_in_freeze_window() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(1); - let tempo: u16 = 10; + let tempo = 10; add_network(netuid, 10, 0); // Set freeze window to 3 - let freeze_window: u16 = 3; + let freeze_window = 3; crate::Pallet::::set_admin_freeze_window(freeze_window); run_to_block((tempo - freeze_window + 1).into()); @@ -96,10 +96,9 @@ fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { SubnetOwner::::insert(netuid, owner); // Set freeze window to 0 initially to avoid blocking when 
tempo is small let freeze_window = 3; - crate::Pallet::::set_admin_freeze_window(0); + crate::Pallet::::set_admin_freeze_window(freeze_window); - // Set tempo to 1 so owner hyperparam RL = 2 blocks - crate::Pallet::::set_tempo(netuid, 1); + crate::Pallet::::set_owner_hyperparam_rate_limit(1); // Outside freeze window initially; should pass and return Some(owner) let res = crate::Pallet::::ensure_sn_owner_or_root_with_limits( diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 28e3599368..0dde6345a2 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -116,9 +116,9 @@ impl Pallet { Self::deposit_event(Event::AdminFreezeWindowSet(window)); } - pub fn set_owner_hyperparam_tempos(tempos: u16) { - OwnerHyperparamTempos::::set(tempos); - Self::deposit_event(Event::OwnerHyperparamTemposSet(tempos)); + pub fn set_owner_hyperparam_rate_limit(epochs: u16) { + OwnerHyperparamRateLimit::::set(epochs); + Self::deposit_event(Event::OwnerHyperparamRateLimitSet(epochs)); } /// If owner is `Some`, record last-blocks for the provided `TransactionType`s. 
diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index eea9fdeaf0..9221974d12 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -116,6 +116,7 @@ impl From for TransactionType { } } } + impl Pallet { // ======================== // ==== Rate Limiting ===== @@ -163,8 +164,8 @@ impl Pallet { | TransactionType::OwnerToggleYuma3Enabled | TransactionType::OwnerToggleBondsReset | TransactionType::OwnerSetOwnerImmuneNeuronLimit => { - let tempos = OwnerHyperparamTempos::::get() as u64; - (Tempo::::get(netuid) as u64).saturating_mul(tempos) + let epochs = OwnerHyperparamRateLimit::::get() as u64; + (Tempo::::get(netuid) as u64).saturating_mul(epochs) } TransactionType::SetSNOwnerHotkey => DefaultSetSNOwnerHotkeyRateLimit::::get(), From a3fbfec9a7728e173892a540919d24fb82aea0b6 Mon Sep 17 00:00:00 2001 From: 0xcacti <0xcacti@gmail.com> Date: Fri, 12 Sep 2025 11:02:58 -0400 Subject: [PATCH 230/379] move back to safe_div_or --- pallets/subtensor/src/coinbase/run_coinbase.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index b2872f998a..4bf2ae5e34 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -70,10 +70,10 @@ impl Pallet { .unwrap_or(asfloat!(0.0)); log::debug!("default_tao_in_i: {default_tao_in_i:?}"); // Get alpha_emission total - let alpha_emission_i: U96F32 = asfloat!( - Self::get_block_emission_for_issuance(Self::get_alpha_issuance(*netuid_i).into()) - .unwrap_or(0) - ); + let alpha_emission_i: U96F32 = asfloat!(Self::get_block_emission_for_issuance( + Self::get_alpha_issuance(*netuid_i).into() + ) + .unwrap_or(0)); log::debug!("alpha_emission_i: {alpha_emission_i:?}"); // Get initial alpha_in @@ -103,7 +103,7 @@ impl Pallet { is_subsidized.insert(*netuid_i, 
true); } else { tao_in_i = default_tao_in_i; - alpha_in_i = tao_in_i.safe_div(price_i); // Must panic if price_i is 0. + alpha_in_i = tao_in_i.safe_div_or(price_i, alpha_emission_i); is_subsidized.insert(*netuid_i, false); } log::debug!("alpha_in_i: {alpha_in_i:?}"); @@ -209,7 +209,7 @@ impl Pallet { let root_alpha: U96F32 = root_proportion .saturating_mul(alpha_out_i) // Total alpha emission per block remaining. .saturating_mul(asfloat!(0.5)); // 50% to validators. - // Remove root alpha from alpha_out. + // Remove root alpha from alpha_out. log::debug!("root_alpha: {root_alpha:?}"); // Get pending alpha as original alpha_out - root_alpha. let pending_alpha: U96F32 = alpha_out_i.saturating_sub(root_alpha); From bb7d9c31ce7c5b84528a4369d97848a3ccdc7120 Mon Sep 17 00:00:00 2001 From: bdhimes Date: Fri, 12 Sep 2025 17:15:12 +0200 Subject: [PATCH 231/379] Bump spec version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index d49d5147e9..aa87c52a19 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 316, + spec_version: 317, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 12dcb9129c342ca3461f9fef01847eb22920698b Mon Sep 17 00:00:00 2001 From: 0xcacti <0xcacti@gmail.com> Date: Fri, 12 Sep 2025 12:09:53 -0400 Subject: [PATCH 232/379] fmt --- pallets/subtensor/src/coinbase/run_coinbase.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 4bf2ae5e34..d284f33eda 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -70,10 +70,10 @@ impl Pallet { .unwrap_or(asfloat!(0.0)); log::debug!("default_tao_in_i: {default_tao_in_i:?}"); // Get alpha_emission total - let alpha_emission_i: U96F32 = asfloat!(Self::get_block_emission_for_issuance( - Self::get_alpha_issuance(*netuid_i).into() - ) - .unwrap_or(0)); + let alpha_emission_i: U96F32 = asfloat!( + Self::get_block_emission_for_issuance(Self::get_alpha_issuance(*netuid_i).into()) + .unwrap_or(0) + ); log::debug!("alpha_emission_i: {alpha_emission_i:?}"); // Get initial alpha_in @@ -209,7 +209,7 @@ impl Pallet { let root_alpha: U96F32 = root_proportion .saturating_mul(alpha_out_i) // Total alpha emission per block remaining. .saturating_mul(asfloat!(0.5)); // 50% to validators. - // Remove root alpha from alpha_out. + // Remove root alpha from alpha_out. log::debug!("root_alpha: {root_alpha:?}"); // Get pending alpha as original alpha_out - root_alpha. 
let pending_alpha: U96F32 = alpha_out_i.saturating_sub(root_alpha); From b54ca79581eb5d57af08011711fff0fed348e3f3 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Fri, 12 Sep 2025 13:13:14 -0300 Subject: [PATCH 233/379] fix get_immune_owner_hotkeys --- pallets/subtensor/src/subnets/registration.rs | 32 ++++++++++++++----- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/pallets/subtensor/src/subnets/registration.rs b/pallets/subtensor/src/subnets/registration.rs index 6f11921dba..a620755f2e 100644 --- a/pallets/subtensor/src/subnets/registration.rs +++ b/pallets/subtensor/src/subnets/registration.rs @@ -389,6 +389,20 @@ impl Pallet { } fn get_immune_owner_hotkeys(netuid: NetUid, coldkey: &T::AccountId) -> Vec { + Self::get_immune_owner_tuples(netuid, coldkey) + .into_iter() + .map(|(_, hk)| hk) + .collect() + } + + pub fn get_immune_owner_uids(netuid: NetUid, coldkey: &T::AccountId) -> Vec { + Self::get_immune_owner_tuples(netuid, coldkey) + .into_iter() + .map(|(uid, _)| uid) + .collect() + } + + fn get_immune_owner_tuples(netuid: NetUid, coldkey: &T::AccountId) -> Vec<(u16, T::AccountId)> { // Gather (block, uid, hotkey) only for hotkeys that have a UID and a registration block. 
let mut triples: Vec<(u64, u16, T::AccountId)> = OwnedHotkeys::::get(coldkey) .into_iter() @@ -411,22 +425,24 @@ impl Pallet { triples.truncate(limit); } - // Project to just hotkeys - let mut immune_hotkeys: Vec = - triples.into_iter().map(|(_, _, hk)| hk).collect(); + // Project to uid/hotkey tuple + let mut immune_tuples: Vec<(u16, T::AccountId)> = + triples.into_iter().map(|(_, uid, hk)| (uid, hk)).collect(); // Insert subnet owner hotkey in the beginning of the list if valid and not // already present if let Ok(owner_hk) = SubnetOwnerHotkey::::try_get(netuid) { - if Uids::::get(netuid, &owner_hk).is_some() && !immune_hotkeys.contains(&owner_hk) { - immune_hotkeys.insert(0, owner_hk); - if immune_hotkeys.len() > limit { - immune_hotkeys.truncate(limit); + if let Some(owner_uid) = Uids::::get(netuid, &owner_hk) { + if !immune_tuples.contains(&(owner_uid, owner_hk.clone())) { + immune_tuples.insert(0, (owner_uid, owner_hk.clone())); + if immune_tuples.len() > limit { + immune_tuples.truncate(limit); + } } } } - immune_hotkeys + immune_tuples } /// Determine which peer to prune from the network by finding the element with the lowest pruning score out of From b0f963ba88251bc90385937d8c465d0177a7bd77 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Fri, 12 Sep 2025 18:47:55 +0300 Subject: [PATCH 234/379] Refactor TransactionType --- pallets/admin-utils/src/lib.rs | 94 ++--- pallets/admin-utils/src/tests/mod.rs | 20 +- pallets/subtensor/src/lib.rs | 4 +- .../migrate_owner_hparam_rl_to_epochs.rs | 2 +- pallets/subtensor/src/staking/set_children.rs | 27 +- pallets/subtensor/src/subnets/subnet.rs | 8 +- pallets/subtensor/src/tests/children.rs | 24 +- pallets/subtensor/src/tests/ensure.rs | 29 +- pallets/subtensor/src/tests/mock.rs | 2 +- .../subtensor/src/transaction_extension.rs | 2 +- pallets/subtensor/src/utils/misc.rs | 6 +- pallets/subtensor/src/utils/rate_limiting.rs | 321 ++++++++---------- 12 files changed, 229 insertions(+), 310 deletions(-) diff --git 
a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 2d66f9a3d4..4f63fdeaa4 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -25,7 +25,7 @@ pub mod pallet { use frame_support::{dispatch::DispatchResult, pallet_prelude::StorageMap}; use frame_system::pallet_prelude::*; use pallet_evm_chain_id::{self, ChainId}; - use pallet_subtensor::utils::rate_limiting::TransactionType; + use pallet_subtensor::utils::rate_limiting::{Hyperparameter, TransactionType}; use sp_runtime::BoundedVec; use substrate_fixed::types::I96F32; use subtensor_runtime_common::{NetUid, SubId, TaoCurrency}; @@ -216,14 +216,14 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetServingRateLimit], + &[Hyperparameter::ServingRateLimit.into()], )?; pallet_subtensor::Pallet::::set_serving_rate_limit(netuid, serving_rate_limit); log::debug!("ServingRateLimitSet( serving_rate_limit: {serving_rate_limit:?} ) "); pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetServingRateLimit], + &[Hyperparameter::ServingRateLimit.into()], ); Ok(()) } @@ -268,7 +268,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetMaxDifficulty], + &[Hyperparameter::MaxDifficulty.into()], )?; ensure!( @@ -282,7 +282,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetMaxDifficulty], + &[Hyperparameter::MaxDifficulty.into()], ); Ok(()) } @@ -393,7 +393,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetAdjustmentAlpha], + &[Hyperparameter::AdjustmentAlpha.into()], )?; ensure!( @@ -404,7 +404,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - 
&[TransactionType::OwnerSetAdjustmentAlpha], + &[Hyperparameter::AdjustmentAlpha.into()], ); log::debug!("AdjustmentAlphaSet( adjustment_alpha: {adjustment_alpha:?} ) "); Ok(()) @@ -425,7 +425,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetMaxWeightLimit], + &[Hyperparameter::MaxWeightLimit.into()], )?; ensure!( @@ -436,7 +436,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetMaxWeightLimit], + &[Hyperparameter::MaxWeightLimit.into()], ); log::debug!( "MaxWeightLimitSet( netuid: {netuid:?} max_weight_limit: {max_weight_limit:?} ) " @@ -459,7 +459,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetImmunityPeriod], + &[Hyperparameter::ImmunityPeriod.into()], )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -470,7 +470,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetImmunityPeriod], + &[Hyperparameter::ImmunityPeriod.into()], ); log::debug!( "ImmunityPeriodSet( netuid: {netuid:?} immunity_period: {immunity_period:?} ) " @@ -493,7 +493,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetMinAllowedWeights], + &[Hyperparameter::MinAllowedWeights.into()], )?; ensure!( @@ -507,7 +507,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetMinAllowedWeights], + &[Hyperparameter::MinAllowedWeights.into()], ); Ok(()) } @@ -551,7 +551,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetKappa], + &[Hyperparameter::Kappa.into()], )?; ensure!( @@ -563,7 +563,7 @@ pub mod 
pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetKappa], + &[Hyperparameter::Kappa.into()], ); Ok(()) } @@ -579,7 +579,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetRho], + &[Hyperparameter::Rho.into()], )?; ensure!( @@ -591,7 +591,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetRho], + &[Hyperparameter::Rho.into()], ); Ok(()) } @@ -611,7 +611,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetActivityCutoff], + &[Hyperparameter::ActivityCutoff.into()], )?; ensure!( @@ -631,7 +631,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetActivityCutoff], + &[Hyperparameter::ActivityCutoff.into()], ); Ok(()) } @@ -679,7 +679,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetPowRegistrationAllowed], + &[Hyperparameter::PowRegistrationAllowed.into()], )?; pallet_subtensor::Pallet::::set_network_pow_registration_allowed( @@ -692,7 +692,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetPowRegistrationAllowed], + &[Hyperparameter::PowRegistrationAllowed.into()], ); Ok(()) } @@ -740,7 +740,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetMinBurn], + &[Hyperparameter::MinBurn.into()], )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -760,7 +760,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetMinBurn], + &[Hyperparameter::MinBurn.into()], ); 
Ok(()) } @@ -780,7 +780,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetMaxBurn], + &[Hyperparameter::MaxBurn.into()], )?; ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), @@ -800,7 +800,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetMaxBurn], + &[Hyperparameter::MaxBurn.into()], ); Ok(()) } @@ -875,7 +875,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetBondsMovingAverage], + &[Hyperparameter::BondsMovingAverage.into()], )?; if maybe_owner.is_some() { ensure!( @@ -895,7 +895,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetBondsMovingAverage], + &[Hyperparameter::BondsMovingAverage.into()], ); Ok(()) } @@ -915,7 +915,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetBondsPenalty], + &[Hyperparameter::BondsPenalty.into()], )?; ensure!( @@ -927,7 +927,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetBondsPenalty], + &[Hyperparameter::BondsPenalty.into()], ); Ok(()) } @@ -1199,7 +1199,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerToggleCommitReveal], + &[Hyperparameter::CommitRevealEnabled.into()], )?; ensure!( @@ -1212,7 +1212,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerToggleCommitReveal], + &[Hyperparameter::CommitRevealEnabled.into()], ); Ok(()) } @@ -1236,14 +1236,14 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, 
netuid, - &[TransactionType::OwnerToggleLiquidAlphaEnabled], + &[Hyperparameter::LiquidAlphaEnabled.into()], )?; pallet_subtensor::Pallet::::set_liquid_alpha_enabled(netuid, enabled); log::debug!("LiquidAlphaEnableToggled( netuid: {netuid:?}, Enabled: {enabled:?} ) "); pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerToggleLiquidAlphaEnabled], + &[Hyperparameter::LiquidAlphaEnabled.into()], ); Ok(()) } @@ -1260,7 +1260,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin.clone(), netuid, - &[TransactionType::OwnerSetAlphaValues], + &[Hyperparameter::AlphaValues.into()], )?; let res = pallet_subtensor::Pallet::::do_set_alpha_values( origin, netuid, alpha_low, alpha_high, @@ -1269,7 +1269,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetAlphaValues], + &[Hyperparameter::AlphaValues.into()], ); } res @@ -1367,7 +1367,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetWeightCommitInterval], + &[Hyperparameter::WeightCommitInterval.into()], )?; ensure!( @@ -1381,7 +1381,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetWeightCommitInterval], + &[Hyperparameter::WeightCommitInterval.into()], ); Ok(()) @@ -1459,14 +1459,14 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerToggleTransfer], + &[Hyperparameter::TransferEnabled.into()], )?; let res = pallet_subtensor::Pallet::::toggle_transfer(netuid, toggle); if res.is_ok() { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerToggleTransfer], + &[Hyperparameter::TransferEnabled.into()], ); } res @@ -1601,7 +1601,7 @@ pub mod pallet { let maybe_owner = 
pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin.clone(), netuid, - &[TransactionType::OwnerSetAlphaSigmoidSteepness], + &[Hyperparameter::AlphaSigmoidSteepness.into()], )?; ensure!( @@ -1621,7 +1621,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetAlphaSigmoidSteepness], + &[Hyperparameter::AlphaSigmoidSteepness.into()], ); Ok(()) } @@ -1645,7 +1645,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerToggleYuma3Enabled], + &[Hyperparameter::Yuma3Enabled.into()], )?; pallet_subtensor::Pallet::::set_yuma3_enabled(netuid, enabled); @@ -1654,7 +1654,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerToggleYuma3Enabled], + &[Hyperparameter::Yuma3Enabled.into()], ); Ok(()) } @@ -1678,7 +1678,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerToggleBondsReset], + &[Hyperparameter::BondsResetEnabled.into()], )?; pallet_subtensor::Pallet::::set_bonds_reset(netuid, enabled); @@ -1687,7 +1687,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerToggleBondsReset], + &[Hyperparameter::BondsResetEnabled.into()], ); Ok(()) } @@ -1791,13 +1791,13 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::OwnerSetOwnerImmuneNeuronLimit], + &[Hyperparameter::ImmuneNeuronLimit.into()], )?; pallet_subtensor::Pallet::::set_owner_immune_neuron_limit(netuid, immune_neurons)?; pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::OwnerSetOwnerImmuneNeuronLimit], + &[Hyperparameter::ImmuneNeuronLimit.into()], ); Ok(()) } diff --git a/pallets/admin-utils/src/tests/mod.rs 
b/pallets/admin-utils/src/tests/mod.rs index 85d284c402..0e6fdc23b7 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -7,7 +7,7 @@ use frame_support::{ use frame_system::Config; use pallet_subtensor::{Error as SubtensorError, SubnetOwner, Tempo, WeightsVersionKeyRateLimit}; // use pallet_subtensor::{migrations, Event}; -use pallet_subtensor::Event; +use pallet_subtensor::{Event, utils::rate_limiting::TransactionType}; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{Get, Pair, U256, ed25519}; use substrate_fixed::types::I96F32; @@ -188,11 +188,10 @@ fn test_sudo_set_weights_version_key_rate_limit() { // Try to set again with // Assert rate limit not passed - assert!(!SubtensorModule::passes_rate_limit_on_subnet( - &pallet_subtensor::utils::rate_limiting::TransactionType::SetWeightsVersionKey, - &sn_owner, - netuid - )); + assert!( + !TransactionType::SetWeightsVersionKey + .passes_rate_limit_on_subnet::(&sn_owner, netuid) + ); // Try transaction assert_noop!( @@ -206,11 +205,10 @@ fn test_sudo_set_weights_version_key_rate_limit() { // Wait for rate limit to pass run_to_block(rate_limit_period + 1); - assert!(SubtensorModule::passes_rate_limit_on_subnet( - &pallet_subtensor::utils::rate_limiting::TransactionType::SetWeightsVersionKey, - &sn_owner, - netuid - )); + assert!( + TransactionType::SetWeightsVersionKey + .passes_rate_limit_on_subnet::(&sn_owner, netuid) + ); // Try transaction assert_ok!(AdminUtils::sudo_set_weights_version_key( diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 382d0692e2..d2cb54306f 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -42,7 +42,7 @@ pub mod staking; pub mod subnets; pub mod swap; pub mod utils; -use crate::utils::rate_limiting::TransactionType; +use crate::utils::rate_limiting::{Hyperparameter, TransactionType}; use macros::{config, dispatches, errors, events, genesis, hooks}; #[cfg(test)] @@ -2189,7 
+2189,7 @@ pub enum RateLimitKey { // The setting sn owner hotkey operation is rate limited per netuid SetSNOwnerHotkey(NetUid), // Generic rate limit for subnet-owner hyperparameter updates (per netuid) - OwnerHyperparamUpdate(NetUid), + OwnerHyperparamUpdate(NetUid, Hyperparameter), // Subnet registration rate limit NetworkLastRegistered, // Last tx block limit per account ID diff --git a/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs b/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs index 501c00c4ad..4a1bfa48be 100644 --- a/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs +++ b/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs @@ -3,7 +3,7 @@ use crate::HasMigrationRun; use codec::Decode; use frame_support::weights::Weight; use sp_io::hashing::twox_128; -use sp_io::storage::{clear, get}; +use sp_io::storage::get; /// Migrate u64 to u16 in OwnerHyperparamRateLimit and new default pub fn migrate_owner_hyperparam_rl_to_epochs() -> Weight { diff --git a/pallets/subtensor/src/staking/set_children.rs b/pallets/subtensor/src/staking/set_children.rs index 139ea82c5d..c6c37f7e96 100644 --- a/pallets/subtensor/src/staking/set_children.rs +++ b/pallets/subtensor/src/staking/set_children.rs @@ -48,10 +48,9 @@ impl Pallet { // Ensure the hotkey passes the rate limit. ensure!( - Self::passes_rate_limit_on_subnet( - &TransactionType::SetChildren, // Set children. - &hotkey, // Specific to a hotkey. - netuid, // Specific to a subnet. + TransactionType::SetChildren.passes_rate_limit_on_subnet::( + &hotkey, // Specific to a hotkey. + netuid, // Specific to a subnet. 
), Error::::TxRateLimitExceeded ); @@ -111,12 +110,7 @@ impl Pallet { // Set last transaction block let current_block = Self::get_current_block_as_u64(); - Self::set_last_transaction_block_on_subnet( - &hotkey, - netuid, - &TransactionType::SetChildren, - current_block, - ); + TransactionType::SetChildren.set_last_block_on_subnet::(&hotkey, netuid, current_block); // Calculate cool-down block let cooldown_block = @@ -319,10 +313,9 @@ impl Pallet { if take > current_take { // Ensure the hotkey passes the rate limit. ensure!( - Self::passes_rate_limit_on_subnet( - &TransactionType::SetChildkeyTake, // Set childkey take. - &hotkey, // Specific to a hotkey. - netuid, // Specific to a subnet. + TransactionType::SetChildkeyTake.passes_rate_limit_on_subnet::( + &hotkey, // Specific to a hotkey. + netuid, // Specific to a subnet. ), Error::::TxChildkeyTakeRateLimitExceeded ); @@ -330,10 +323,9 @@ impl Pallet { // Set last transaction block let current_block = Self::get_current_block_as_u64(); - Self::set_last_transaction_block_on_subnet( + TransactionType::SetChildkeyTake.set_last_block_on_subnet::( &hotkey, netuid, - &TransactionType::SetChildkeyTake, current_block, ); @@ -341,10 +333,9 @@ impl Pallet { ChildkeyTake::::insert(hotkey.clone(), netuid, take); // Update the last transaction block - Self::set_last_transaction_block_on_subnet( + TransactionType::SetChildkeyTake.set_last_block_on_subnet::( &hotkey, netuid, - &TransactionType::SetChildkeyTake, current_block, ); diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 38be89cba0..6241c54ef7 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -135,7 +135,7 @@ impl Pallet { // --- 4. Rate limit for network registrations. 
let current_block = Self::get_current_block_as_u64(); ensure!( - Self::passes_rate_limit(&TransactionType::RegisterNetwork, &coldkey), + TransactionType::RegisterNetwork.passes_rate_limit::(&coldkey), Error::::NetworkTxRateLimitExceeded ); @@ -391,8 +391,7 @@ impl Pallet { // Rate limit: 1 call per week ensure!( - Self::passes_rate_limit_on_subnet( - &TransactionType::SetSNOwnerHotkey, + TransactionType::SetSNOwnerHotkey.passes_rate_limit_on_subnet::( hotkey, // ignored netuid, // Specific to a subnet. ), @@ -401,10 +400,9 @@ impl Pallet { // Set last transaction block let current_block = Self::get_current_block_as_u64(); - Self::set_last_transaction_block_on_subnet( + TransactionType::SetSNOwnerHotkey.set_last_block_on_subnet::( hotkey, netuid, - &TransactionType::SetSNOwnerHotkey, current_block, ); diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index 1208add954..67dfe47fbe 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -955,17 +955,15 @@ fn test_childkey_take_rate_limiting() { // Helper function to log rate limit information let log_rate_limit_info = || { let current_block = SubtensorModule::get_current_block_as_u64(); - let last_block = SubtensorModule::get_last_transaction_block_on_subnet( + let last_block = TransactionType::SetChildkeyTake.last_block_on_subnet::( &hotkey, netuid, - &TransactionType::SetChildkeyTake, ); - let passes = SubtensorModule::passes_rate_limit_on_subnet( - &TransactionType::SetChildkeyTake, + let passes = TransactionType::SetChildkeyTake.passes_rate_limit_on_subnet::( &hotkey, netuid, ); - let limit = SubtensorModule::get_rate_limit_on_subnet(&TransactionType::SetChildkeyTake, netuid); + let limit = TransactionType::SetChildkeyTake.rate_limit_on_subnet::(netuid); log::info!( "Rate limit info: current_block: {}, last_block: {}, limit: {}, passes: {}, diff: {}", current_block, @@ -2489,12 +2487,7 @@ fn 
test_revoke_child_no_min_stake_check() { assert_eq!(children_after, vec![(proportion, child)]); // Bypass tx rate limit - SubtensorModule::set_last_transaction_block_on_subnet( - &parent, - netuid, - &TransactionType::SetChildren, - 0, - ); + TransactionType::SetChildren.set_last_block_on_subnet::(&parent, netuid, 0); // Schedule parent-child relationship revokation assert_ok!(SubtensorModule::do_schedule_children( @@ -2609,18 +2602,13 @@ fn test_set_children_rate_limit_fail_then_succeed() { // Try again after rate limit period has passed // Check rate limit - let limit = - SubtensorModule::get_rate_limit_on_subnet(&TransactionType::SetChildren, netuid); + let limit = TransactionType::SetChildren.rate_limit_on_subnet::(netuid); // Step that many blocks step_block(limit as u16); // Verify rate limit passes - assert!(SubtensorModule::passes_rate_limit_on_subnet( - &TransactionType::SetChildren, - &hotkey, - netuid - )); + assert!(TransactionType::SetChildren.passes_rate_limit_on_subnet::(&hotkey, netuid)); // Try again mock_set_children(&coldkey, &hotkey, netuid, &[(100, child2)]); diff --git a/pallets/subtensor/src/tests/ensure.rs b/pallets/subtensor/src/tests/ensure.rs index 22a8dbd029..298339defa 100644 --- a/pallets/subtensor/src/tests/ensure.rs +++ b/pallets/subtensor/src/tests/ensure.rs @@ -4,8 +4,8 @@ use sp_core::U256; use subtensor_runtime_common::NetUid; use super::mock::*; -use crate::utils::rate_limiting::TransactionType; -use crate::{RateLimitKey, SubnetOwner, SubtokenEnabled}; +use crate::utils::rate_limiting::{Hyperparameter, TransactionType}; +use crate::{OwnerHyperparamRateLimit, SubnetOwner, SubtokenEnabled}; #[test] fn ensure_subnet_owner_returns_who_and_checks_ownership() { @@ -95,33 +95,31 @@ fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { let owner: U256 = U256::from(5); SubnetOwner::::insert(netuid, owner); // Set freeze window to 0 initially to avoid blocking when tempo is small - let freeze_window = 3; - 
crate::Pallet::::set_admin_freeze_window(freeze_window); + crate::Pallet::::set_admin_freeze_window(0); + + // Set tempo to 1 so owner hyperparam RL = 2 blocks + crate::Pallet::::set_tempo(netuid, 1); - crate::Pallet::::set_owner_hyperparam_rate_limit(1); + assert_eq!(OwnerHyperparamRateLimit::::get(), 2); // Outside freeze window initially; should pass and return Some(owner) let res = crate::Pallet::::ensure_sn_owner_or_root_with_limits( <::RuntimeOrigin>::signed(owner), netuid, - &[TransactionType::OwnerSetKappa], + &[Hyperparameter::Kappa.into()], ) .expect("should pass"); assert_eq!(res, Some(owner)); // Simulate previous update at current block -> next call should fail due to rate limit let now = crate::Pallet::::get_current_block_as_u64(); - crate::Pallet::::set_last_transaction_block_on_subnet( - &owner, - netuid, - &TransactionType::OwnerSetKappa, - now, - ); + TransactionType::from(Hyperparameter::Kappa) + .set_last_block_on_subnet::(&owner, netuid, now); assert_noop!( crate::Pallet::::ensure_sn_owner_or_root_with_limits( <::RuntimeOrigin>::signed(owner), netuid, - &[TransactionType::OwnerSetKappa], + &[Hyperparameter::Kappa.into()], ), crate::Error::::TxRateLimitExceeded ); @@ -131,12 +129,13 @@ fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { assert_ok!(crate::Pallet::::ensure_sn_owner_or_root_with_limits( <::RuntimeOrigin>::signed(owner), netuid, - &[TransactionType::OwnerSetKappa] + &[Hyperparameter::Kappa.into()] )); // Now advance into the freeze window; ensure blocks // (using loop for clarity, because epoch calculation function uses netuid) // Restore tempo and configure freeze window for this part + let freeze_window = 3; crate::Pallet::::set_tempo(netuid, tempo); crate::Pallet::::set_admin_freeze_window(freeze_window); let freeze_window = freeze_window as u64; @@ -152,7 +151,7 @@ fn ensure_owner_or_root_with_limits_checks_rl_and_freeze() { crate::Pallet::::ensure_sn_owner_or_root_with_limits( <::RuntimeOrigin>::signed(owner), netuid, 
- &[TransactionType::OwnerSetKappa], + &[Hyperparameter::Kappa.into()], ), crate::Error::::AdminActionProhibitedDuringWeightsWindow ); diff --git a/pallets/subtensor/src/tests/mock.rs b/pallets/subtensor/src/tests/mock.rs index 28da96687a..672b1e44dd 100644 --- a/pallets/subtensor/src/tests/mock.rs +++ b/pallets/subtensor/src/tests/mock.rs @@ -948,7 +948,7 @@ pub fn mock_set_children_no_epochs(netuid: NetUid, parent: &U256, child_vec: &[( #[allow(dead_code)] pub fn step_rate_limit(transaction_type: &TransactionType, netuid: NetUid) { // Check rate limit - let limit = SubtensorModule::get_rate_limit_on_subnet(transaction_type, netuid); + let limit = transaction_type.rate_limit_on_subnet::(netuid); // Step that many blocks step_block(limit as u16); diff --git a/pallets/subtensor/src/transaction_extension.rs b/pallets/subtensor/src/transaction_extension.rs index 62c6a3c8ca..b56dff0ea0 100644 --- a/pallets/subtensor/src/transaction_extension.rs +++ b/pallets/subtensor/src/transaction_extension.rs @@ -279,7 +279,7 @@ where .map(|validity| (validity, Some(who.clone()), origin.clone())) } Some(Call::register_network { .. 
}) => { - if !Pallet::::passes_rate_limit(&TransactionType::RegisterNetwork, who) { + if !TransactionType::RegisterNetwork.passes_rate_limit::(who) { return Err(CustomTransactionError::RateLimitExceeded.into()); } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 0dde6345a2..ef04a8ada8 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -62,7 +62,7 @@ impl Pallet { if let Some(who) = maybe_who.as_ref() { for tx in limits.iter() { ensure!( - Self::passes_rate_limit_on_subnet(tx, who, netuid), + tx.passes_rate_limit_on_subnet::(who, netuid), Error::::TxRateLimitExceeded ); } @@ -83,7 +83,7 @@ impl Pallet { Self::ensure_not_in_admin_freeze_window(netuid, now)?; for tx in limits.iter() { ensure!( - Self::passes_rate_limit_on_subnet(tx, &who, netuid), + tx.passes_rate_limit_on_subnet::(&who, netuid), Error::::TxRateLimitExceeded ); } @@ -130,7 +130,7 @@ impl Pallet { if let Some(who) = maybe_owner { let now = Self::get_current_block_as_u64(); for tx in txs { - Self::set_last_transaction_block_on_subnet(&who, netuid, tx, now); + tx.set_last_block_on_subnet::(&who, netuid, now); } } } diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index 9221974d12..463ba4b8cd 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -4,6 +4,7 @@ use super::*; /// Enum representing different types of transactions #[derive(Copy, Clone)] +#[non_exhaustive] pub enum TransactionType { SetChildren, SetChildkeyTake, @@ -11,245 +12,189 @@ pub enum TransactionType { RegisterNetwork, SetWeightsVersionKey, SetSNOwnerHotkey, - OwnerHyperparamUpdate, // Deprecated aggregate; keep for compatibility if referenced in tests + OwnerHyperparamUpdate(Hyperparameter), SubsubnetParameterUpdate, - // Per-hyperparameter owner updates (rate-limited independently) - OwnerSetServingRateLimit, - OwnerSetMaxDifficulty, - 
OwnerSetAdjustmentAlpha, - OwnerSetMaxWeightLimit, - OwnerSetImmunityPeriod, - OwnerSetMinAllowedWeights, - OwnerSetKappa, - OwnerSetRho, - OwnerSetActivityCutoff, - OwnerSetPowRegistrationAllowed, - OwnerSetMinBurn, - OwnerSetMaxBurn, - OwnerSetBondsMovingAverage, - OwnerSetBondsPenalty, - OwnerToggleCommitReveal, - OwnerToggleLiquidAlphaEnabled, - OwnerSetAlphaValues, - OwnerSetWeightCommitInterval, - OwnerToggleTransfer, - OwnerSetAlphaSigmoidSteepness, - OwnerToggleYuma3Enabled, - OwnerToggleBondsReset, - OwnerSetOwnerImmuneNeuronLimit, } -/// Implement conversion from TransactionType to u16 -impl From for u16 { - fn from(tx_type: TransactionType) -> Self { - match tx_type { - TransactionType::SetChildren => 0, - TransactionType::SetChildkeyTake => 1, - TransactionType::Unknown => 2, - TransactionType::RegisterNetwork => 3, - TransactionType::SetWeightsVersionKey => 4, - TransactionType::SetSNOwnerHotkey => 5, - TransactionType::OwnerHyperparamUpdate => 6, - TransactionType::SubsubnetParameterUpdate => 7, - TransactionType::OwnerSetServingRateLimit => 10, - TransactionType::OwnerSetMaxDifficulty => 11, - TransactionType::OwnerSetAdjustmentAlpha => 12, - TransactionType::OwnerSetMaxWeightLimit => 13, - TransactionType::OwnerSetImmunityPeriod => 14, - TransactionType::OwnerSetMinAllowedWeights => 15, - TransactionType::OwnerSetKappa => 16, - TransactionType::OwnerSetRho => 17, - TransactionType::OwnerSetActivityCutoff => 18, - TransactionType::OwnerSetPowRegistrationAllowed => 19, - TransactionType::OwnerSetMinBurn => 20, - TransactionType::OwnerSetMaxBurn => 21, - TransactionType::OwnerSetBondsMovingAverage => 22, - TransactionType::OwnerSetBondsPenalty => 23, - TransactionType::OwnerToggleCommitReveal => 24, - TransactionType::OwnerToggleLiquidAlphaEnabled => 25, - TransactionType::OwnerSetAlphaValues => 26, - TransactionType::OwnerSetWeightCommitInterval => 27, - TransactionType::OwnerToggleTransfer => 28, - TransactionType::OwnerSetAlphaSigmoidSteepness => 
29, - TransactionType::OwnerToggleYuma3Enabled => 30, - TransactionType::OwnerToggleBondsReset => 31, - TransactionType::OwnerSetOwnerImmuneNeuronLimit => 32, - } - } -} - -/// Implement conversion from u16 to TransactionType -impl From for TransactionType { - fn from(value: u16) -> Self { - match value { - 0 => TransactionType::SetChildren, - 1 => TransactionType::SetChildkeyTake, - 3 => TransactionType::RegisterNetwork, - 4 => TransactionType::SetWeightsVersionKey, - 5 => TransactionType::SetSNOwnerHotkey, - 6 => TransactionType::OwnerHyperparamUpdate, - 7 => TransactionType::SubsubnetParameterUpdate, - 10 => TransactionType::OwnerSetServingRateLimit, - 11 => TransactionType::OwnerSetMaxDifficulty, - 12 => TransactionType::OwnerSetAdjustmentAlpha, - 13 => TransactionType::OwnerSetMaxWeightLimit, - 14 => TransactionType::OwnerSetImmunityPeriod, - 15 => TransactionType::OwnerSetMinAllowedWeights, - 16 => TransactionType::OwnerSetKappa, - 17 => TransactionType::OwnerSetRho, - 18 => TransactionType::OwnerSetActivityCutoff, - 19 => TransactionType::OwnerSetPowRegistrationAllowed, - 20 => TransactionType::OwnerSetMinBurn, - 21 => TransactionType::OwnerSetMaxBurn, - 22 => TransactionType::OwnerSetBondsMovingAverage, - 23 => TransactionType::OwnerSetBondsPenalty, - 24 => TransactionType::OwnerToggleCommitReveal, - 25 => TransactionType::OwnerToggleLiquidAlphaEnabled, - 26 => TransactionType::OwnerSetAlphaValues, - 27 => TransactionType::OwnerSetWeightCommitInterval, - 28 => TransactionType::OwnerToggleTransfer, - 29 => TransactionType::OwnerSetAlphaSigmoidSteepness, - 30 => TransactionType::OwnerToggleYuma3Enabled, - 31 => TransactionType::OwnerToggleBondsReset, - 32 => TransactionType::OwnerSetOwnerImmuneNeuronLimit, - _ => TransactionType::Unknown, - } - } -} - -impl Pallet { - // ======================== - // ==== Rate Limiting ===== - // ======================== +impl TransactionType { /// Get the rate limit for a specific transaction type - pub fn 
get_rate_limit(tx_type: &TransactionType) -> u64 { - match tx_type { - TransactionType::SetChildren => 150, // 30 minutes - TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), - TransactionType::RegisterNetwork => NetworkRateLimit::::get(), - TransactionType::SubsubnetParameterUpdate => SubsubnetCountSetRateLimit::::get(), - - TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) + pub fn rate_limit(&self) -> u64 { + match self { + Self::SetChildren => 150, // 30 minutes + Self::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), + Self::RegisterNetwork => NetworkRateLimit::::get(), + Self::SubsubnetParameterUpdate => SubsubnetCountSetRateLimit::::get(), + + Self::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, } } - pub fn get_rate_limit_on_subnet(tx_type: &TransactionType, netuid: NetUid) -> u64 { + pub fn rate_limit_on_subnet(&self, netuid: NetUid) -> u64 { #[allow(clippy::match_single_binding)] - match tx_type { - TransactionType::SetWeightsVersionKey => (Tempo::::get(netuid) as u64) + match self { + Self::SetWeightsVersionKey => (Tempo::::get(netuid) as u64) .saturating_mul(WeightsVersionKeyRateLimit::::get()), // Owner hyperparameter updates are rate-limited by N tempos on the subnet (sudo configurable) - TransactionType::OwnerHyperparamUpdate - | TransactionType::OwnerSetServingRateLimit - | TransactionType::OwnerSetMaxDifficulty - | TransactionType::OwnerSetAdjustmentAlpha - | TransactionType::OwnerSetMaxWeightLimit - | TransactionType::OwnerSetImmunityPeriod - | TransactionType::OwnerSetMinAllowedWeights - | TransactionType::OwnerSetKappa - | TransactionType::OwnerSetRho - | TransactionType::OwnerSetActivityCutoff - | TransactionType::OwnerSetPowRegistrationAllowed - | TransactionType::OwnerSetMinBurn - | TransactionType::OwnerSetMaxBurn - | TransactionType::OwnerSetBondsMovingAverage - | TransactionType::OwnerSetBondsPenalty - | TransactionType::OwnerToggleCommitReveal - | 
TransactionType::OwnerToggleLiquidAlphaEnabled - | TransactionType::OwnerSetAlphaValues - | TransactionType::OwnerSetWeightCommitInterval - | TransactionType::OwnerToggleTransfer - | TransactionType::OwnerSetAlphaSigmoidSteepness - | TransactionType::OwnerToggleYuma3Enabled - | TransactionType::OwnerToggleBondsReset - | TransactionType::OwnerSetOwnerImmuneNeuronLimit => { + Self::OwnerHyperparamUpdate(_) => { let epochs = OwnerHyperparamRateLimit::::get() as u64; (Tempo::::get(netuid) as u64).saturating_mul(epochs) } - TransactionType::SetSNOwnerHotkey => DefaultSetSNOwnerHotkeyRateLimit::::get(), + Self::SetSNOwnerHotkey => DefaultSetSNOwnerHotkeyRateLimit::::get(), - _ => Self::get_rate_limit(tx_type), + _ => self.rate_limit::(), } } + pub fn passes_rate_limit(&self, key: &T::AccountId) -> bool { + let block = Pallet::::get_current_block_as_u64(); + let limit = self.rate_limit::(); + let last_block = self.last_block::(key); + + Self::check_passes_rate_limit(limit, block, last_block) + } + pub fn check_passes_rate_limit(limit: u64, block: u64, last_block: u64) -> bool { // Allow the first transaction (when last_block is 0) or if the rate limit has passed last_block == 0 || block.saturating_sub(last_block) >= limit } - pub fn passes_rate_limit(tx_type: &TransactionType, key: &T::AccountId) -> bool { - let block: u64 = Self::get_current_block_as_u64(); - let limit: u64 = Self::get_rate_limit(tx_type); - let last_block: u64 = Self::get_last_transaction_block(key, tx_type); - - Self::check_passes_rate_limit(limit, block, last_block) - } - /// Check if a transaction should be rate limited on a specific subnet - pub fn passes_rate_limit_on_subnet( - tx_type: &TransactionType, + pub fn passes_rate_limit_on_subnet( + &self, hotkey: &T::AccountId, netuid: NetUid, ) -> bool { - let block: u64 = Self::get_current_block_as_u64(); - let limit: u64 = Self::get_rate_limit_on_subnet(tx_type, netuid); - let last_block: u64 = Self::get_last_transaction_block_on_subnet(hotkey, 
netuid, tx_type); + let block = Pallet::::get_current_block_as_u64(); + let limit = self.rate_limit_on_subnet::(netuid); + let last_block = self.last_block_on_subnet::(hotkey, netuid); Self::check_passes_rate_limit(limit, block, last_block) } /// Get the block number of the last transaction for a specific key, and transaction type - pub fn get_last_transaction_block(key: &T::AccountId, tx_type: &TransactionType) -> u64 { - match tx_type { - TransactionType::RegisterNetwork => Self::get_network_last_lock_block(), - _ => Self::get_last_transaction_block_on_subnet(key, NetUid::ROOT, tx_type), + pub fn last_block(&self, key: &T::AccountId) -> u64 { + match self { + Self::RegisterNetwork => Pallet::::get_network_last_lock_block(), + _ => self.last_block_on_subnet::(key, NetUid::ROOT), } } - /// Get the block number of the last transaction for a specific hotkey, network, and transaction type - pub fn get_last_transaction_block_on_subnet( - hotkey: &T::AccountId, - netuid: NetUid, - tx_type: &TransactionType, - ) -> u64 { - match tx_type { - TransactionType::RegisterNetwork => Self::get_network_last_lock_block(), - TransactionType::SetSNOwnerHotkey => { - Self::get_rate_limited_last_block(&RateLimitKey::SetSNOwnerHotkey(netuid)) - } - TransactionType::OwnerHyperparamUpdate => { - Self::get_rate_limited_last_block(&RateLimitKey::OwnerHyperparamUpdate(netuid)) + /// Get the block number of the last transaction for a specific hotkey, network, and transaction + /// type + pub fn last_block_on_subnet(&self, hotkey: &T::AccountId, netuid: NetUid) -> u64 { + match self { + Self::RegisterNetwork => Pallet::::get_network_last_lock_block(), + Self::SetSNOwnerHotkey => { + Pallet::::get_rate_limited_last_block(&RateLimitKey::SetSNOwnerHotkey(netuid)) } + Self::OwnerHyperparamUpdate(hparam) => Pallet::::get_rate_limited_last_block( + &RateLimitKey::OwnerHyperparamUpdate(netuid, *hparam), + ), _ => { - let tx_as_u16: u16 = (*tx_type).into(); - TransactionKeyLastBlock::::get((hotkey, 
netuid, tx_as_u16)) + let tx_type: u16 = (*self).into(); + TransactionKeyLastBlock::::get((hotkey, netuid, tx_type)) } } } - /// Set the block number of the last transaction for a specific hotkey, network, and transaction type - pub fn set_last_transaction_block_on_subnet( + /// Set the block number of the last transaction for a specific hotkey, network, and transaction + /// type + pub fn set_last_block_on_subnet( + &self, key: &T::AccountId, netuid: NetUid, - tx_type: &TransactionType, block: u64, ) { - match tx_type { - TransactionType::RegisterNetwork => Self::set_network_last_lock_block(block), - TransactionType::SetSNOwnerHotkey => { - Self::set_rate_limited_last_block(&RateLimitKey::SetSNOwnerHotkey(netuid), block) - } - TransactionType::OwnerHyperparamUpdate => Self::set_rate_limited_last_block( - &RateLimitKey::OwnerHyperparamUpdate(netuid), + match self { + Self::RegisterNetwork => Pallet::::set_network_last_lock_block(block), + Self::SetSNOwnerHotkey => Pallet::::set_rate_limited_last_block( + &RateLimitKey::SetSNOwnerHotkey(netuid), + block, + ), + Self::OwnerHyperparamUpdate(hparam) => Pallet::::set_rate_limited_last_block( + &RateLimitKey::OwnerHyperparamUpdate(netuid, *hparam), block, ), _ => { - let tx_as_u16: u16 = (*tx_type).into(); - TransactionKeyLastBlock::::insert((key, netuid, tx_as_u16), block); + let tx_type: u16 = (*self).into(); + TransactionKeyLastBlock::::insert((key, netuid, tx_type), block); } } } +} + +/// Implement conversion from TransactionType to u16 +impl From for u16 { + fn from(tx_type: TransactionType) -> Self { + match tx_type { + TransactionType::SetChildren => 0, + TransactionType::SetChildkeyTake => 1, + TransactionType::Unknown => 2, + TransactionType::RegisterNetwork => 3, + TransactionType::SetWeightsVersionKey => 4, + TransactionType::SetSNOwnerHotkey => 5, + TransactionType::OwnerHyperparamUpdate(_) => 6, + TransactionType::SubsubnetParameterUpdate => 7, + } + } +} + +/// Implement conversion from u16 to 
TransactionType +impl From for TransactionType { + fn from(value: u16) -> Self { + match value { + 0 => TransactionType::SetChildren, + 1 => TransactionType::SetChildkeyTake, + 3 => TransactionType::RegisterNetwork, + 4 => TransactionType::SetWeightsVersionKey, + 5 => TransactionType::SetSNOwnerHotkey, + 6 => TransactionType::OwnerHyperparamUpdate(Hyperparameter::Unknown), + 7 => TransactionType::SubsubnetParameterUpdate, + _ => TransactionType::Unknown, + } + } +} + +impl From for TransactionType { + fn from(param: Hyperparameter) -> Self { + Self::OwnerHyperparamUpdate(param) + } +} + +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, Debug, TypeInfo)] +#[non_exhaustive] +pub enum Hyperparameter { + Unknown = 0, + ServingRateLimit = 1, + MaxDifficulty = 2, + AdjustmentAlpha = 3, + MaxWeightLimit = 4, + ImmunityPeriod = 5, + MinAllowedWeights = 6, + Kappa = 7, + Rho = 8, + ActivityCutoff = 9, + PowRegistrationAllowed = 10, + MinBurn = 11, + MaxBurn = 12, + BondsMovingAverage = 13, + BondsPenalty = 14, + CommitRevealEnabled = 15, + LiquidAlphaEnabled = 16, + AlphaValues = 17, + WeightCommitInterval = 18, + TransferEnabled = 19, + AlphaSigmoidSteepness = 20, + Yuma3Enabled = 21, + BondsResetEnabled = 22, + ImmuneNeuronLimit = 23, +} + +impl Pallet { + // ======================== + // ==== Rate Limiting ===== + // ======================== pub fn remove_last_tx_block(key: &T::AccountId) { Self::remove_rate_limited_last_block(&RateLimitKey::LastTxBlock(key.clone())) From cdfccff79c885382cca865f43828e557f0922977 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Fri, 12 Sep 2025 19:21:26 +0300 Subject: [PATCH 235/379] Test hyperparams updates rate limited independently --- pallets/admin-utils/src/tests/mod.rs | 82 ++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 0e6fdc23b7..fade295f23 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ 
b/pallets/admin-utils/src/tests/mod.rs @@ -2181,6 +2181,88 @@ fn test_hyperparam_rate_limit_enforced_by_tempo() { }); } +// Verifies owner hyperparameters are rate-limited independently per parameter. +// Setting one hyperparameter should not block setting a different hyperparameter +// during the same rate-limit window, but it should still block itself. +#[test] +fn test_owner_hyperparam_rate_limit_independent_per_param() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(7); + add_network(netuid, 10); + + // Set subnet owner + let owner: U256 = U256::from(123); + SubnetOwner::::insert(netuid, owner); + + // Use small tempo to make RL short and deterministic (2 blocks when tempo=1) + SubtensorModule::set_tempo(netuid, 1); + // Disable admin freeze window so it doesn't interfere with small tempo + assert_ok!(AdminUtils::sudo_set_admin_freeze_window( + <::RuntimeOrigin>::root(), + 0 + )); + + // First update to kappa should succeed + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 10 + )); + + // Immediate second update to the SAME param (kappa) should be blocked by RL + assert_noop!( + AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 11 + ), + SubtensorError::::TxRateLimitExceeded + ); + + // Updating a DIFFERENT param (rho) should pass immediately — independent RL key + assert_ok!(AdminUtils::sudo_set_rho( + <::RuntimeOrigin>::signed(owner), + netuid, + 5 + )); + + // kappa should still be blocked until its own RL window passes + assert_noop!( + AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 12 + ), + SubtensorError::::TxRateLimitExceeded + ); + + // rho should also be blocked for itself immediately after being set + assert_noop!( + AdminUtils::sudo_set_rho( + <::RuntimeOrigin>::signed(owner), + netuid, + 6 + ), + SubtensorError::::TxRateLimitExceeded + ); + + // Advance enough blocks to pass the RL window (2 blocks when tempo=1 and default epochs=2) + 
run_to_block(SubtensorModule::get_current_block_as_u64() + 2); + + // Now both hyperparameters can be updated again + assert_ok!(AdminUtils::sudo_set_kappa( + <::RuntimeOrigin>::signed(owner), + netuid, + 13 + )); + assert_ok!(AdminUtils::sudo_set_rho( + <::RuntimeOrigin>::signed(owner), + netuid, + 7 + )); + }); +} + #[test] fn test_sudo_set_max_burn() { new_test_ext().execute_with(|| { From e700fedb2dab6511ab5ef00a14a19927242e524f Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Fri, 12 Sep 2025 19:25:10 +0300 Subject: [PATCH 236/379] Reformat --- pallets/admin-utils/src/tests/mod.rs | 6 +----- .../src/migrations/migrate_owner_hparam_rl_to_epochs.rs | 5 +---- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index fade295f23..f4a21a45c4 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2238,11 +2238,7 @@ fn test_owner_hyperparam_rate_limit_independent_per_param() { // rho should also be blocked for itself immediately after being set assert_noop!( - AdminUtils::sudo_set_rho( - <::RuntimeOrigin>::signed(owner), - netuid, - 6 - ), + AdminUtils::sudo_set_rho(<::RuntimeOrigin>::signed(owner), netuid, 6), SubtensorError::::TxRateLimitExceeded ); diff --git a/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs b/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs index 4a1bfa48be..236bb10d5f 100644 --- a/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs +++ b/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs @@ -11,10 +11,7 @@ pub fn migrate_owner_hyperparam_rl_to_epochs() -> Weight { let mut weight = T::DbWeight::get().reads(1); if HasMigrationRun::::get(&migration_name) { - log::info!( - "Migration '{:?}' already executed. Skipping.", - migration_name - ); + log::info!("Migration '{migration_name:?}' already executed. 
Skipping."); return weight; } From cc5eedd4ad2b32f61f22d691c73a1c263c9c22c5 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Fri, 12 Sep 2025 19:46:25 +0300 Subject: [PATCH 237/379] Fix admin-utils benchmarks --- pallets/admin-utils/src/benchmarking.rs | 63 +++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index b8dafc0de2..54ede60a18 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -20,6 +20,9 @@ mod benchmarks { #[benchmark] fn swap_authorities(a: Linear<0, 32>) { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); + let mut value: BoundedVec< ::AuthorityId, ::MaxAuthorities, @@ -39,6 +42,8 @@ mod benchmarks { #[benchmark] fn schedule_grandpa_change(a: Linear<0, 32>) { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); let next_authorities = (1..=a) .map(|idx| account("Authority", idx, 0u32)) .collect::>(); @@ -50,18 +55,24 @@ mod benchmarks { #[benchmark] fn sudo_set_default_take() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); #[extrinsic_call] _(RawOrigin::Root, 100u16/*default_take*/)/*sudo_set_default_take*/; } #[benchmark] fn sudo_set_serving_rate_limit() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); #[extrinsic_call] _(RawOrigin::Root, 1u16.into()/*netuid*/, 100u64/*serving_rate_limit*/)/*sudo_set_serving_rate_limit*/; } #[benchmark] fn sudo_set_max_difficulty() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -73,6 +84,8 @@ mod benchmarks { #[benchmark] fn sudo_set_min_difficulty() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); 
pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -84,6 +97,8 @@ mod benchmarks { #[benchmark] fn sudo_set_weights_set_rate_limit() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -95,6 +110,8 @@ mod benchmarks { #[benchmark] fn sudo_set_weights_version_key() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -106,6 +123,8 @@ mod benchmarks { #[benchmark] fn sudo_set_bonds_moving_average() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -117,6 +136,8 @@ mod benchmarks { #[benchmark] fn sudo_set_bonds_penalty() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -128,6 +149,8 @@ mod benchmarks { #[benchmark] fn sudo_set_max_allowed_validators() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -139,6 +162,8 @@ mod benchmarks { #[benchmark] fn sudo_set_difficulty() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -150,6 +175,8 @@ mod benchmarks { #[benchmark] fn sudo_set_adjustment_interval() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -161,6 +188,8 @@ mod benchmarks { #[benchmark] fn 
sudo_set_target_registrations_per_interval() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -172,6 +201,8 @@ mod benchmarks { #[benchmark] fn sudo_set_activity_cutoff() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -183,6 +214,8 @@ mod benchmarks { #[benchmark] fn sudo_set_rho() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -194,6 +227,8 @@ mod benchmarks { #[benchmark] fn sudo_set_kappa() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*sudo_tempo*/ @@ -205,6 +240,8 @@ mod benchmarks { #[benchmark] fn sudo_set_max_allowed_uids() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -216,6 +253,8 @@ mod benchmarks { #[benchmark] fn sudo_set_min_allowed_weights() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -227,6 +266,8 @@ mod benchmarks { #[benchmark] fn sudo_set_immunity_period() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -238,6 +279,8 @@ mod benchmarks { #[benchmark] fn sudo_set_max_weight_limit() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), 
/*netuid*/ 1u16, /*tempo*/ @@ -249,6 +292,8 @@ mod benchmarks { #[benchmark] fn sudo_set_max_registrations_per_block() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -260,6 +305,8 @@ mod benchmarks { #[benchmark] fn sudo_set_max_burn() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -271,6 +318,8 @@ mod benchmarks { #[benchmark] fn sudo_set_min_burn() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -282,6 +331,8 @@ mod benchmarks { #[benchmark] fn sudo_set_network_registration_allowed() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -304,6 +355,8 @@ mod benchmarks { */ #[benchmark] fn sudo_set_tempo() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -315,6 +368,8 @@ mod benchmarks { #[benchmark] fn sudo_set_commit_reveal_weights_interval() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*sudo_tempo*/ @@ -326,6 +381,8 @@ mod benchmarks { #[benchmark] fn sudo_set_commit_reveal_weights_enabled() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*sudo_tempo*/ @@ -337,6 +394,8 @@ mod benchmarks { #[benchmark] fn sudo_set_commit_reveal_version() { + // disable admin freeze 
window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*sudo_tempo*/ @@ -354,12 +413,16 @@ mod benchmarks { #[benchmark] fn sudo_set_owner_hparam_rate_limit() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); #[extrinsic_call] _(RawOrigin::Root, 10u64/*limit*/)/*sudo_set_owner_hparam_rate_limit*/; } #[benchmark] fn sudo_set_owner_immune_neuron_limit() { + // disable admin freeze window + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*sudo_tempo*/ From c3683184fcb1a401929f6ed91432181019d35082 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Fri, 12 Sep 2025 19:54:27 +0300 Subject: [PATCH 238/379] Update localnet patch --- scripts/localnet_patch.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/localnet_patch.sh b/scripts/localnet_patch.sh index f5a84e8348..e3bee8c5b8 100755 --- a/scripts/localnet_patch.sh +++ b/scripts/localnet_patch.sh @@ -19,7 +19,7 @@ if ! grep -q 'pub fn DefaultPendingCooldown() -> u64 {' "$DefaultPend exit 1 fi -if ! grep -q 'TransactionType::SetChildren => 150, // 30 minutes' "$SetChildren"; then +if ! 
grep -q 'Self::SetChildren => 150, // 30 minutes' "$SetChildren"; then echo "Error: Target string not found in $SetChildren" exit 1 fi @@ -27,6 +27,6 @@ fi # replace perl -0777 -i -pe 's|pub const DurationOfStartCall: u64 = prod_or_fast!\(7 \* 24 \* 60 \* 60 / 12, 10\);|pub const DurationOfStartCall: u64 = prod_or_fast!(5, 10);|' "$DurationOfStartCall" perl -0777 -i -pe 's|pub fn DefaultPendingCooldown\(\) -> u64 \{\s*prod_or_fast!\(7_200, 15\)\s*\}|pub fn DefaultPendingCooldown() -> u64 {\n prod_or_fast!(15, 15)\n }|g' "$DefaultPendingCooldown" -perl -0777 -i -pe 's|TransactionType::SetChildren => 150, // 30 minutes|TransactionType::SetChildren => 15, // 3 min|' "$SetChildren" +perl -0777 -i -pe 's|Self::SetChildren => 150, // 30 minutes|Self::SetChildren => 15, // 3 min|' "$SetChildren" echo "Patch applied successfully." From b654eafa69f4fbbc24c485efd48d9dba10fb2825 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sun, 14 Sep 2025 14:36:34 -0700 Subject: [PATCH 239/379] add migrate_network_lock_reduction_interval --- pallets/subtensor/src/coinbase/root.rs | 1 + pallets/subtensor/src/macros/hooks.rs | 4 +- .../migrations/migrate_init_total_issuance.rs | 2 +- ...migrate_network_lock_reduction_interval.rs | 47 +++++++++++++++++++ pallets/subtensor/src/migrations/mod.rs | 1 + pallets/subtensor/src/subnets/subnet.rs | 7 ++- pallets/subtensor/src/tests/migration.rs | 30 ++++++++++++ 7 files changed, 86 insertions(+), 6 deletions(-) create mode 100644 pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index fa16e13189..113bd5b1af 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -492,6 +492,7 @@ impl Pallet { PendingOwnerCut::::remove(netuid); BlocksSinceLastStep::::remove(netuid); LastMechansimStepBlock::::remove(netuid); + 
LastAdjustmentBlock::::remove(netuid); // --- 16. Serving / rho / curves, and other per-net controls. ServingRateLimit::::remove(netuid); diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 1615e87cd6..12ef167ef9 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -141,7 +141,9 @@ mod hooks { // Migrate Immunity Period .saturating_add(migrations::migrate_network_immunity_period::migrate_network_immunity_period::()) // Migrate Subnet Limit - .saturating_add(migrations::migrate_subnet_limit_to_default::migrate_subnet_limit_to_default::()); + .saturating_add(migrations::migrate_subnet_limit_to_default::migrate_subnet_limit_to_default::()) + // Migrate Lock Reduction Interval + .saturating_add(migrations::migrate_network_lock_reduction_interval::migrate_network_lock_reduction_interval::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs b/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs index 042ad0fe77..6a05dc5a85 100644 --- a/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs +++ b/pallets/subtensor/src/migrations/migrate_init_total_issuance.rs @@ -15,7 +15,7 @@ pub mod deprecated_loaded_emission_format { } pub(crate) fn migrate_init_total_issuance() -> Weight { - let subnets_len = crate::SubnetLocked::::iter().count() as u64; + let subnets_len = crate::NetworksAdded::::iter().count() as u64; // Retrieve the total balance of all accounts let total_account_balances = <::Currency as fungible::Inspect< diff --git a/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs b/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs new file mode 100644 index 0000000000..56079b0a13 --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs @@ -0,0 +1,47 @@ +use super::*; +use frame_support::{traits::Get, weights::Weight}; +use log; +use 
scale_info::prelude::string::String; + +pub fn migrate_network_lock_reduction_interval() -> Weight { + const NEW_VALUE: u64 = 28_800; + + let migration_name = b"migrate_network_lock_reduction_interval".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + // Skip if already executed + if HasMigrationRun::::get(&migration_name) { + log::info!( + target: "runtime", + "Migration '{}' already run - skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + // ── 1) Set new values ───────────────────────────────────────────────── + NetworkLockReductionInterval::::put(NEW_VALUE); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + NetworkRateLimit::::put(NEW_VALUE); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + Pallet::::set_network_last_lock(TaoCurrency::from(1_000_000_000_000)); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + Pallet::::set_network_last_lock_block(Pallet::::get_current_block_as_u64()); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + // ── 2) Mark migration done ─────────────────────────────────────────── + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + target: "runtime", + "Migration '{}' completed - NetworkLockReductionInterval => {}.", + String::from_utf8_lossy(&migration_name), + NEW_VALUE + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index a12f04627b..55b249decd 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -20,6 +20,7 @@ pub mod migrate_fix_root_tao_and_alpha_in; pub mod migrate_identities_v2; pub mod migrate_init_total_issuance; pub mod migrate_network_immunity_period; +pub mod migrate_network_lock_reduction_interval; pub mod migrate_orphaned_storage_items; pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; diff --git 
a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index abaa1e02c5..42f2ff92b5 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -213,11 +213,10 @@ impl Pallet { SubnetAlphaIn::::insert(netuid_to_register, pool_initial_alpha); SubnetOwner::::insert(netuid_to_register, coldkey.clone()); SubnetOwnerHotkey::::insert(netuid_to_register, hotkey.clone()); - SubnetLocked::::insert(netuid_to_register, pool_initial_tao); - LargestLocked::::insert(netuid_to_register, pool_initial_tao.to_u64()); + SubnetLocked::::insert(netuid_to_register, actual_tao_lock_amount); SubnetTaoProvided::::insert(netuid_to_register, TaoCurrency::ZERO); - SubnetAlphaInProvided::::insert(netuid_to_register, AlphaCurrency::from(0)); - SubnetAlphaOut::::insert(netuid_to_register, AlphaCurrency::from(0)); + SubnetAlphaInProvided::::insert(netuid_to_register, AlphaCurrency::ZERO); + SubnetAlphaOut::::insert(netuid_to_register, AlphaCurrency::ZERO); SubnetVolume::::insert(netuid_to_register, 0u128); RAORecycledForRegistration::::insert( netuid_to_register, diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index f860248cce..f953e8eb47 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -1624,3 +1624,33 @@ fn test_migrate_subnet_limit_to_default() { ); }); } + +#[test] +fn test_migrate_network_lock_reduction_interval_and_decay() { + new_test_ext(0).execute_with(|| { + // ── pre ────────────────────────────────────────────────────────────── + assert!( + !HasMigrationRun::::get(b"migrate_network_lock_reduction_interval".to_vec()), + "HasMigrationRun should be false before migration" + ); + + // ensure current_block > 0 so mult = 2 after migration + step_block(1); + + // ── run migration ──────────────────────────────────────────────────── + let weight = 
crate::migrations::migrate_network_lock_reduction_interval::migrate_network_lock_reduction_interval::(); + assert!(!weight.is_zero(), "migration weight should be > 0"); + + // ── params & flags ─────────────────────────────────────────────────── + assert_eq!(NetworkLockReductionInterval::::get(), 28_800); + assert_eq!(NetworkRateLimit::::get(), 28_800); + assert_eq!(Pallet::::get_network_last_lock(), 1_000_000_000_000u64.into()); // 1000 TAO in rAO + + let start_block = Pallet::::get_network_last_lock_block(); + assert_eq!(start_block, Pallet::::get_current_block_as_u64()); + assert!( + HasMigrationRun::::get(b"migrate_network_lock_reduction_interval".to_vec()), + "HasMigrationRun should be true after migration" + ); + }); +} From 101785afb22ec1e06fb0edf951bbac93823f79c6 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Mon, 15 Sep 2025 11:55:25 +0300 Subject: [PATCH 240/379] Update weights in admin-utils --- pallets/admin-utils/src/lib.rs | 104 ++++++++++++++++++--------------- 1 file changed, 58 insertions(+), 46 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 4af202132f..e4f0a4c98e 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -205,8 +205,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the serving rate limit. #[pallet::call_index(3)] - #[pallet::weight(Weight::from_parts(6_522_000, 0) - .saturating_add(::DbWeight::get().reads(0_u64)) + #[pallet::weight(Weight::from_parts(22_980_000, 0) + .saturating_add(::DbWeight::get().reads(2_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_serving_rate_limit( origin: OriginFor, @@ -232,8 +232,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the minimum difficulty. 
#[pallet::call_index(4)] - #[pallet::weight(Weight::from_parts(15_230_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_390_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_difficulty( origin: OriginFor, @@ -257,8 +257,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the maximum difficulty. #[pallet::call_index(5)] - #[pallet::weight(Weight::from_parts(15_700_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_990_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_difficulty( origin: OriginFor, @@ -291,8 +291,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the weights version key. #[pallet::call_index(6)] - #[pallet::weight(Weight::from_parts(17_460_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_220_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_weights_version_key( origin: OriginFor, @@ -420,8 +420,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the adjustment beta. #[pallet::call_index(12)] - #[pallet::weight(Weight::from_parts(15_170_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_890_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_weight_limit( origin: OriginFor, @@ -454,8 +454,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. 
/// The extrinsic will call the Subtensor pallet to set the immunity period. #[pallet::call_index(13)] - #[pallet::weight(Weight::from_parts(15_510_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_620_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_immunity_period( origin: OriginFor, @@ -488,8 +488,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the minimum allowed weights. #[pallet::call_index(14)] - #[pallet::weight(Weight::from_parts(15_220_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_630_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_allowed_weights( origin: OriginFor, @@ -550,8 +550,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the kappa. #[pallet::call_index(16)] - #[pallet::weight(Weight::from_parts(16_740_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_210_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_kappa(origin: OriginFor, netuid: NetUid, kappa: u16) -> DispatchResult { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( @@ -578,8 +578,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the rho. 
#[pallet::call_index(17)] - #[pallet::weight(Weight::from_parts(12_570_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(23_360_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_rho(origin: OriginFor, netuid: NetUid, rho: u16) -> DispatchResult { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( @@ -606,8 +606,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the activity cutoff. #[pallet::call_index(18)] - #[pallet::weight(Weight::from_parts(17_510_000, 0) - .saturating_add(::DbWeight::get().reads(2_u64)) + #[pallet::weight(Weight::from_parts(28_720_000, 0) + .saturating_add(::DbWeight::get().reads(4_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_activity_cutoff( origin: OriginFor, @@ -707,8 +707,8 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the target registrations per interval. #[pallet::call_index(21)] - #[pallet::weight(Weight::from_parts(15_320_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(25_860_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_target_registrations_per_interval( origin: OriginFor, @@ -735,8 +735,8 @@ pub mod pallet { /// It is only callable by root and subnet owner. /// The extrinsic will call the Subtensor pallet to set the minimum burn. 
#[pallet::call_index(22)] - #[pallet::weight(Weight::from_parts(18_870_000, 0) - .saturating_add(::DbWeight::get().reads(2_u64)) + #[pallet::weight(Weight::from_parts(29_970_000, 0) + .saturating_add(::DbWeight::get().reads(4_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_burn( origin: OriginFor, @@ -775,8 +775,8 @@ pub mod pallet { /// It is only callable by root and subnet owner. /// The extrinsic will call the Subtensor pallet to set the maximum burn. #[pallet::call_index(23)] - #[pallet::weight(Weight::from_parts(19_420_000, 0) - .saturating_add(::DbWeight::get().reads(2_u64)) + #[pallet::weight(Weight::from_parts(30_510_000, 0) + .saturating_add(::DbWeight::get().reads(4_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_burn( origin: OriginFor, @@ -815,8 +815,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the difficulty. #[pallet::call_index(24)] - #[pallet::weight(Weight::from_parts(15_650_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_230_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_difficulty( origin: OriginFor, @@ -837,8 +837,8 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the maximum allowed validators. #[pallet::call_index(25)] - #[pallet::weight(Weight::from_parts(19_300_000, 0) - .saturating_add(::DbWeight::get().reads(2_u64)) + #[pallet::weight(Weight::from_parts(30_930_000, 0) + .saturating_add(::DbWeight::get().reads(4_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_allowed_validators( origin: OriginFor, @@ -870,8 +870,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. 
/// The extrinsic will call the Subtensor pallet to set the bonds moving average. #[pallet::call_index(26)] - #[pallet::weight(Weight::from_parts(15_140_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_270_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_bonds_moving_average( origin: OriginFor, @@ -910,8 +910,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the bonds penalty. #[pallet::call_index(60)] - #[pallet::weight(Weight::from_parts(16_220_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_890_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_bonds_penalty( origin: OriginFor, @@ -942,8 +942,8 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the maximum registrations per block. #[pallet::call_index(27)] - #[pallet::weight(Weight::from_parts(15_080_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_970_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_max_registrations_per_block( origin: OriginFor, @@ -1010,8 +1010,8 @@ pub mod pallet { /// It is only callable by the root account. /// The extrinsic will call the Subtensor pallet to set the tempo. 
#[pallet::call_index(30)] - #[pallet::weight(Weight::from_parts(15_180_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(25_790_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_tempo(origin: OriginFor, netuid: NetUid, tempo: u16) -> DispatchResult { pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; @@ -1194,8 +1194,8 @@ pub mod pallet { /// It is only callable by the root account or subnet owner. /// The extrinsic will call the Subtensor pallet to set the value. #[pallet::call_index(49)] - #[pallet::weight(Weight::from_parts(15_150_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_730_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_commit_reveal_weights_enabled( origin: OriginFor, @@ -1362,8 +1362,8 @@ pub mod pallet { /// # Weight /// Weight is handled by the `#[pallet::weight]` attribute. #[pallet::call_index(57)] - #[pallet::weight(Weight::from_parts(19_320_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(26_950_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_commit_reveal_weights_interval( origin: OriginFor, @@ -1786,8 +1786,8 @@ pub mod pallet { /// Sets the number of immune owner neurons #[pallet::call_index(72)] - #[pallet::weight(Weight::from_parts(4_639_000, 0) - .saturating_add(::DbWeight::get().reads(0_u64)) + #[pallet::weight(Weight::from_parts(18_020_000, 0) + .saturating_add(::DbWeight::get().reads(2_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_owner_immune_neuron_limit( origin: OriginFor, @@ -1825,7 +1825,13 @@ pub mod pallet { /// Sets the admin freeze window length (in blocks) at the end of a tempo. /// Only callable by root. 
#[pallet::call_index(74)] - #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + #[pallet::weight(( + Weight::from_parts(5_771_000, 0) + .saturating_add(::DbWeight::get().reads(0_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)), + DispatchClass::Operational, + Pays::No + ))] pub fn sudo_set_admin_freeze_window(origin: OriginFor, window: u16) -> DispatchResult { ensure_root(origin)?; pallet_subtensor::Pallet::::set_admin_freeze_window(window); @@ -1836,7 +1842,13 @@ pub mod pallet { /// Sets the owner hyperparameter rate limit (in blocks). /// Only callable by root. #[pallet::call_index(75)] - #[pallet::weight((0, DispatchClass::Operational, Pays::No))] + #[pallet::weight(( + Weight::from_parts(5_701_000, 0) + .saturating_add(::DbWeight::get().reads(0_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)), + DispatchClass::Operational, + Pays::No + ))] pub fn sudo_set_owner_hparam_rate_limit( origin: OriginFor, limit: u64, From 52b4cef42c89900aaf2aaacacdf1e30963f1c5fb Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Mon, 15 Sep 2025 17:01:57 +0300 Subject: [PATCH 241/379] Make set_admin_freeze_window and set_owner_hparam_rate_limit Pays::Yes --- pallets/admin-utils/src/lib.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index e4f0a4c98e..424fe1c6aa 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1829,8 +1829,7 @@ pub mod pallet { Weight::from_parts(5_771_000, 0) .saturating_add(::DbWeight::get().reads(0_u64)) .saturating_add(::DbWeight::get().writes(1_u64)), - DispatchClass::Operational, - Pays::No + DispatchClass::Operational ))] pub fn sudo_set_admin_freeze_window(origin: OriginFor, window: u16) -> DispatchResult { ensure_root(origin)?; @@ -1846,8 +1845,7 @@ pub mod pallet { Weight::from_parts(5_701_000, 0) .saturating_add(::DbWeight::get().reads(0_u64)) .saturating_add(::DbWeight::get().writes(1_u64)), - 
DispatchClass::Operational, - Pays::No + DispatchClass::Operational ))] pub fn sudo_set_owner_hparam_rate_limit( origin: OriginFor, From d9fb4d22c15c00bbb72f3ec4e61fc31c3d4af5ce Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 15 Sep 2025 07:36:08 -0700 Subject: [PATCH 242/379] fix merge --- pallets/subtensor/src/coinbase/root.rs | 36 ++++++++-------------- pallets/subtensor/src/macros/dispatches.rs | 2 +- pallets/subtensor/src/tests/networks.rs | 32 ++++++++++++------- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 4702eb3e85..03a82cac93 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -391,7 +391,6 @@ impl Pallet { // --- 1. Get the owner and remove from SubnetOwner. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); SubnetOwner::::remove(netuid); - let subsubnets: u8 = SubsubnetCountCurrent::::get(netuid).into(); // --- 2. Remove network count. SubnetworkN::::remove(netuid); @@ -409,16 +408,6 @@ impl Pallet { let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let keys = Keys::::iter_prefix(netuid).collect::>(); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - for subid in 0..subsubnets { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); - let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); - } - - // --- 7. Remove the weights for this subnet itself. - for subid in 0..subsubnets { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); - let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); - } // --- 8. Iterate over stored weights and fill the matrix. 
for (uid_i, weights_i) in Weights::::iter_prefix(NetUidStorageIndex::ROOT) { @@ -438,17 +427,10 @@ impl Pallet { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - for subid in 0..subsubnets { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); - Incentive::::remove(netuid_index); - } + Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - for subid in 0..subsubnets { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); - LastUpdate::::remove(netuid_index); - } ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); @@ -550,10 +532,18 @@ impl Pallet { let _ = AssociatedEvmAddress::::clear_prefix(netuid, u32::MAX, None); // Commit-reveal / weights commits (all per-net prefixes): - let _ = WeightCommits::::clear_prefix(netuid, u32::MAX, None); - let _ = TimelockedWeightCommits::::clear_prefix(netuid, u32::MAX, None); - let _ = CRV3WeightCommits::::clear_prefix(netuid, u32::MAX, None); - let _ = CRV3WeightCommitsV2::::clear_prefix(netuid, u32::MAX, None); + let subsubnets: u8 = SubsubnetCountCurrent::::get(netuid).into(); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + LastUpdate::::remove(netuid_index); + Incentive::::remove(netuid_index); + let _ = WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + let _ = TimelockedWeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + let _ = CRV3WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + let _ = CRV3WeightCommitsV2::::clear_prefix(netuid_index, u32::MAX, None); + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + } RevealPeriodEpochs::::remove(netuid); // Last hotkey swap (DMAP where netuid is FIRST key → easy) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 
3544ed9f92..3d89eacb5f 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -2346,7 +2346,7 @@ mod dispatches { /// Remove a user's subnetwork /// The caller must be root - #[pallet::call_index(115)] + #[pallet::call_index(120)] #[pallet::weight((Weight::from_parts(119_000_000, 0) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::No))] diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index b70001c2b0..732f93d13d 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -6,7 +6,7 @@ use frame_system::Config; use sp_core::U256; use sp_std::collections::btree_map::BTreeMap; use substrate_fixed::types::{I96F32, U64F64, U96F32}; -use subtensor_runtime_common::TaoCurrency; +use subtensor_runtime_common::{NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::{OrderType, SwapHandler}; #[test] @@ -289,7 +289,6 @@ fn dissolve_clears_all_per_subnet_storages() { SubnetOwner::::insert(net, owner_cold); SubnetOwnerHotkey::::insert(net, owner_hot); SubnetworkN::::insert(net, 0u16); - NetworkModality::::insert(net, 0u16); NetworksAdded::::insert(net, true); NetworkRegisteredAt::::insert(net, 0u64); @@ -298,11 +297,11 @@ fn dissolve_clears_all_per_subnet_storages() { Trust::::insert(net, vec![1u16]); Active::::insert(net, vec![true]); Emission::::insert(net, vec![AlphaCurrency::from(1)]); - Incentive::::insert(net, vec![1u16]); + Incentive::::insert(NetUidStorageIndex::from(net), vec![1u16]); Consensus::::insert(net, vec![1u16]); Dividends::::insert(net, vec![1u16]); PruningScores::::insert(net, vec![1u16]); - LastUpdate::::insert(net, vec![0u64]); + LastUpdate::::insert(NetUidStorageIndex::from(net), vec![0u64]); ValidatorPermit::::insert(net, vec![true]); ValidatorTrust::::insert(net, vec![1u16]); @@ -334,8 +333,8 @@ fn 
dissolve_clears_all_per_subnet_storages() { // Prefix / double-map collections Keys::::insert(net, 0u16, owner_hot); - Bonds::::insert(net, 0u16, vec![(0u16, 1u16)]); - Weights::::insert(net, 0u16, vec![(1u16, 1u16)]); + Bonds::::insert(NetUidStorageIndex::from(net), 0u16, vec![(0u16, 1u16)]); + Weights::::insert(NetUidStorageIndex::from(net), 0u16, vec![(1u16, 1u16)]); // Membership entry for the SAME hotkey as Keys IsNetworkMember::::insert(owner_hot, net, true); @@ -437,7 +436,6 @@ fn dissolve_clears_all_per_subnet_storages() { assert!(!SubnetOwner::::contains_key(net)); assert!(!SubnetOwnerHotkey::::contains_key(net)); assert!(!SubnetworkN::::contains_key(net)); - assert!(!NetworkModality::::contains_key(net)); assert!(!NetworksAdded::::contains_key(net)); assert!(!NetworkRegisteredAt::::contains_key(net)); @@ -446,11 +444,15 @@ fn dissolve_clears_all_per_subnet_storages() { assert!(!Trust::::contains_key(net)); assert!(!Active::::contains_key(net)); assert!(!Emission::::contains_key(net)); - assert!(!Incentive::::contains_key(net)); + assert!(!Incentive::::contains_key(NetUidStorageIndex::from( + net + ))); assert!(!Consensus::::contains_key(net)); assert!(!Dividends::::contains_key(net)); assert!(!PruningScores::::contains_key(net)); - assert!(!LastUpdate::::contains_key(net)); + assert!(!LastUpdate::::contains_key(NetUidStorageIndex::from( + net + ))); assert!(!ValidatorPermit::::contains_key(net)); assert!(!ValidatorTrust::::contains_key(net)); @@ -483,8 +485,16 @@ fn dissolve_clears_all_per_subnet_storages() { // Collections fully cleared assert!(Keys::::iter_prefix(net).next().is_none()); - assert!(Bonds::::iter_prefix(net).next().is_none()); - assert!(Weights::::iter_prefix(net).next().is_none()); + assert!( + Bonds::::iter_prefix(NetUidStorageIndex::from(net)) + .next() + .is_none() + ); + assert!( + Weights::::iter_prefix(NetUidStorageIndex::from(net)) + .next() + .is_none() + ); assert!(!IsNetworkMember::::contains_key(owner_hot, net)); // Token / 
price / provided reserves From b796bb6a9dd5ef788d93cbdc8cf6f62f2529f5f0 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 15 Sep 2025 16:08:28 +0000 Subject: [PATCH 243/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 1325a6258c..a8b981048b 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -302,7 +302,7 @@ mod dispatches { /// #[pallet::call_index(100)] #[pallet::weight((Weight::from_parts(100_500_000, 0) - .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn batch_commit_weights( origin: OriginFor, @@ -932,7 +932,7 @@ mod dispatches { /// - The ip type v4 or v6. /// #[pallet::call_index(5)] - #[pallet::weight((Weight::from_parts(42_000_000, 0) + #[pallet::weight((Weight::from_parts(29_760_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_prometheus( @@ -1299,8 +1299,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) - .saturating_add(T::DbWeight::get().reads(36)) - .saturating_add(T::DbWeight::get().writes(52)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(37_u64)) + .saturating_add(T::DbWeight::get().writes(51_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1586,8 +1586,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) - 
.saturating_add(T::DbWeight::get().reads(35)) - .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(36_u64)) + .saturating_add(T::DbWeight::get().writes(50_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, From 2ce716bd26558b1a0d6fa14cc860d0b8f6e56d32 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Mon, 15 Sep 2025 13:27:21 -0300 Subject: [PATCH 244/379] MinAllowedUids to 64 --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index c81924398a..1c7a026380 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1152,7 +1152,7 @@ parameter_types! { pub const SubtensorInitialRAORecycledForRegistration: u64 = 0; // 0 rao pub const SubtensorInitialSenateRequiredStakePercentage: u64 = 1; // 1 percent of total stake pub const SubtensorInitialNetworkImmunity: u64 = 7 * 7200; - pub const SubtensorInitialMinAllowedUids: u16 = 256; + pub const SubtensorInitialMinAllowedUids: u16 = 64; pub const SubtensorInitialMinLockCost: u64 = 1_000_000_000_000; // 1000 TAO pub const SubtensorInitialSubnetOwnerCut: u16 = 11_796; // 18 percent // pub const SubtensorInitialSubnetLimit: u16 = 12; // (DEPRECATED) From f0e3a113265e31db1d05bcf42ab01ec45c60de18 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Mon, 15 Sep 2025 12:43:28 -0400 Subject: [PATCH 245/379] Fix subsubnet rate limiting and add tests --- pallets/admin-utils/src/lib.rs | 8 +-- pallets/admin-utils/src/tests/mod.rs | 61 ++++++++++++++++++++ pallets/subtensor/src/lib.rs | 7 ++- pallets/subtensor/src/subnets/subsubnet.rs | 2 +- pallets/subtensor/src/utils/rate_limiting.rs | 12 ++-- 5 files changed, 80 insertions(+), 10 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 4af202132f..89931d22b1 100644 --- a/pallets/admin-utils/src/lib.rs +++ 
b/pallets/admin-utils/src/lib.rs @@ -1860,7 +1860,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::SubsubnetParameterUpdate], + &[TransactionType::SubsubnetCountUpdate], )?; pallet_subtensor::Pallet::::do_set_subsubnet_count(netuid, subsub_count)?; @@ -1868,7 +1868,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::SubsubnetParameterUpdate], + &[TransactionType::SubsubnetCountUpdate], ); Ok(()) } @@ -1886,7 +1886,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::SubsubnetParameterUpdate], + &[TransactionType::SubsubnetEmission], )?; pallet_subtensor::Pallet::::do_set_emission_split(netuid, maybe_split)?; @@ -1894,7 +1894,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::SubsubnetParameterUpdate], + &[TransactionType::SubsubnetEmission], ); Ok(()) } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 25b1b89607..b4741bbfd9 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2269,3 +2269,64 @@ fn test_sudo_set_subsubnet_count() { )); }); } + +// cargo test --package pallet-admin-utils --lib -- tests::test_sudo_set_subsubnet_count_and_emissions --exact --show-output +#[test] +fn test_sudo_set_subsubnet_count_and_emissions() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let ss_count_ok = SubId::from(2); + + let sn_owner = U256::from(1324); + add_network(netuid, 10); + // Set the Subnet Owner + SubnetOwner::::insert(netuid, sn_owner); + + assert_ok!(AdminUtils::sudo_set_subsubnet_count( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + ss_count_ok + )); + + // Cannot set emission split with wrong number of entries + // With two subsubnets the size of the split 
vector should be 2, not 3 + assert_noop!( + AdminUtils::sudo_set_subsubnet_emission_split( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + Some(vec![0xFFFF / 5 * 2, 0xFFFF / 5 * 2, 0xFFFF / 5]) + ), + pallet_subtensor::Error::::InvalidValue + ); + + // Cannot set emission split with wrong total of entries + // Split vector entries should sum up to exactly 0xFFFF + assert_noop!( + AdminUtils::sudo_set_subsubnet_emission_split( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + Some(vec![0xFFFF / 5 * 4, 0xFFFF / 5 - 1]) + ), + pallet_subtensor::Error::::InvalidValue + ); + + // Can set good split ok + // We also verify here that it can happen in the same block as setting subsubnet counts + // or soon, without rate limiting + assert_ok!(AdminUtils::sudo_set_subsubnet_emission_split( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + Some(vec![0xFFFF / 5, 0xFFFF / 5 * 4]) + )); + + // Cannot set it again due to rate limits + assert_noop!( + AdminUtils::sudo_set_subsubnet_emission_split( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + Some(vec![0xFFFF / 5 * 4, 0xFFFF / 5]) + ), + pallet_subtensor::Error::::TxRateLimitExceeded + ); + }); +} diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index dd12a9b76b..18dfca8ea9 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1840,7 +1840,12 @@ pub mod pallet { #[pallet::type_value] /// -- ITEM (Rate limit for subsubnet count updates) pub fn SubsubnetCountSetRateLimit() -> u64 { - prod_or_fast!(7_200, 0) + prod_or_fast!(7_200, 1) + } + #[pallet::type_value] + /// -- ITEM (Rate limit for subsubnet emission distribution updates) + pub fn SubsubnetEmissionRateLimit() -> u64 { + prod_or_fast!(7_200, 1) } #[pallet::storage] /// --- MAP ( netuid ) --> Current number of sub-subnets diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 904c380463..337bf809fd 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ 
b/pallets/subtensor/src/subnets/subsubnet.rs @@ -172,7 +172,7 @@ impl Pallet { // Check that values add up to 65535 let total: u64 = split.iter().map(|s| *s as u64).sum(); - ensure!(total <= u16::MAX as u64, Error::::InvalidValue); + ensure!(total == u16::MAX as u64, Error::::InvalidValue); SubsubnetEmissionSplit::::insert(netuid, split); } else { diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index e9a8bb7b12..a1f7e6aac6 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -12,7 +12,8 @@ pub enum TransactionType { SetWeightsVersionKey, SetSNOwnerHotkey, OwnerHyperparamUpdate, - SubsubnetParameterUpdate, + SubsubnetCountUpdate, + SubsubnetEmission, } /// Implement conversion from TransactionType to u16 @@ -26,7 +27,8 @@ impl From for u16 { TransactionType::SetWeightsVersionKey => 4, TransactionType::SetSNOwnerHotkey => 5, TransactionType::OwnerHyperparamUpdate => 6, - TransactionType::SubsubnetParameterUpdate => 7, + TransactionType::SubsubnetCountUpdate => 7, + TransactionType::SubsubnetEmission => 8, } } } @@ -41,7 +43,8 @@ impl From for TransactionType { 4 => TransactionType::SetWeightsVersionKey, 5 => TransactionType::SetSNOwnerHotkey, 6 => TransactionType::OwnerHyperparamUpdate, - 7 => TransactionType::SubsubnetParameterUpdate, + 7 => TransactionType::SubsubnetCountUpdate, + 8 => TransactionType::SubsubnetEmission, _ => TransactionType::Unknown, } } @@ -57,7 +60,8 @@ impl Pallet { TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), TransactionType::RegisterNetwork => NetworkRateLimit::::get(), TransactionType::OwnerHyperparamUpdate => OwnerHyperparamRateLimit::::get(), - TransactionType::SubsubnetParameterUpdate => SubsubnetCountSetRateLimit::::get(), + TransactionType::SubsubnetCountUpdate => SubsubnetCountSetRateLimit::::get(), + TransactionType::SubsubnetEmission => SubsubnetEmissionRateLimit::::get(), 
TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, From a051137af5eee89fbf1270d6f66a818c813c07cd Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Mon, 15 Sep 2025 16:18:12 -0300 Subject: [PATCH 246/379] extract rate limit --- pallets/admin-utils/src/lib.rs | 4 ++-- pallets/subtensor/src/lib.rs | 5 +++++ pallets/subtensor/src/utils/rate_limiting.rs | 8 ++++---- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index a3497de7bc..007f3d2b10 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1930,7 +1930,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin.clone(), netuid, - &[TransactionType::SetMaxAllowedUIDS], + &[TransactionType::SetMaxAllowedUids], )?; pallet_subtensor::Pallet::::trim_to_max_allowed_uids(netuid, max_n)?; @@ -1938,7 +1938,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::SetMaxAllowedUIDS], + &[TransactionType::SetMaxAllowedUids], ); Ok(()) } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 7780e1eb33..8af6f82724 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -618,6 +618,11 @@ pub mod pallet { T::InitialMaxAllowedUids::get() } #[pallet::type_value] + /// -- Rate limit for set max allowed UIDs + pub fn SetMaxAllowedUidsRateLimit() -> u64 { + prod_or_fast!(30 * 7200, 1) + } + #[pallet::type_value] /// Default immunity period. 
pub fn DefaultImmunityPeriod() -> u16 { T::InitialImmunityPeriod::get() diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index 2e1b890440..d8427c1ba4 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -14,7 +14,7 @@ pub enum TransactionType { OwnerHyperparamUpdate, SubsubnetCountUpdate, SubsubnetEmission, - SetMaxAllowedUIDS, + SetMaxAllowedUids, } /// Implement conversion from TransactionType to u16 @@ -30,7 +30,7 @@ impl From for u16 { TransactionType::OwnerHyperparamUpdate => 6, TransactionType::SubsubnetCountUpdate => 7, TransactionType::SubsubnetEmission => 8, - TransactionType::SetMaxAllowedUIDS => 9, + TransactionType::SetMaxAllowedUids => 9, } } } @@ -47,7 +47,7 @@ impl From for TransactionType { 6 => TransactionType::OwnerHyperparamUpdate, 7 => TransactionType::SubsubnetCountUpdate, 8 => TransactionType::SubsubnetEmission, - 9 => TransactionType::SetMaxAllowedUIDS, + 9 => TransactionType::SetMaxAllowedUids, _ => TransactionType::Unknown, } } @@ -65,7 +65,7 @@ impl Pallet { TransactionType::OwnerHyperparamUpdate => OwnerHyperparamRateLimit::::get(), TransactionType::SubsubnetCountUpdate => SubsubnetCountSetRateLimit::::get(), TransactionType::SubsubnetEmission => SubsubnetEmissionRateLimit::::get(), - TransactionType::SetMaxAllowedUIDS => 7200 * 30, + TransactionType::SetMaxAllowedUids => SetMaxAllowedUidsRateLimit::::get(), TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, } From ac5d6b3815a7731e204fa074301fdce1b67b77f4 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Mon, 15 Sep 2025 16:19:26 -0300 Subject: [PATCH 247/379] fix benchmarks --- pallets/admin-utils/src/benchmarking.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index c01a70d3b5..75c373372d 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ 
b/pallets/admin-utils/src/benchmarking.rs @@ -240,6 +240,7 @@ mod benchmarks { #[benchmark] fn sudo_set_min_allowed_uids() { + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*tempo*/ @@ -445,6 +446,7 @@ mod benchmarks { #[benchmark] fn sudo_trim_to_max_allowed_uids() { + pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( 1u16.into(), /*netuid*/ 1u16, /*sudo_tempo*/ From ce11f6ba6c07395b3362ab6efb6d7b937fa3313c Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 15 Sep 2025 12:59:53 -0700 Subject: [PATCH 248/379] migrate_restore_subnet_locked --- pallets/subtensor/src/macros/hooks.rs | 4 +- .../src/migrations/migrate_subnet_locked.rs | 118 ++++++++++++++++ pallets/subtensor/src/migrations/mod.rs | 1 + pallets/subtensor/src/tests/migration.rs | 131 ++++++++++++++++++ 4 files changed, 253 insertions(+), 1 deletion(-) create mode 100644 pallets/subtensor/src/migrations/migrate_subnet_locked.rs diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 13713354c8..a3cb7a692f 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -145,7 +145,9 @@ mod hooks { // Migrate Subnet Limit .saturating_add(migrations::migrate_subnet_limit_to_default::migrate_subnet_limit_to_default::()) // Migrate Lock Reduction Interval - .saturating_add(migrations::migrate_network_lock_reduction_interval::migrate_network_lock_reduction_interval::()); + .saturating_add(migrations::migrate_network_lock_reduction_interval::migrate_network_lock_reduction_interval::()) + // Migrate subnet locked balances + .saturating_add(migrations::migrate_subnet_locked::migrate_restore_subnet_locked::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_subnet_locked.rs b/pallets/subtensor/src/migrations/migrate_subnet_locked.rs new file 
mode 100644 index 0000000000..40f199e8af --- /dev/null +++ b/pallets/subtensor/src/migrations/migrate_subnet_locked.rs @@ -0,0 +1,118 @@ +use super::*; +use frame_support::weights::Weight; +use log; +use scale_info::prelude::string::String; +use crate::{Config, HasMigrationRun, SubnetLocked, TaoCurrency}; +use subtensor_runtime_common::NetUid; + +pub fn migrate_restore_subnet_locked() -> Weight { + // Track whether we've already run this migration + let migration_name = b"migrate_restore_subnet_locked".to_vec(); + let mut weight = T::DbWeight::get().reads(1); + + if HasMigrationRun::::get(&migration_name) { + log::info!( + target: "runtime", + "Migration '{}' already run - skipping.", + String::from_utf8_lossy(&migration_name) + ); + return weight; + } + + // (netuid, locked_rao) pairs taken from the historical snapshot (block #4_828_623). + const SUBNET_LOCKED: &[(u16, u64)] = &[ + ( 2, 976_893_069_056), + ( 3,2_569_362_397_490), + ( 4,1_928_551_593_932), + ( 5,1_712_540_082_588), + ( 6,1_495_929_556_770), + ( 7,1_011_702_451_936), + ( 8, 337_484_391_024), + ( 9, 381_240_180_320), + ( 10,1_253_515_128_353), + ( 11,1_453_924_672_132), + ( 12, 100_000_000_000), + ( 13, 100_000_000_000), + ( 14,1_489_714_521_808), + ( 15,1_784_089_225_496), + ( 16, 889_176_219_484), + ( 17,1_266_310_122_772), + ( 18, 222_355_058_433), + ( 19, 100_000_000_000), + ( 20, 100_000_000_000), + ( 21, 885_096_322_978), + ( 22, 100_000_000_000), + ( 23, 100_000_000_000), + ( 24,5_146_073_854_481), + ( 25,1_782_920_948_214), + ( 26, 153_583_865_248), + ( 27, 201_344_183_084), + ( 28, 901_455_879_445), + ( 29, 175_000_001_600), + ( 30,1_419_730_660_074), + ( 31, 319_410_100_502), + ( 32,2_016_397_028_246), + ( 33,1_626_477_274_174), + ( 34,1_455_297_496_345), + ( 35,1_191_275_979_639), + ( 36,1_097_008_574_216), + ( 37, 864_664_455_362), + ( 38,1_001_936_494_076), + ( 39,1_366_096_404_884), + ( 40, 100_000_000_000), + ( 41, 535_937_523_200), + ( 42,1_215_698_423_344), + ( 43,1_641_308_676_800), 
+ ( 44,1_514_636_189_434), + ( 45,1_605_608_381_438), + ( 46,1_095_943_027_350), + ( 47,1_499_235_469_986), + ( 48,1_308_073_720_362), + ( 49,1_222_672_092_068), + ( 50,2_628_355_421_561), + ( 51,1_520_860_720_561), + ( 52,1_794_457_248_725), + ( 53,1_721_472_811_492), + ( 54,2_048_900_691_868), + ( 55,1_278_597_446_119), + ( 56,2_016_045_544_480), + ( 57,1_920_563_399_676), + ( 58,2_246_525_691_504), + ( 59,1_776_159_384_888), + ( 60,2_173_138_865_414), + ( 61,1_435_634_867_728), + ( 62,2_061_282_563_888), + ( 63,3_008_967_320_998), + ( 64,2_099_236_359_026), + ]; + + let mut inserted: u32 = 0; + let mut total_rao: u128 = 0; + + // ── 1) Re-insert the historical values ──────────────────────────────── + for &(netuid_u16, amount_rao_u64) in SUBNET_LOCKED.iter() { + let key: NetUid = NetUid::from(netuid_u16); + let amount: TaoCurrency = TaoCurrency::from(amount_rao_u64); + + SubnetLocked::::insert(key, amount); + + inserted = inserted.saturating_add(1); + total_rao = total_rao.saturating_add(amount_rao_u64 as u128); + + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + } + + // ── 2) Mark migration done ──────────────────────────────────────────── + HasMigrationRun::::insert(&migration_name, true); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + log::info!( + target: "runtime", + "Migration '{}' completed - inserted {} SubnetLocked entries; total≈{} RAO.", + String::from_utf8_lossy(&migration_name), + inserted, + total_rao + ); + + weight +} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index f6339dc266..e7c50c0080 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -42,6 +42,7 @@ pub mod migrate_set_subtoken_enabled; pub mod migrate_stake_threshold; pub mod migrate_subnet_identities_to_v3; pub mod migrate_subnet_limit_to_default; +pub mod migrate_subnet_locked; pub mod migrate_subnet_symbols; pub mod migrate_subnet_volume; pub mod 
migrate_to_v1_separate_emission; diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index b09420ae8a..f052d70e96 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -1783,3 +1783,134 @@ fn test_migrate_network_lock_reduction_interval_and_decay() { ); }); } + +#[test] +fn test_migrate_restore_subnet_locked_feb1_2025() { + use sp_runtime::traits::SaturatedConversion; // only for NetUid -> u16 when reading back + use std::collections::BTreeMap; + + use crate::{HasMigrationRun, SubnetLocked, TaoCurrency}; + + // NOTE: Ensure the migration uses `TaoCurrency::from(rao_u64)` and a `&[(u16, u64)]` snapshot. + new_test_ext(0).execute_with(|| { + // ── pre ────────────────────────────────────────────────────────────── + let name = b"migrate_restore_subnet_locked".to_vec(); + assert!( + !HasMigrationRun::::get(name.clone()), + "HasMigrationRun should be false before migration" + ); + + // Snapshot at block #4_828_623 (2025-02-01 00:00:00Z), RAO as u64. 
+ const EXPECTED: &[(u16, u64)] = &[ + (2, 976_893_069_056), + (3, 2_569_362_397_490), + (4, 1_928_551_593_932), + (5, 1_712_540_082_588), + (6, 1_495_929_556_770), + (7, 1_011_702_451_936), + (8, 337_484_391_024), + (9, 381_240_180_320), + (10, 1_253_515_128_353), + (11, 1_453_924_672_132), + (12, 100_000_000_000), + (13, 100_000_000_000), + (14, 1_489_714_521_808), + (15, 1_784_089_225_496), + (16, 889_176_219_484), + (17, 1_266_310_122_772), + (18, 222_355_058_433), + (19, 100_000_000_000), + (20, 100_000_000_000), + (21, 885_096_322_978), + (22, 100_000_000_000), + (23, 100_000_000_000), + (24, 5_146_073_854_481), + (25, 1_782_920_948_214), + (26, 153_583_865_248), + (27, 201_344_183_084), + (28, 901_455_879_445), + (29, 175_000_001_600), + (30, 1_419_730_660_074), + (31, 319_410_100_502), + (32, 2_016_397_028_246), + (33, 1_626_477_274_174), + (34, 1_455_297_496_345), + (35, 1_191_275_979_639), + (36, 1_097_008_574_216), + (37, 864_664_455_362), + (38, 1_001_936_494_076), + (39, 1_366_096_404_884), + (40, 100_000_000_000), + (41, 535_937_523_200), + (42, 1_215_698_423_344), + (43, 1_641_308_676_800), + (44, 1_514_636_189_434), + (45, 1_605_608_381_438), + (46, 1_095_943_027_350), + (47, 1_499_235_469_986), + (48, 1_308_073_720_362), + (49, 1_222_672_092_068), + (50, 2_628_355_421_561), + (51, 1_520_860_720_561), + (52, 1_794_457_248_725), + (53, 1_721_472_811_492), + (54, 2_048_900_691_868), + (55, 1_278_597_446_119), + (56, 2_016_045_544_480), + (57, 1_920_563_399_676), + (58, 2_246_525_691_504), + (59, 1_776_159_384_888), + (60, 2_173_138_865_414), + (61, 1_435_634_867_728), + (62, 2_061_282_563_888), + (63, 3_008_967_320_998), + (64, 2_099_236_359_026), + ]; + + // ── run migration ──────────────────────────────────────────────────── + let weight = + crate::migrations::migrate_subnet_locked::migrate_restore_subnet_locked::(); + assert!(!weight.is_zero(), "migration weight should be > 0"); + + // ── validate: build a (u16 -> u64) map directly from storage 
iterator ─ + let actual: BTreeMap = SubnetLocked::::iter() + .map(|(k, v)| (k.saturated_into::(), u64::from(v))) + .collect(); + + let expected: BTreeMap = EXPECTED.iter().copied().collect(); + + // 1) exact content match (keys and values) + assert_eq!(actual, expected, "SubnetLocked map mismatch with snapshot"); + + // 2) count and total sum match expectations + let expected_len = expected.len(); + let expected_sum: u128 = expected.values().map(|v| *v as u128).sum(); + + let count_after = actual.len(); + let sum_after: u128 = actual.values().map(|v| *v as u128).sum(); + + assert_eq!(count_after, expected_len, "entry count mismatch"); + assert_eq!(sum_after, expected_sum, "total RAO sum mismatch"); + + // ── migration flag ─────────────────────────────────────────────────── + assert!( + HasMigrationRun::::get(name.clone()), + "HasMigrationRun should be true after migration" + ); + + // ── idempotence: re-running does not change storage ───────────────── + let before = actual; + + let _again = + crate::migrations::migrate_subnet_locked::migrate_restore_subnet_locked::(); + + let after: BTreeMap = SubnetLocked::::iter() + .map(|(k, v)| (k.saturated_into::(), u64::from(v))) + .collect(); + + assert_eq!( + before, after, + "re-running the migration should not change storage" + ); + }); +} From e3e377ffb1dd0432117f2ecfb482d3b7424dd356 Mon Sep 17 00:00:00 2001 From: Aliaksandr Tsurko Date: Mon, 15 Sep 2025 23:22:18 +0300 Subject: [PATCH 249/379] Remove owner hparam to epochs migration --- pallets/subtensor/src/macros/hooks.rs | 2 -- .../migrate_owner_hparam_rl_to_epochs.rs | 36 ------------------- pallets/subtensor/src/migrations/mod.rs | 1 - 3 files changed, 39 deletions(-) delete mode 100644 pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index cbb72eed1f..b43f9422df 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ 
b/pallets/subtensor/src/macros/hooks.rs @@ -138,8 +138,6 @@ mod hooks { .saturating_add(migrations::migrate_fix_root_tao_and_alpha_in::migrate_fix_root_tao_and_alpha_in::()) // Migrate last block rate limiting storage items .saturating_add(migrations::migrate_rate_limiting_last_blocks::migrate_obsolete_rate_limiting_last_blocks_storage::()) - // Remove deprecated OwnerHyperparamRateLimit storage item - .saturating_add(migrations::migrate_owner_hparam_rl_to_epochs::migrate_owner_hyperparam_rl_to_epochs::()) // Migrate remove network modality .saturating_add(migrations::migrate_remove_network_modality::migrate_remove_network_modality::()); weight diff --git a/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs b/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs deleted file mode 100644 index 236bb10d5f..0000000000 --- a/pallets/subtensor/src/migrations/migrate_owner_hparam_rl_to_epochs.rs +++ /dev/null @@ -1,36 +0,0 @@ -use super::*; -use crate::HasMigrationRun; -use codec::Decode; -use frame_support::weights::Weight; -use sp_io::hashing::twox_128; -use sp_io::storage::get; - -/// Migrate u64 to u16 in OwnerHyperparamRateLimit and new default -pub fn migrate_owner_hyperparam_rl_to_epochs() -> Weight { - let migration_name = b"migrate_owner_hyperparam_rl_to_epochs".to_vec(); - let mut weight = T::DbWeight::get().reads(1); - - if HasMigrationRun::::get(&migration_name) { - log::info!("Migration '{migration_name:?}' already executed. 
Skipping."); - return weight; - } - - let pallet_name = twox_128("SubtensorModule".as_bytes()); - let storage_name = twox_128("OwnerHyperparamRateLimit".as_bytes()); - let full_key = [pallet_name, storage_name].concat(); - - if let Some(value_bytes) = get(&full_key) { - if let Ok(old_limit_blocks) = ::decode(&mut &value_bytes[..]) { - if old_limit_blocks == 0u64 { - // Preserve disabled state - Pallet::::set_owner_hyperparam_rate_limit(0); - } - } - - weight = weight.saturating_add(T::DbWeight::get().writes(1)); - } - - HasMigrationRun::::insert(&migration_name, true); - weight = weight.saturating_add(T::DbWeight::get().writes(1)); - weight -} diff --git a/pallets/subtensor/src/migrations/mod.rs b/pallets/subtensor/src/migrations/mod.rs index 9f0a6bce7f..b7265cc6d0 100644 --- a/pallets/subtensor/src/migrations/mod.rs +++ b/pallets/subtensor/src/migrations/mod.rs @@ -20,7 +20,6 @@ pub mod migrate_fix_root_tao_and_alpha_in; pub mod migrate_identities_v2; pub mod migrate_init_total_issuance; pub mod migrate_orphaned_storage_items; -pub mod migrate_owner_hparam_rl_to_epochs; pub mod migrate_populate_owned_hotkeys; pub mod migrate_rao; pub mod migrate_rate_limiting_last_blocks; From 13b23fd7a292a64dd3ea3ad81970af1b1824a0d4 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 15 Sep 2025 14:46:11 -0700 Subject: [PATCH 250/379] clear new subsubnet maps --- pallets/subtensor/src/coinbase/root.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 03a82cac93..e3acbf3432 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -545,6 +545,8 @@ impl Pallet { let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); } RevealPeriodEpochs::::remove(netuid); + SubsubnetCountCurrent::::remove(netuid); + SubsubnetEmissionSplit::::remove(netuid); // Last hotkey swap (DMAP where netuid is FIRST key → easy) 
let _ = LastHotkeySwapOnNetuid::::clear_prefix(netuid, u32::MAX, None); From d38f34aa949f24b2a1e77f8443f5c8382ad31a8c Mon Sep 17 00:00:00 2001 From: open-junius Date: Tue, 16 Sep 2025 20:44:24 +0800 Subject: [PATCH 251/379] bump version --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index d49d5147e9..aa87c52a19 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 316, + spec_version: 317, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From aa4a23e41049df57129ea8dd7e3bf55afaa5d534 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 16 Sep 2025 11:04:15 -0300 Subject: [PATCH 252/379] fix benchmarks --- pallets/admin-utils/src/benchmarking.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 513df4a908..8ee5ec996b 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -11,6 +11,8 @@ use frame_benchmarking::v1::account; use frame_benchmarking::v2::*; use frame_support::BoundedVec; use frame_system::RawOrigin; +use pallet_subtensor::SubnetworkN; +use subtensor_runtime_common::NetUid; use super::*; @@ -240,14 +242,18 @@ mod benchmarks { #[benchmark] fn sudo_set_min_allowed_uids() { + let netuid = NetUid::from(1); pallet_subtensor::Pallet::::set_admin_freeze_window(0); pallet_subtensor::Pallet::::init_new_network( - 1u16.into(), /*netuid*/ + netuid, 1u16, /*tempo*/ ); + + // Artificially set that some neurons are already registered + SubnetworkN::::set(netuid, 32); #[extrinsic_call] - _(RawOrigin::Root, 
1u16.into()/*netuid*/, 32u16/*max_allowed_uids*/)/*sudo_set_max_allowed_uids*/; + _(RawOrigin::Root, netuid, 16u16/*min_allowed_uids*/)/*sudo_set_min_allowed_uids*/; } #[benchmark] @@ -453,7 +459,7 @@ mod benchmarks { ); #[extrinsic_call] - _(RawOrigin::Root, 1u16.into()/*netuid*/, 4097u16/*max_allowed_uids*/)/*sudo_trim_to_max_allowed_uids()*/; + _(RawOrigin::Root, 1u16.into()/*netuid*/, 256u16/*max_n*/)/*sudo_trim_to_max_allowed_uids()*/; } //impl_benchmark_test_suite!(AdminUtils, crate::mock::new_test_ext(), crate::mock::Test); From 5da86f1a3609693eb0a26070649983ce09d2e95f Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 16 Sep 2025 11:13:46 -0300 Subject: [PATCH 253/379] cargo fmt --- pallets/admin-utils/src/benchmarking.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/pallets/admin-utils/src/benchmarking.rs b/pallets/admin-utils/src/benchmarking.rs index 8ee5ec996b..67fba62b10 100644 --- a/pallets/admin-utils/src/benchmarking.rs +++ b/pallets/admin-utils/src/benchmarking.rs @@ -244,11 +244,8 @@ mod benchmarks { fn sudo_set_min_allowed_uids() { let netuid = NetUid::from(1); pallet_subtensor::Pallet::::set_admin_freeze_window(0); - pallet_subtensor::Pallet::::init_new_network( - netuid, - 1u16, /*tempo*/ - ); - + pallet_subtensor::Pallet::::init_new_network(netuid, 1u16 /*tempo*/); + // Artificially set that some neurons are already registered SubnetworkN::::set(netuid, 32); From 92ac0cf3e742f0dd5c43779b5d143e32a779343a Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Tue, 16 Sep 2025 11:24:26 -0400 Subject: [PATCH 254/379] add burn subnet alpha helper and TODOs --- pallets/subtensor/src/staking/helpers.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pallets/subtensor/src/staking/helpers.rs b/pallets/subtensor/src/staking/helpers.rs index 7b1b644e85..85789aa5ea 100644 --- a/pallets/subtensor/src/staking/helpers.rs +++ b/pallets/subtensor/src/staking/helpers.rs @@ -323,8 +323,14 @@ impl Pallet { } pub fn 
recycle_subnet_alpha(netuid: NetUid, amount: AlphaCurrency) { + // TODO: record recycled alpha in a tracker SubnetAlphaOut::::mutate(netuid, |total| { *total = total.saturating_sub(amount); }); } + + pub fn burn_subnet_alpha(netuid: NetUid, amount: AlphaCurrency) { + // Do nothing; TODO: record burned alpha in a tracker + return; + } } From 0a3be2d43c81acf2291da54f41001f0c3abfb4a8 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Tue, 16 Sep 2025 11:24:44 -0400 Subject: [PATCH 255/379] use helpers for recycle/burn in coinbase --- pallets/subtensor/src/coinbase/run_coinbase.rs | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 9665bfb586..ee0e50b96e 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -501,22 +501,18 @@ impl Pallet { log::debug!( "incentives: hotkey: {hotkey:?} is SN owner hotkey or associated hotkey, skipping {incentive:?}" ); - // Check if we should recycle or burn the incentive - match RecycleOrBurn::::try_get(netuid) { + // Check if we should recycle or burn the incentive + match RecycleOrBurn::::try_get(netuid) { Ok(RecycleOrBurn::Recycle) => { log::debug!("recycling {incentive:?}"); - // recycle the incentive - - // Recycle means we should decrease the alpha issuance tracker. - SubnetAlphaOut::::mutate(netuid, |total| { - *total = total.saturating_sub(incentive); - }); + Self::recycle_subnet_alpha(netuid, incentive); } Ok(RecycleOrBurn::Burn) | Err(_) => { - log::debug!("burning {incentive:?}"); // Skip/burn miner-emission for SN owner hotkey. 
+ log::debug!("burning {incentive:?}"); + Self::burn_subnet_alpha(netuid, incentive); } } - continue; + continue; } let owner: T::AccountId = Owner::::get(&hotkey); From 261ef5a1f09c5cf60c963bcd52d61fee43babbf1 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 16 Sep 2025 16:36:52 +0000 Subject: [PATCH 256/379] auto-update benchmark weights --- pallets/admin-utils/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 75ea020708..9c69a873c2 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1913,8 +1913,8 @@ pub mod pallet { /// the lowest emitters while preserving temporally and owner immune UIDs. The UIDs are /// then compressed to the left and storage is migrated to the new compressed UIDs. #[pallet::call_index(78)] - #[pallet::weight(Weight::from_parts(15_000_000, 0) - .saturating_add(::DbWeight::get().reads(1_u64)) + #[pallet::weight(Weight::from_parts(32_880_000, 0) + .saturating_add(::DbWeight::get().reads(6_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_trim_to_max_allowed_uids( origin: OriginFor, @@ -1940,8 +1940,8 @@ pub mod pallet { /// The extrinsic sets the minimum allowed UIDs for a subnet. /// It is only callable by the root account. 
#[pallet::call_index(79)] - #[pallet::weight(Weight::from_parts(18_800_000, 0) - .saturating_add(::DbWeight::get().reads(2_u64)) + #[pallet::weight(Weight::from_parts(24_370_000, 0) + .saturating_add(::DbWeight::get().reads(3_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_allowed_uids( origin: OriginFor, From 60834c828220a31e148a40a0fb60f4770a73cb7e Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 16 Sep 2025 09:44:20 -0700 Subject: [PATCH 257/379] add NetworkRegistrationStartBlock --- pallets/subtensor/src/lib.rs | 10 ++ ...migrate_network_lock_reduction_interval.rs | 16 ++- .../src/migrations/migrate_subnet_locked.rs | 128 +++++++++--------- pallets/subtensor/src/subnets/subnet.rs | 7 +- pallets/subtensor/src/tests/migration.rs | 42 +++++- scripts/benchmark_action.sh | 2 +- 6 files changed, 130 insertions(+), 75 deletions(-) diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 29e11d62cf..331ca94e6c 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -551,6 +551,11 @@ pub mod pallet { T::InitialNetworkRateLimit::get() } #[pallet::type_value] + /// Default value for network rate limit. + pub fn DefaultNetworkRegistrationStartBlock() -> u64 { + 0 + } + #[pallet::type_value] /// Default value for weights version key rate limit. /// In units of tempos. 
pub fn DefaultWeightsVersionKeyRateLimit() -> u64 { @@ -1833,6 +1838,11 @@ pub mod pallet { pub type CommitRevealWeightsVersion = StorageValue<_, u16, ValueQuery, DefaultCommitRevealWeightsVersion>; + #[pallet::storage] + /// ITEM( NetworkRegistrationStartBlock ) + pub type NetworkRegistrationStartBlock = + StorageValue<_, u64, ValueQuery, DefaultNetworkRegistrationStartBlock>; + /// ====================== /// ==== Sub-subnets ===== /// ====================== diff --git a/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs b/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs index 56079b0a13..c19e16067e 100644 --- a/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs +++ b/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs @@ -5,6 +5,7 @@ use scale_info::prelude::string::String; pub fn migrate_network_lock_reduction_interval() -> Weight { const NEW_VALUE: u64 = 28_800; + const ONE_WEEK_BLOCKS: u64 = 50_400; let migration_name = b"migrate_network_lock_reduction_interval".to_vec(); let mut weight = T::DbWeight::get().reads(1); @@ -19,6 +20,8 @@ pub fn migrate_network_lock_reduction_interval() -> Weight { return weight; } + let current_block = Pallet::::get_current_block_as_u64(); + // ── 1) Set new values ───────────────────────────────────────────────── NetworkLockReductionInterval::::put(NEW_VALUE); weight = weight.saturating_add(T::DbWeight::get().writes(1)); @@ -29,7 +32,12 @@ pub fn migrate_network_lock_reduction_interval() -> Weight { Pallet::::set_network_last_lock(TaoCurrency::from(1_000_000_000_000)); weight = weight.saturating_add(T::DbWeight::get().writes(1)); - Pallet::::set_network_last_lock_block(Pallet::::get_current_block_as_u64()); + // Hold price at 2000 TAO until day 7, then begin linear decay + Pallet::::set_network_last_lock_block(current_block.saturating_add(ONE_WEEK_BLOCKS)); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + // 
Allow registrations starting at day 7 + NetworkRegistrationStartBlock::::put(current_block.saturating_add(ONE_WEEK_BLOCKS)); weight = weight.saturating_add(T::DbWeight::get().writes(1)); // ── 2) Mark migration done ─────────────────────────────────────────── @@ -38,9 +46,11 @@ pub fn migrate_network_lock_reduction_interval() -> Weight { log::info!( target: "runtime", - "Migration '{}' completed - NetworkLockReductionInterval => {}.", + "Migration '{}' completed - NetworkLockReductionInterval & NetworkRateLimit => {}. \ + last_lock set to 1_000_000_000_000 rao; last_lock_block/start_block => {}.", String::from_utf8_lossy(&migration_name), - NEW_VALUE + NEW_VALUE, + current_block.saturating_add(ONE_WEEK_BLOCKS), ); weight diff --git a/pallets/subtensor/src/migrations/migrate_subnet_locked.rs b/pallets/subtensor/src/migrations/migrate_subnet_locked.rs index 40f199e8af..e72881ea7d 100644 --- a/pallets/subtensor/src/migrations/migrate_subnet_locked.rs +++ b/pallets/subtensor/src/migrations/migrate_subnet_locked.rs @@ -1,8 +1,8 @@ use super::*; +use crate::{Config, HasMigrationRun, SubnetLocked, TaoCurrency}; use frame_support::weights::Weight; use log; use scale_info::prelude::string::String; -use crate::{Config, HasMigrationRun, SubnetLocked, TaoCurrency}; use subtensor_runtime_common::NetUid; pub fn migrate_restore_subnet_locked() -> Weight { @@ -21,69 +21,69 @@ pub fn migrate_restore_subnet_locked() -> Weight { // (netuid, locked_rao) pairs taken from the historical snapshot (block #4_828_623). 
const SUBNET_LOCKED: &[(u16, u64)] = &[ - ( 2, 976_893_069_056), - ( 3,2_569_362_397_490), - ( 4,1_928_551_593_932), - ( 5,1_712_540_082_588), - ( 6,1_495_929_556_770), - ( 7,1_011_702_451_936), - ( 8, 337_484_391_024), - ( 9, 381_240_180_320), - ( 10,1_253_515_128_353), - ( 11,1_453_924_672_132), - ( 12, 100_000_000_000), - ( 13, 100_000_000_000), - ( 14,1_489_714_521_808), - ( 15,1_784_089_225_496), - ( 16, 889_176_219_484), - ( 17,1_266_310_122_772), - ( 18, 222_355_058_433), - ( 19, 100_000_000_000), - ( 20, 100_000_000_000), - ( 21, 885_096_322_978), - ( 22, 100_000_000_000), - ( 23, 100_000_000_000), - ( 24,5_146_073_854_481), - ( 25,1_782_920_948_214), - ( 26, 153_583_865_248), - ( 27, 201_344_183_084), - ( 28, 901_455_879_445), - ( 29, 175_000_001_600), - ( 30,1_419_730_660_074), - ( 31, 319_410_100_502), - ( 32,2_016_397_028_246), - ( 33,1_626_477_274_174), - ( 34,1_455_297_496_345), - ( 35,1_191_275_979_639), - ( 36,1_097_008_574_216), - ( 37, 864_664_455_362), - ( 38,1_001_936_494_076), - ( 39,1_366_096_404_884), - ( 40, 100_000_000_000), - ( 41, 535_937_523_200), - ( 42,1_215_698_423_344), - ( 43,1_641_308_676_800), - ( 44,1_514_636_189_434), - ( 45,1_605_608_381_438), - ( 46,1_095_943_027_350), - ( 47,1_499_235_469_986), - ( 48,1_308_073_720_362), - ( 49,1_222_672_092_068), - ( 50,2_628_355_421_561), - ( 51,1_520_860_720_561), - ( 52,1_794_457_248_725), - ( 53,1_721_472_811_492), - ( 54,2_048_900_691_868), - ( 55,1_278_597_446_119), - ( 56,2_016_045_544_480), - ( 57,1_920_563_399_676), - ( 58,2_246_525_691_504), - ( 59,1_776_159_384_888), - ( 60,2_173_138_865_414), - ( 61,1_435_634_867_728), - ( 62,2_061_282_563_888), - ( 63,3_008_967_320_998), - ( 64,2_099_236_359_026), + (2, 976_893_069_056), + (3, 2_569_362_397_490), + (4, 1_928_551_593_932), + (5, 1_712_540_082_588), + (6, 1_495_929_556_770), + (7, 1_011_702_451_936), + (8, 337_484_391_024), + (9, 381_240_180_320), + (10, 1_253_515_128_353), + (11, 1_453_924_672_132), + (12, 100_000_000_000), + 
(13, 100_000_000_000), + (14, 1_489_714_521_808), + (15, 1_784_089_225_496), + (16, 889_176_219_484), + (17, 1_266_310_122_772), + (18, 222_355_058_433), + (19, 100_000_000_000), + (20, 100_000_000_000), + (21, 885_096_322_978), + (22, 100_000_000_000), + (23, 100_000_000_000), + (24, 5_146_073_854_481), + (25, 1_782_920_948_214), + (26, 153_583_865_248), + (27, 201_344_183_084), + (28, 901_455_879_445), + (29, 175_000_001_600), + (30, 1_419_730_660_074), + (31, 319_410_100_502), + (32, 2_016_397_028_246), + (33, 1_626_477_274_174), + (34, 1_455_297_496_345), + (35, 1_191_275_979_639), + (36, 1_097_008_574_216), + (37, 864_664_455_362), + (38, 1_001_936_494_076), + (39, 1_366_096_404_884), + (40, 100_000_000_000), + (41, 535_937_523_200), + (42, 1_215_698_423_344), + (43, 1_641_308_676_800), + (44, 1_514_636_189_434), + (45, 1_605_608_381_438), + (46, 1_095_943_027_350), + (47, 1_499_235_469_986), + (48, 1_308_073_720_362), + (49, 1_222_672_092_068), + (50, 2_628_355_421_561), + (51, 1_520_860_720_561), + (52, 1_794_457_248_725), + (53, 1_721_472_811_492), + (54, 2_048_900_691_868), + (55, 1_278_597_446_119), + (56, 2_016_045_544_480), + (57, 1_920_563_399_676), + (58, 2_246_525_691_504), + (59, 1_776_159_384_888), + (60, 2_173_138_865_414), + (61, 1_435_634_867_728), + (62, 2_061_282_563_888), + (63, 3_008_967_320_998), + (64, 2_099_236_359_026), ]; let mut inserted: u32 = 0; diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 3719a40f8d..d3eefc16dc 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -128,8 +128,13 @@ impl Pallet { // --- 3. Ensure the mechanism is Dynamic. ensure!(mechid == 1, Error::::MechanismDoesNotExist); - // --- 4. Rate limit for network registrations. let current_block = Self::get_current_block_as_u64(); + ensure!( + current_block >= NetworkRegistrationStartBlock::::get(), + Error::::SubNetRegistrationDisabled + ); + + // --- 4. 
Rate limit for network registrations. ensure!( Self::passes_rate_limit(&TransactionType::RegisterNetwork, &coldkey), Error::::NetworkTxRateLimitExceeded diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index f052d70e96..d99625ece2 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -1757,26 +1757,56 @@ fn test_migrate_subnet_limit_to_default() { #[test] fn test_migrate_network_lock_reduction_interval_and_decay() { new_test_ext(0).execute_with(|| { + const NEW_VALUE: u64 = 28_800; + const ONE_WEEK_BLOCKS: u64 = 50_400; + // ── pre ────────────────────────────────────────────────────────────── assert!( !HasMigrationRun::::get(b"migrate_network_lock_reduction_interval".to_vec()), "HasMigrationRun should be false before migration" ); - // ensure current_block > 0 so mult = 2 after migration + // ensure current_block > 0 step_block(1); + let current_block_before = Pallet::::get_current_block_as_u64(); // ── run migration ──────────────────────────────────────────────────── let weight = crate::migrations::migrate_network_lock_reduction_interval::migrate_network_lock_reduction_interval::(); assert!(!weight.is_zero(), "migration weight should be > 0"); // ── params & flags ─────────────────────────────────────────────────── - assert_eq!(NetworkLockReductionInterval::::get(), 28_800); - assert_eq!(NetworkRateLimit::::get(), 28_800); - assert_eq!(Pallet::::get_network_last_lock(), 1_000_000_000_000u64.into()); // 1000 TAO in rAO + assert_eq!(NetworkLockReductionInterval::::get(), NEW_VALUE); + assert_eq!(NetworkRateLimit::::get(), NEW_VALUE); + assert_eq!( + Pallet::::get_network_last_lock(), + 1_000_000_000_000u64.into(), // 1000 TAO in rao + "last_lock should be 1_000_000_000_000 rao" + ); + + // last_lock_block should be set one week in the future + let last_lock_block = Pallet::::get_network_last_lock_block(); + let expected_block = 
current_block_before.saturating_add(ONE_WEEK_BLOCKS); + assert_eq!( + last_lock_block, + expected_block, + "last_lock_block should be current + ONE_WEEK_BLOCKS" + ); + + // registration start block should match the same future block + assert_eq!( + NetworkRegistrationStartBlock::::get(), + expected_block, + "NetworkRegistrationStartBlock should equal last_lock_block" + ); + + // lock cost should be 2000 TAO immediately after migration + let lock_cost_now = Pallet::::get_network_lock_cost(); + assert_eq!( + lock_cost_now, + 2_000_000_000_000u64.into(), + "lock cost should be 2000 TAO right after migration" + ); - let start_block = Pallet::::get_network_last_lock_block(); - assert_eq!(start_block, Pallet::::get_current_block_as_u64()); assert!( HasMigrationRun::::get(b"migrate_network_lock_reduction_interval".to_vec()), "HasMigrationRun should be true after migration" diff --git a/scripts/benchmark_action.sh b/scripts/benchmark_action.sh index 105cbe9bf5..a475211163 100755 --- a/scripts/benchmark_action.sh +++ b/scripts/benchmark_action.sh @@ -11,7 +11,7 @@ declare -A DISPATCH_PATHS=( [swap]="../pallets/swap/src/pallet/mod.rs" ) -THRESHOLD=20 +THRESHOLD=40 MAX_RETRIES=3 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" From 1005c974c1e3a1535eadaa211d54e9dd0764f8af Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 16 Sep 2025 10:36:52 -0700 Subject: [PATCH 258/379] clippy --- pallets/subtensor/src/staking/remove_stake.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index d3834c283a..8a691a9866 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -476,10 +476,7 @@ impl Pallet { Ok(sim) => TaoCurrency::from(sim.amount_paid_out), Err(e) => { log::debug!( - "destroy_alpha_in_out_stakes: sim_swap owner α→τ failed (netuid={:?}, alpha={}, 
err={:?}); falling back to price multiply.", - netuid, - owner_alpha_u64, - e + "destroy_alpha_in_out_stakes: sim_swap owner α→τ failed (netuid={netuid:?}, alpha={owner_alpha_u64}, err={e:?}); falling back to price multiply.", ); let cur_price: U96F32 = T::SwapInterface::current_alpha_price(netuid.into()); let val_u64: u64 = U96F32::from_num(owner_alpha_u64) From eb1110697fb6e02a1ef2109eeac6f45452fe0f23 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 16 Sep 2025 11:07:32 -0700 Subject: [PATCH 259/379] remove unused --- common/src/lib.rs | 1 - pallets/subtensor/src/lib.rs | 3 --- pallets/swap/src/mock.rs | 4 ---- 3 files changed, 8 deletions(-) diff --git a/common/src/lib.rs b/common/src/lib.rs index 3fc40f695a..347d2dbed4 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -175,7 +175,6 @@ pub trait SubnetInfo { fn mechanism(netuid: NetUid) -> u16; fn is_owner(account_id: &AccountId, netuid: NetUid) -> bool; fn is_subtoken_enabled(netuid: NetUid) -> bool; - fn get_owned_hotkeys(coldkey: &AccountId) -> Vec; } pub trait BalanceOps { diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index f8e8271e91..0d8537b64b 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -2119,9 +2119,6 @@ impl> fn is_subtoken_enabled(netuid: NetUid) -> bool { SubtokenEnabled::::get(netuid) } - fn get_owned_hotkeys(coldkey: &T::AccountId) -> Vec { - OwnedHotkeys::::get(coldkey) - } } impl> diff --git a/pallets/swap/src/mock.rs b/pallets/swap/src/mock.rs index 9653f9ee22..40aac6d796 100644 --- a/pallets/swap/src/mock.rs +++ b/pallets/swap/src/mock.rs @@ -120,10 +120,6 @@ impl SubnetInfo for MockLiquidityProvider { fn is_subtoken_enabled(netuid: NetUid) -> bool { netuid.inner() != SUBTOKEN_DISABLED_NETUID } - - fn get_owned_hotkeys(_coldkey: &AccountId) -> Vec { - Vec::::new() - } } pub struct MockBalanceOps; From d7dcbd8de02f8176cd17f028bd223545f0cc9bfc Mon Sep 17 00:00:00 2001 From: John 
Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 16 Sep 2025 12:19:07 -0700 Subject: [PATCH 260/379] 8 day NetworkLockReductionInterval --- .../migrate_network_lock_reduction_interval.rs | 12 +++++------- pallets/subtensor/src/tests/migration.rs | 7 ++++--- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs b/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs index c19e16067e..99bb5b6e97 100644 --- a/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs +++ b/pallets/subtensor/src/migrations/migrate_network_lock_reduction_interval.rs @@ -4,7 +4,8 @@ use log; use scale_info::prelude::string::String; pub fn migrate_network_lock_reduction_interval() -> Weight { - const NEW_VALUE: u64 = 28_800; + const FOUR_DAYS: u64 = 28_800; + const EIGHT_DAYS: u64 = 57_600; const ONE_WEEK_BLOCKS: u64 = 50_400; let migration_name = b"migrate_network_lock_reduction_interval".to_vec(); @@ -23,10 +24,10 @@ pub fn migrate_network_lock_reduction_interval() -> Weight { let current_block = Pallet::::get_current_block_as_u64(); // ── 1) Set new values ───────────────────────────────────────────────── - NetworkLockReductionInterval::::put(NEW_VALUE); + NetworkLockReductionInterval::::put(EIGHT_DAYS); weight = weight.saturating_add(T::DbWeight::get().writes(1)); - NetworkRateLimit::::put(NEW_VALUE); + NetworkRateLimit::::put(FOUR_DAYS); weight = weight.saturating_add(T::DbWeight::get().writes(1)); Pallet::::set_network_last_lock(TaoCurrency::from(1_000_000_000_000)); @@ -46,11 +47,8 @@ pub fn migrate_network_lock_reduction_interval() -> Weight { log::info!( target: "runtime", - "Migration '{}' completed - NetworkLockReductionInterval & NetworkRateLimit => {}. 
\ - last_lock set to 1_000_000_000_000 rao; last_lock_block/start_block => {}.", + "Migration '{}' completed.", String::from_utf8_lossy(&migration_name), - NEW_VALUE, - current_block.saturating_add(ONE_WEEK_BLOCKS), ); weight diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index d99625ece2..1e58d2f7ab 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -1757,7 +1757,8 @@ fn test_migrate_subnet_limit_to_default() { #[test] fn test_migrate_network_lock_reduction_interval_and_decay() { new_test_ext(0).execute_with(|| { - const NEW_VALUE: u64 = 28_800; + const FOUR_DAYS: u64 = 28_800; + const EIGHT_DAYS: u64 = 57_600; const ONE_WEEK_BLOCKS: u64 = 50_400; // ── pre ────────────────────────────────────────────────────────────── @@ -1775,8 +1776,8 @@ fn test_migrate_network_lock_reduction_interval_and_decay() { assert!(!weight.is_zero(), "migration weight should be > 0"); // ── params & flags ─────────────────────────────────────────────────── - assert_eq!(NetworkLockReductionInterval::::get(), NEW_VALUE); - assert_eq!(NetworkRateLimit::::get(), NEW_VALUE); + assert_eq!(NetworkLockReductionInterval::::get(), EIGHT_DAYS); + assert_eq!(NetworkRateLimit::::get(), FOUR_DAYS); assert_eq!( Pallet::::get_network_last_lock(), 1_000_000_000_000u64.into(), // 1000 TAO in rao From b6a57500eb4c924288cbc812d598e679176e5689 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 16 Sep 2025 12:25:42 -0700 Subject: [PATCH 261/379] clippy --- common/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/src/lib.rs b/common/src/lib.rs index 347d2dbed4..26aa6b2f13 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -9,7 +9,7 @@ use runtime_common::prod_or_fast; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_runtime::{ - MultiSignature, Vec, + MultiSignature, traits::{IdentifyAccount, Verify}, 
}; use subtensor_macros::freeze_struct; From f51659371a1a56fb0c68acab5f5d6861866f8665 Mon Sep 17 00:00:00 2001 From: Loris Moulin Date: Tue, 16 Sep 2025 18:48:53 -0300 Subject: [PATCH 262/379] fix rate limit --- pallets/admin-utils/src/lib.rs | 7 ++++--- pallets/subtensor/src/lib.rs | 2 +- pallets/subtensor/src/utils/rate_limiting.rs | 8 ++++---- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 9c69a873c2..89c83d68fc 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1924,7 +1924,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin.clone(), netuid, - &[TransactionType::SetMaxAllowedUids], + &[TransactionType::MaxUidsTrimming], )?; pallet_subtensor::Pallet::::trim_to_max_allowed_uids(netuid, max_n)?; @@ -1932,7 +1932,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::SetMaxAllowedUids], + &[TransactionType::MaxUidsTrimming], ); Ok(()) } @@ -1948,7 +1948,8 @@ pub mod pallet { netuid: NetUid, min_allowed_uids: u16, ) -> DispatchResult { - ensure_root(origin)?; + pallet_subtensor::Pallet::::ensure_root_with_rate_limit(origin, netuid)?; + ensure!( pallet_subtensor::Pallet::::if_subnet_exist(netuid), Error::::SubnetDoesNotExist diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 439129efc9..00fab25ae4 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -619,7 +619,7 @@ pub mod pallet { } #[pallet::type_value] /// -- Rate limit for set max allowed UIDs - pub fn SetMaxAllowedUidsRateLimit() -> u64 { + pub fn MaxUidsTrimmingRateLimit() -> u64 { prod_or_fast!(30 * 7200, 1) } #[pallet::type_value] diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index e9b33b3379..190634212a 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ 
b/pallets/subtensor/src/utils/rate_limiting.rs @@ -15,7 +15,7 @@ pub enum TransactionType { OwnerHyperparamUpdate(Hyperparameter), SubsubnetCountUpdate, SubsubnetEmission, - SetMaxAllowedUids, + MaxUidsTrimming, } impl TransactionType { @@ -27,7 +27,7 @@ impl TransactionType { Self::RegisterNetwork => NetworkRateLimit::::get(), Self::SubsubnetCountUpdate => SubsubnetCountSetRateLimit::::get(), Self::SubsubnetEmission => SubsubnetEmissionRateLimit::::get(), - + Self::MaxUidsTrimming => MaxUidsTrimmingRateLimit::::get(), Self::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, } @@ -140,7 +140,7 @@ impl From for u16 { TransactionType::OwnerHyperparamUpdate(_) => 6, TransactionType::SubsubnetCountUpdate => 7, TransactionType::SubsubnetEmission => 8, - TransactionType::SetMaxAllowedUids => 9, + TransactionType::MaxUidsTrimming => 9, } } } @@ -157,7 +157,7 @@ impl From for TransactionType { 6 => TransactionType::OwnerHyperparamUpdate(Hyperparameter::Unknown), 7 => TransactionType::SubsubnetCountUpdate, 8 => TransactionType::SubsubnetEmission, - 9 => TransactionType::SetMaxAllowedUids, + 9 => TransactionType::MaxUidsTrimming, _ => TransactionType::Unknown, } } From d79479545f3f9053464cc56d148ec82b8fe00581 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 17 Sep 2025 00:11:43 +0000 Subject: [PATCH 263/379] auto-update benchmark weights --- pallets/admin-utils/src/lib.rs | 4 ++-- pallets/subtensor/src/macros/dispatches.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 89c83d68fc..8b4f7204c3 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1940,8 +1940,8 @@ pub mod pallet { /// The extrinsic sets the minimum allowed UIDs for a subnet. /// It is only callable by the root account. 
#[pallet::call_index(79)] - #[pallet::weight(Weight::from_parts(24_370_000, 0) - .saturating_add(::DbWeight::get().reads(3_u64)) + #[pallet::weight(Weight::from_parts(31_550_000, 0) + .saturating_add(::DbWeight::get().reads(5_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] pub fn sudo_set_min_allowed_uids( origin: OriginFor, diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index a8b981048b..f4583c0711 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -882,7 +882,7 @@ mod dispatches { /// - Attempting to set prometheus information withing the rate limit min. /// #[pallet::call_index(40)] - #[pallet::weight((Weight::from_parts(41_240_000, 0) + #[pallet::weight((Weight::from_parts(32_510_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_axon_tls( From f7fc197ab52a010e284e0a54aaa9b7bda66373f0 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 17 Sep 2025 12:20:15 -0400 Subject: [PATCH 264/379] Rename subsubnets to mechanisms --- common/src/lib.rs | 32 +- pallets/admin-utils/src/lib.rs | 22 +- pallets/admin-utils/src/tests/mod.rs | 64 +-- pallets/subtensor/rpc/src/lib.rs | 14 +- pallets/subtensor/runtime-api/src/lib.rs | 6 +- .../subtensor/src/coinbase/reveal_commits.rs | 30 +- pallets/subtensor/src/coinbase/root.rs | 22 +- .../subtensor/src/coinbase/run_coinbase.rs | 2 +- pallets/subtensor/src/epoch/run_epoch.rs | 28 +- pallets/subtensor/src/lib.rs | 30 +- pallets/subtensor/src/macros/dispatches.rs | 60 +-- pallets/subtensor/src/macros/errors.rs | 4 +- pallets/subtensor/src/rpc_info/metagraph.rs | 26 +- .../subtensor/src/staking/recycle_alpha.rs | 4 +- pallets/subtensor/src/staking/set_children.rs | 6 +- .../subnets/{subsubnet.rs => mechanism.rs} | 106 ++--- pallets/subtensor/src/subnets/mod.rs | 2 +- pallets/subtensor/src/subnets/registration.rs | 8 
+- pallets/subtensor/src/subnets/serving.rs | 4 +- pallets/subtensor/src/subnets/subnet.rs | 4 +- pallets/subtensor/src/subnets/uids.rs | 26 +- pallets/subtensor/src/subnets/weights.rs | 64 +-- pallets/subtensor/src/swap/swap_hotkey.rs | 6 +- pallets/subtensor/src/tests/children.rs | 10 +- .../src/tests/{subsubnet.rs => mechanism.rs} | 392 +++++++++--------- pallets/subtensor/src/tests/mod.rs | 2 +- pallets/subtensor/src/tests/recycle_alpha.rs | 4 +- pallets/subtensor/src/tests/subnet.rs | 2 +- pallets/subtensor/src/utils/rate_limiting.rs | 16 +- pallets/swap/src/pallet/mod.rs | 12 +- pallets/swap/src/pallet/tests.rs | 2 +- runtime/src/lib.rs | 16 +- 32 files changed, 512 insertions(+), 514 deletions(-) rename pallets/subtensor/src/subnets/{subsubnet.rs => mechanism.rs} (80%) rename pallets/subtensor/src/tests/{subsubnet.rs => mechanism.rs} (80%) diff --git a/common/src/lib.rs b/common/src/lib.rs index 26aa6b2f13..6122ef99fa 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -224,7 +224,7 @@ pub mod time { pub const DAYS: BlockNumber = HOURS * 24; } -#[freeze_struct("8e576b32bb1bb664")] +#[freeze_struct("7e5202d7f18b39d4")] #[repr(transparent)] #[derive( Deserialize, @@ -244,43 +244,43 @@ pub mod time { RuntimeDebug, )] #[serde(transparent)] -pub struct SubId(u8); +pub struct MechId(u8); -impl SubId { - pub const MAIN: SubId = Self(0); +impl MechId { + pub const MAIN: MechId = Self(0); } -impl From for SubId { +impl From for MechId { fn from(value: u8) -> Self { Self(value) } } -impl From for u16 { - fn from(val: SubId) -> Self { +impl From for u16 { + fn from(val: MechId) -> Self { u16::from(val.0) } } -impl From for u64 { - fn from(val: SubId) -> Self { +impl From for u64 { + fn from(val: MechId) -> Self { u64::from(val.0) } } -impl From for u8 { - fn from(val: SubId) -> Self { +impl From for u8 { + fn from(val: MechId) -> Self { u8::from(val.0) } } -impl Display for SubId { +impl Display for MechId { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 
Display::fmt(&self.0, f) } } -impl CompactAs for SubId { +impl CompactAs for MechId { type As = u8; fn encode_as(&self) -> &Self::As { @@ -292,13 +292,13 @@ impl CompactAs for SubId { } } -impl From> for SubId { - fn from(c: Compact) -> Self { +impl From> for MechId { + fn from(c: Compact) -> Self { c.0 } } -impl TypeInfo for SubId { +impl TypeInfo for MechId { type Identity = ::Identity; fn type_info() -> scale_info::Type { ::type_info() diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 8b4f7204c3..ab9bcc0e24 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -28,7 +28,7 @@ pub mod pallet { use pallet_subtensor::utils::rate_limiting::{Hyperparameter, TransactionType}; use sp_runtime::BoundedVec; use substrate_fixed::types::I96F32; - use subtensor_runtime_common::{NetUid, SubId, TaoCurrency}; + use subtensor_runtime_common::{NetUid, MechId, TaoCurrency}; /// The main data structure of the module. #[pallet::pallet] @@ -1855,38 +1855,38 @@ pub mod pallet { Ok(()) } - /// Sets the desired number of subsubnets in a subnet + /// Sets the desired number of mechanisms in a subnet #[pallet::call_index(76)] #[pallet::weight(Weight::from_parts(15_000_000, 0) .saturating_add(::DbWeight::get().reads(1_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] - pub fn sudo_set_subsubnet_count( + pub fn sudo_set_mechanism_count( origin: OriginFor, netuid: NetUid, - subsub_count: SubId, + mechanism_count: MechId, ) -> DispatchResult { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::SubsubnetCountUpdate], + &[TransactionType::MechanismCountUpdate], )?; - pallet_subtensor::Pallet::::do_set_subsubnet_count(netuid, subsub_count)?; + pallet_subtensor::Pallet::::do_set_mechanism_count(netuid, mechanism_count)?; pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::SubsubnetCountUpdate], + 
&[TransactionType::MechanismCountUpdate], ); Ok(()) } - /// Sets the emission split between subsubnets in a subnet + /// Sets the emission split between mechanisms in a subnet #[pallet::call_index(77)] #[pallet::weight(Weight::from_parts(15_000_000, 0) .saturating_add(::DbWeight::get().reads(1_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] - pub fn sudo_set_subsubnet_emission_split( + pub fn sudo_set_mechanism_emission_split( origin: OriginFor, netuid: NetUid, maybe_split: Option>, @@ -1894,7 +1894,7 @@ pub mod pallet { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, netuid, - &[TransactionType::SubsubnetEmission], + &[TransactionType::MechanismEmission], )?; pallet_subtensor::Pallet::::do_set_emission_split(netuid, maybe_split)?; @@ -1902,7 +1902,7 @@ pub mod pallet { pallet_subtensor::Pallet::::record_owner_rl( maybe_owner, netuid, - &[TransactionType::SubsubnetEmission], + &[TransactionType::MechanismEmission], ); Ok(()) } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 3b68db2a91..033c711902 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -14,7 +14,7 @@ use pallet_subtensor::{Event, utils::rate_limiting::TransactionType}; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{Get, Pair, U256, ed25519}; use substrate_fixed::types::I96F32; -use subtensor_runtime_common::{Currency, NetUid, SubId, TaoCurrency}; +use subtensor_runtime_common::{Currency, NetUid, MechId, TaoCurrency}; use crate::Error; use crate::pallet::PrecompileEnable; @@ -2322,11 +2322,11 @@ fn test_sudo_set_max_burn() { } #[test] -fn test_sudo_set_subsubnet_count() { +fn test_sudo_set_mechanism_count() { new_test_ext().execute_with(|| { let netuid = NetUid::from(1); - let ss_count_ok = SubId::from(8); - let ss_count_bad = SubId::from(9); + let ss_count_ok = MechId::from(8); + let ss_count_bad = MechId::from(9); let sn_owner = 
U256::from(1324); add_network(netuid, 10); @@ -2334,7 +2334,7 @@ fn test_sudo_set_subsubnet_count() { SubnetOwner::::insert(netuid, sn_owner); assert_eq!( - AdminUtils::sudo_set_subsubnet_count( + AdminUtils::sudo_set_mechanism_count( <::RuntimeOrigin>::signed(U256::from(1)), netuid, ss_count_ok @@ -2342,17 +2342,17 @@ fn test_sudo_set_subsubnet_count() { Err(DispatchError::BadOrigin) ); assert_noop!( - AdminUtils::sudo_set_subsubnet_count(RuntimeOrigin::root(), netuid, ss_count_bad), + AdminUtils::sudo_set_mechanism_count(RuntimeOrigin::root(), netuid, ss_count_bad), pallet_subtensor::Error::::InvalidValue ); - assert_ok!(AdminUtils::sudo_set_subsubnet_count( + assert_ok!(AdminUtils::sudo_set_mechanism_count( <::RuntimeOrigin>::root(), netuid, ss_count_ok )); - assert_ok!(AdminUtils::sudo_set_subsubnet_count( + assert_ok!(AdminUtils::sudo_set_mechanism_count( <::RuntimeOrigin>::signed(sn_owner), netuid, ss_count_ok @@ -2360,28 +2360,28 @@ fn test_sudo_set_subsubnet_count() { }); } -// cargo test --package pallet-admin-utils --lib -- tests::test_sudo_set_subsubnet_count_and_emissions --exact --show-output +// cargo test --package pallet-admin-utils --lib -- tests::test_sudo_set_mechanism_count_and_emissions --exact --show-output #[test] -fn test_sudo_set_subsubnet_count_and_emissions() { +fn test_sudo_set_mechanism_count_and_emissions() { new_test_ext().execute_with(|| { let netuid = NetUid::from(1); - let ss_count_ok = SubId::from(2); + let ss_count_ok = MechId::from(2); let sn_owner = U256::from(1324); add_network(netuid, 10); // Set the Subnet Owner SubnetOwner::::insert(netuid, sn_owner); - assert_ok!(AdminUtils::sudo_set_subsubnet_count( + assert_ok!(AdminUtils::sudo_set_mechanism_count( <::RuntimeOrigin>::signed(sn_owner), netuid, ss_count_ok )); // Cannot set emission split with wrong number of entries - // With two subsubnets the size of the split vector should be 2, not 3 + // With two mechanisms the size of the split vector should be 2, not 3 
assert_noop!( - AdminUtils::sudo_set_subsubnet_emission_split( + AdminUtils::sudo_set_mechanism_emission_split( <::RuntimeOrigin>::signed(sn_owner), netuid, Some(vec![0xFFFF / 5 * 2, 0xFFFF / 5 * 2, 0xFFFF / 5]) @@ -2392,7 +2392,7 @@ fn test_sudo_set_subsubnet_count_and_emissions() { // Cannot set emission split with wrong total of entries // Split vector entries should sum up to exactly 0xFFFF assert_noop!( - AdminUtils::sudo_set_subsubnet_emission_split( + AdminUtils::sudo_set_mechanism_emission_split( <::RuntimeOrigin>::signed(sn_owner), netuid, Some(vec![0xFFFF / 5 * 4, 0xFFFF / 5 - 1]) @@ -2401,9 +2401,9 @@ fn test_sudo_set_subsubnet_count_and_emissions() { ); // Can set good split ok - // We also verify here that it can happen in the same block as setting subsubnet counts + // We also verify here that it can happen in the same block as setting mechanism counts // or soon, without rate limiting - assert_ok!(AdminUtils::sudo_set_subsubnet_emission_split( + assert_ok!(AdminUtils::sudo_set_mechanism_emission_split( <::RuntimeOrigin>::signed(sn_owner), netuid, Some(vec![0xFFFF / 5, 0xFFFF / 5 * 4]) @@ -2411,7 +2411,7 @@ fn test_sudo_set_subsubnet_count_and_emissions() { // Cannot set it again due to rate limits assert_noop!( - AdminUtils::sudo_set_subsubnet_emission_split( + AdminUtils::sudo_set_mechanism_emission_split( <::RuntimeOrigin>::signed(sn_owner), netuid, Some(vec![0xFFFF / 5 * 4, 0xFFFF / 5]) @@ -2436,9 +2436,9 @@ fn test_trim_to_max_allowed_uids() { ImmuneOwnerUidsLimit::::insert(netuid, 2); // We set a low value here to make testing easier MinAllowedUids::::set(netuid, 4); - // We define 4 subsubnets - let subsubnet_count = SubId::from(4); - SubsubnetCountCurrent::::insert(netuid, subsubnet_count); + // We define 4 mechanisms + let mechanism_count = MechId::from(4); + MechanismCountCurrent::::insert(netuid, mechanism_count); // Add some neurons let max_n = 16; @@ -2480,9 +2480,9 @@ fn test_trim_to_max_allowed_uids() { ValidatorPermit::::insert(netuid, 
bool_values.clone()); Active::::insert(netuid, bool_values); - for subid in 0..subsubnet_count.into() { + for mecid in 0..mechanism_count.into() { let netuid_index = - SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(subid)); + SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(mecid)); Incentive::::insert(netuid_index, values.clone()); LastUpdate::::insert(netuid_index, u64_values.clone()); } @@ -2515,9 +2515,9 @@ fn test_trim_to_max_allowed_uids() { } } - for subid in 0..subsubnet_count.into() { + for mecid in 0..mechanism_count.into() { let netuid_index = - SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(subid)); + SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(mecid)); Weights::::insert(netuid_index, uid, weights.clone()); Bonds::::insert(netuid_index, uid, bonds.clone()); } @@ -2563,9 +2563,9 @@ fn test_trim_to_max_allowed_uids() { assert_eq!(ValidatorPermit::::get(netuid), expected_bools); assert_eq!(StakeWeight::::get(netuid), expected_values); - for subid in 0..subsubnet_count.into() { + for mecid in 0..mechanism_count.into() { let netuid_index = - SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(subid)); + SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(mecid)); assert_eq!(Incentive::::get(netuid_index), expected_values); assert_eq!(LastUpdate::::get(netuid_index), expected_u64_values); } @@ -2574,9 +2574,9 @@ fn test_trim_to_max_allowed_uids() { for uid in new_max_n..max_n { assert!(!Keys::::contains_key(netuid, uid)); assert!(!BlockAtRegistration::::contains_key(netuid, uid)); - for subid in 0..subsubnet_count.into() { + for mecid in 0..mechanism_count.into() { let netuid_index = - SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(subid)); + SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(mecid)); assert!(!Weights::::contains_key(netuid_index, uid)); assert!(!Bonds::::contains_key(netuid_index, uid)); } @@ -2610,9 +2610,9 
@@ fn test_trim_to_max_allowed_uids() { // Ensure trimmed uids weights and bonds connections have been trimmed correctly for uid in 0..new_max_n { - for subid in 0..subsubnet_count.into() { + for mecid in 0..mechanism_count.into() { let netuid_index = - SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(subid)); + SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(mecid)); assert!( Weights::::get(netuid_index, uid) .iter() @@ -2638,7 +2638,7 @@ fn test_trim_to_max_allowed_uids() { NetUid::from(42), new_max_n ), - pallet_subtensor::Error::::SubNetworkDoesNotExist + pallet_subtensor::Error::::MechanismDoesNotExist ); // New max n less than lower bound diff --git a/pallets/subtensor/rpc/src/lib.rs b/pallets/subtensor/rpc/src/lib.rs index ea46695142..3ecdbf7464 100644 --- a/pallets/subtensor/rpc/src/lib.rs +++ b/pallets/subtensor/rpc/src/lib.rs @@ -9,7 +9,7 @@ use jsonrpsee::{ use sp_blockchain::HeaderBackend; use sp_runtime::{AccountId32, traits::Block as BlockT}; use std::sync::Arc; -use subtensor_runtime_common::{NetUid, SubId, TaoCurrency}; +use subtensor_runtime_common::{NetUid, MechId, TaoCurrency}; use sp_api::ProvideRuntimeApi; @@ -78,7 +78,7 @@ pub trait SubtensorCustomApi { fn get_submetagraph( &self, netuid: NetUid, - subid: SubId, + mecid: MechId, at: Option, ) -> RpcResult>; #[method(name = "subnetInfo_getSubnetState")] @@ -96,7 +96,7 @@ pub trait SubtensorCustomApi { fn get_selective_submetagraph( &self, netuid: NetUid, - subid: SubId, + mecid: MechId, metagraph_index: Vec, at: Option, ) -> RpcResult>; @@ -382,12 +382,12 @@ where fn get_submetagraph( &self, netuid: NetUid, - subid: SubId, + mecid: MechId, at: Option<::Hash>, ) -> RpcResult> { let api = self.client.runtime_api(); let at = at.unwrap_or_else(|| self.client.info().best_hash); - match api.get_submetagraph(at, netuid, subid) { + match api.get_submetagraph(at, netuid, mecid) { Ok(result) => Ok(result.encode()), Err(e) => Err(Error::RuntimeError(format!( "Unable to 
get dynamic subnets info: {e:?}" @@ -475,14 +475,14 @@ where fn get_selective_submetagraph( &self, netuid: NetUid, - subid: SubId, + mecid: MechId, metagraph_index: Vec, at: Option<::Hash>, ) -> RpcResult> { let api = self.client.runtime_api(); let at = at.unwrap_or_else(|| self.client.info().best_hash); - match api.get_selective_submetagraph(at, netuid, subid, metagraph_index) { + match api.get_selective_submetagraph(at, netuid, mecid, metagraph_index) { Ok(result) => Ok(result.encode()), Err(e) => { Err(Error::RuntimeError(format!("Unable to get selective metagraph: {e:?}")).into()) diff --git a/pallets/subtensor/runtime-api/src/lib.rs b/pallets/subtensor/runtime-api/src/lib.rs index 3ec76df45f..e25dcc535e 100644 --- a/pallets/subtensor/runtime-api/src/lib.rs +++ b/pallets/subtensor/runtime-api/src/lib.rs @@ -12,7 +12,7 @@ use pallet_subtensor::rpc_info::{ subnet_info::{SubnetHyperparams, SubnetHyperparamsV2, SubnetInfo, SubnetInfov2}, }; use sp_runtime::AccountId32; -use subtensor_runtime_common::{AlphaCurrency, NetUid, SubId, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, MechId, TaoCurrency}; // Here we declare the runtime API. It is implemented it the `impl` block in // src/neuron_info.rs, src/subnet_info.rs, and src/delegate_info.rs @@ -41,11 +41,11 @@ sp_api::decl_runtime_apis! 
{ fn get_all_metagraphs() -> Vec>>; fn get_metagraph(netuid: NetUid) -> Option>; fn get_all_submetagraphs() -> Vec>>; - fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option>; + fn get_submetagraph(netuid: NetUid, mecid: MechId) -> Option>; fn get_dynamic_info(netuid: NetUid) -> Option>; fn get_subnet_state(netuid: NetUid) -> Option>; fn get_selective_metagraph(netuid: NetUid, metagraph_indexes: Vec) -> Option>; - fn get_selective_submetagraph(netuid: NetUid, subid: SubId, metagraph_indexes: Vec) -> Option>; + fn get_selective_submetagraph(netuid: NetUid, mecid: MechId, metagraph_indexes: Vec) -> Option>; } pub trait StakeInfoRuntimeApi { diff --git a/pallets/subtensor/src/coinbase/reveal_commits.rs b/pallets/subtensor/src/coinbase/reveal_commits.rs index d0c068303b..3ddcb79f88 100644 --- a/pallets/subtensor/src/coinbase/reveal_commits.rs +++ b/pallets/subtensor/src/coinbase/reveal_commits.rs @@ -3,7 +3,7 @@ use ark_serialize::CanonicalDeserialize; use codec::Decode; use frame_support::{dispatch, traits::OriginTrait}; use scale_info::prelude::collections::VecDeque; -use subtensor_runtime_common::{NetUid, SubId}; +use subtensor_runtime_common::{NetUid, MechId}; use tle::{ curves::drand::TinyBLS381, stream_ciphers::AESGCMStreamCipherProvider, @@ -44,10 +44,10 @@ impl Pallet { // Weights revealed must have been committed during epoch `cur_epoch - reveal_period`. 
let reveal_epoch = cur_epoch.saturating_sub(reveal_period); - // All subsubnets share the same epoch, so the reveal_period/reveal_epoch are also the same - // Reveal for all subsubnets - for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + // All mechanisms share the same epoch, so the reveal_period/reveal_epoch are also the same + // Reveal for all mechanisms + for mecid in 0..MechanismCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); // Clean expired commits for (epoch, _) in TimelockedWeightCommits::::iter_prefix(netuid_index) { @@ -58,7 +58,7 @@ impl Pallet { // No commits to reveal until at least epoch reveal_period. if cur_epoch < reveal_period { - log::trace!("Failed to reveal commit for subsubnet {netuid_index} Too early"); + log::trace!("Failed to reveal commit for mechanism {netuid_index} Too early"); return Ok(()); } @@ -75,7 +75,7 @@ impl Pallet { None => { // Round number used was not found on the chain. Skip this commit. log::trace!( - "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} on block {commit_block} due to missing round number {round_number}; will retry every block in reveal epoch." + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} on block {commit_block} due to missing round number {round_number}; will retry every block in reveal epoch." 
); unrevealed.push_back(( who, @@ -92,7 +92,7 @@ impl Pallet { Ok(c) => c, Err(e) => { log::trace!( - "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing the commit: {e:?}" + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} due to error deserializing the commit: {e:?}" ); continue; } @@ -110,7 +110,7 @@ impl Pallet { Ok(s) => s, Err(e) => { log::trace!( - "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing signature from drand pallet: {e:?}" + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} due to error deserializing signature from drand pallet: {e:?}" ); continue; } @@ -122,7 +122,7 @@ impl Pallet { Ok(d) => d, Err(e) => { log::trace!( - "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error decrypting the commit: {e:?}" + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} due to error decrypting the commit: {e:?}" ); continue; } @@ -142,7 +142,7 @@ impl Pallet { } Ok(_) => { log::trace!( - "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to hotkey mismatch in payload" + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} due to hotkey mismatch in payload" ); continue; } @@ -152,7 +152,7 @@ impl Pallet { Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), Err(_) => { log::trace!( - "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing hotkey: {e:?}" + "Failed to reveal commit for mechanism {netuid_index} submitted by {who:?} due to error deserializing hotkey: {e:?}" ); continue; } @@ -166,7 +166,7 @@ impl Pallet { Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), Err(e) => { log::trace!( - "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing both payload formats: {e:?}" + "Failed to 
reveal commit for mechanism {netuid_index} submitted by {who:?} due to error deserializing both payload formats: {e:?}" ); continue; } @@ -180,13 +180,13 @@ impl Pallet { if let Err(e) = Self::do_set_sub_weights( T::RuntimeOrigin::signed(who.clone()), netuid, - SubId::from(subid), + MechId::from(mecid), uids, values, version_key, ) { log::trace!( - "Failed to `do_set_sub_weights` for subsubnet {netuid_index} submitted by {who:?}: {e:?}" + "Failed to `do_set_sub_weights` for mechanism {netuid_index} submitted by {who:?}: {e:?}" ); continue; } diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 796cf5614b..15b5d07d91 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -361,14 +361,14 @@ impl Pallet { /// * 'NetworkRemoved': Emitted when a network is successfully removed. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': If the specified network does not exist. + /// * 'MechanismDoesNotExist': If the specified network does not exist. /// * 'NotSubnetOwner': If the caller does not own the specified subnet. /// pub fn user_remove_network(coldkey: T::AccountId, netuid: NetUid) -> dispatch::DispatchResult { // --- 1. Ensure this subnet exists. ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); // --- 2. Ensure the caller owns this subnet. @@ -409,7 +409,7 @@ impl Pallet { // --- 1. Return balance to subnet owner. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); let reserved_amount = Self::get_subnet_locked_balance(netuid); - let subsubnets: u8 = SubsubnetCountCurrent::::get(netuid).into(); + let mechanisms: u8 = MechanismCountCurrent::::get(netuid).into(); // --- 2. Remove network count. 
SubnetworkN::::remove(netuid); @@ -427,14 +427,14 @@ impl Pallet { let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let keys = Keys::::iter_prefix(netuid).collect::>(); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - for subid in 0..subsubnets { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + for mecid in 0..mechanisms { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); } // --- 7. Removes the weights for this subnet (do not remove). - for subid in 0..subsubnets { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + for mecid in 0..mechanisms { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); } @@ -457,15 +457,15 @@ impl Pallet { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - for subid in 0..subsubnets { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + for mecid in 0..mechanisms { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); Incentive::::remove(netuid_index); } Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - for subid in 0..subsubnets { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + for mecid in 0..mechanisms { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); LastUpdate::::remove(netuid_index); } ValidatorPermit::::remove(netuid); diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index d284f33eda..f1046fb682 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -648,7 +648,7 @@ impl Pallet { // Run the epoch. 
let hotkey_emission: Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> = - Self::epoch_with_subsubnets(netuid, pending_alpha.saturating_add(pending_swapped)); + Self::epoch_with_mechanisms(netuid, pending_alpha.saturating_add(pending_swapped)); log::debug!("hotkey_emission: {hotkey_emission:?}"); // Compute the pending validator alpha. diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 3dfcf0ac05..4dd949d3eb 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -6,7 +6,7 @@ use safe_math::*; use sp_std::collections::btree_map::IntoIter; use sp_std::vec; use substrate_fixed::types::{I32F32, I64F64, I96F32}; -use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, MechId}; #[derive(Debug, Default)] pub struct EpochTerms { @@ -65,11 +65,11 @@ impl Pallet { netuid: NetUid, rao_emission: AlphaCurrency, ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { - // Run subsubnet-style epoch - let output = Self::epoch_subsubnet(netuid, SubId::MAIN, rao_emission); + // Run mechanism-style epoch + let output = Self::epoch_mechanism(netuid, MechId::MAIN, rao_emission); // Persist values in legacy format - Self::persist_subsub_epoch_terms(netuid, SubId::MAIN, output.as_map()); + Self::persist_mechanism_epoch_terms(netuid, MechId::MAIN, output.as_map()); Self::persist_netuid_epoch_terms(netuid, output.as_map()); // Remap and return @@ -84,16 +84,16 @@ impl Pallet { netuid: NetUid, rao_emission: AlphaCurrency, ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { - Self::epoch_dense_subsubnet(netuid, SubId::MAIN, rao_emission) + Self::epoch_dense_mechanism(netuid, MechId::MAIN, rao_emission) } /// Persists per-subsubnet epoch output in state - pub fn persist_subsub_epoch_terms( + pub fn persist_mechanism_epoch_terms( netuid: NetUid, - subid: SubId, + mecid: MechId, output: 
&BTreeMap, ) { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); let mut terms_sorted: sp_std::vec::Vec<&EpochTerms> = output.values().collect(); terms_sorted.sort_unstable_by_key(|t| t.uid); @@ -150,13 +150,13 @@ impl Pallet { /// Calculates reward consensus and returns the emissions for uids/hotkeys in a given `netuid`. /// (Dense version used only for testing purposes.) #[allow(clippy::indexing_slicing)] - pub fn epoch_dense_subsubnet( + pub fn epoch_dense_mechanism( netuid: NetUid, - subid: SubId, + mecid: MechId, rao_emission: AlphaCurrency, ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { // Calculate netuid storage index - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); // Get subnetwork size. let n: u16 = Self::get_subnetwork_n(netuid); @@ -585,13 +585,13 @@ impl Pallet { /// * 'debug' ( bool ): /// - Print debugging outputs. 
/// - pub fn epoch_subsubnet( + pub fn epoch_mechanism( netuid: NetUid, - subid: SubId, + mecid: MechId, rao_emission: AlphaCurrency, ) -> EpochOutput { // Calculate netuid storage index - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); // Initialize output keys (neuron hotkeys) and UIDs let mut terms_map: BTreeMap = Keys::::iter_prefix(netuid) diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 00fab25ae4..f65191062b 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -87,7 +87,7 @@ pub mod pallet { use substrate_fixed::types::{I96F32, U64F64}; use subtensor_macros::freeze_struct; use subtensor_runtime_common::{ - AlphaCurrency, Currency, NetUid, NetUidStorageIndex, SubId, TaoCurrency, + AlphaCurrency, Currency, NetUid, NetUidStorageIndex, MechId, TaoCurrency, }; #[cfg(not(feature = "std"))] @@ -1833,36 +1833,36 @@ pub mod pallet { pub type CommitRevealWeightsVersion = StorageValue<_, u16, ValueQuery, DefaultCommitRevealWeightsVersion>; - /// ====================== - /// ==== Sub-subnets ===== - /// ====================== + /// ============================ + /// ==== Subnet Mechanisms ===== + /// ============================ #[pallet::type_value] /// -- ITEM (Default number of sub-subnets) - pub fn DefaultSubsubnetCount() -> SubId { - SubId::from(1) + pub fn DefaultMechanismCount() -> MechId { + MechId::from(1) } #[pallet::type_value] /// -- ITEM (Maximum number of sub-subnets) - pub fn MaxSubsubnetCount() -> SubId { - SubId::from(8) + pub fn MaxMechanismCount() -> MechId { + MechId::from(8) } #[pallet::type_value] /// -- ITEM (Rate limit for subsubnet count updates) - pub fn SubsubnetCountSetRateLimit() -> u64 { + pub fn MechanismCountSetRateLimit() -> u64 { prod_or_fast!(7_200, 1) } #[pallet::type_value] /// -- ITEM (Rate limit for subsubnet emission distribution updates) - pub fn SubsubnetEmissionRateLimit() -> u64 { 
+ pub fn MechanismEmissionRateLimit() -> u64 { prod_or_fast!(7_200, 1) } #[pallet::storage] - /// --- MAP ( netuid ) --> Current number of sub-subnets - pub type SubsubnetCountCurrent = - StorageMap<_, Twox64Concat, NetUid, SubId, ValueQuery, DefaultSubsubnetCount>; + /// --- MAP ( netuid ) --> Current number of subnet mechanisms + pub type MechanismCountCurrent = + StorageMap<_, Twox64Concat, NetUid, MechId, ValueQuery, DefaultMechanismCount>; #[pallet::storage] - /// --- MAP ( netuid ) --> Normalized vector of emission split proportion between subsubnets - pub type SubsubnetEmissionSplit = + /// --- MAP ( netuid ) --> Normalized vector of emission split proportion between subnet mechanisms + pub type MechanismEmissionSplit = StorageMap<_, Twox64Concat, NetUid, Vec, OptionQuery>; /// ================== diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index f4583c0711..5dd3cb1d39 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -53,7 +53,7 @@ mod dispatches { /// - On successfully setting the weights on chain. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to set weights on a non-existent network. /// /// * 'NotRegistered': @@ -94,7 +94,7 @@ mod dispatches { } } - /// --- Sets the caller weights for the incentive mechanism for subsubnets. The call + /// --- Sets the caller weights for the incentive mechanism for mechanisms. The call /// can be made from the hotkey account so is potentially insecure, however, the damage /// of changing weights is minimal if caught early. This function includes all the /// checks that the passed weights meet the requirements. Stored as u16s they represent @@ -114,8 +114,8 @@ mod dispatches { /// * `netuid` (u16): /// - The network uid we are setting these weights on. /// - /// * `subid` (`u8`): - /// - The u8 subsubnet identifier. 
+ /// * `mecid` (`u8`): + /// - The u8 mechnism identifier. /// /// * `dests` (Vec): /// - The edge endpoint for the weight, i.e. j for w_ij. @@ -132,7 +132,7 @@ mod dispatches { /// - On successfully setting the weights on chain. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to set weights on a non-existent network. /// /// * 'NotRegistered': @@ -162,7 +162,7 @@ mod dispatches { pub fn set_sub_weights( origin: OriginFor, netuid: NetUid, - subid: SubId, + mecid: MechId, dests: Vec, weights: Vec, version_key: u64, @@ -170,7 +170,7 @@ mod dispatches { if Self::get_commit_reveal_weights_enabled(netuid) { Err(Error::::CommitRevealEnabled.into()) } else { - Self::do_set_sub_weights(origin, netuid, subid, dests, weights, version_key) + Self::do_set_sub_weights(origin, netuid, mecid, dests, weights, version_key) } } @@ -243,7 +243,7 @@ mod dispatches { Self::do_commit_weights(origin, netuid, commit_hash) } - /// ---- Used to commit a hash of your weight values to later be revealed for subsubnets. + /// ---- Used to commit a hash of your weight values to later be revealed for mechanisms. /// /// # Args: /// * `origin`: (`::RuntimeOrigin`): @@ -252,8 +252,8 @@ mod dispatches { /// * `netuid` (`u16`): /// - The u16 network identifier. /// - /// * `subid` (`u8`): - /// - The u8 subsubnet identifier. + /// * `mecid` (`u8`): + /// - The u8 mechanism identifier. /// /// * `commit_hash` (`H256`): /// - The hash representing the committed weights. @@ -272,10 +272,10 @@ mod dispatches { pub fn commit_sub_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, commit_hash: H256, ) -> DispatchResult { - Self::do_commit_sub_weights(origin, netuid, subid, commit_hash) + Self::do_commit_sub_weights(origin, netuid, mecid, commit_hash) } /// --- Allows a hotkey to commit weight hashes for multiple netuids as a batch. 
@@ -364,7 +364,7 @@ mod dispatches { Self::do_reveal_weights(origin, netuid, uids, values, salt, version_key) } - /// ---- Used to reveal the weights for a previously committed hash for subsubnets. + /// ---- Used to reveal the weights for a previously committed hash for mechanisms. /// /// # Args: /// * `origin`: (`::RuntimeOrigin`): @@ -373,8 +373,8 @@ mod dispatches { /// * `netuid` (`u16`): /// - The u16 network identifier. /// - /// * `subid` (`u8`): - /// - The u8 subsubnet identifier. + /// * `mecid` (`u8`): + /// - The u8 mechanism identifier. /// /// * `uids` (`Vec`): /// - The uids for the weights being revealed. @@ -411,13 +411,13 @@ mod dispatches { pub fn reveal_sub_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, uids: Vec, values: Vec, salt: Vec, version_key: u64, ) -> DispatchResult { - Self::do_reveal_sub_weights(origin, netuid, subid, uids, values, salt, version_key) + Self::do_reveal_sub_weights(origin, netuid, mecid, uids, values, salt, version_key) } /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed. @@ -462,7 +462,7 @@ mod dispatches { Self::do_commit_timelocked_weights(origin, netuid, commit, reveal_round, 4) } - /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed for subsubnets. + /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed for mechanisms. /// /// # Args: /// * `origin`: (`::RuntimeOrigin`): @@ -471,8 +471,8 @@ mod dispatches { /// * `netuid` (`u16`): /// - The u16 network identifier. /// - /// * `subid` (`u8`): - /// - The u8 subsubnet identifier. + /// * `mecid` (`u8`): + /// - The u8 mechanism identifier. /// /// * `commit` (`Vec`): /// - The encrypted compressed commit. 
@@ -501,11 +501,11 @@ mod dispatches { pub fn commit_crv3_sub_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, commit: BoundedVec>, reveal_round: u64, ) -> DispatchResult { - Self::do_commit_timelocked_sub_weights(origin, netuid, subid, commit, reveal_round, 4) + Self::do_commit_timelocked_sub_weights(origin, netuid, mecid, commit, reveal_round, 4) } /// ---- The implementation for batch revealing committed weights. @@ -782,7 +782,7 @@ mod dispatches { /// - On successfully serving the axon info. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to set weights on a non-existent network. /// /// * 'NotRegistered': @@ -866,7 +866,7 @@ mod dispatches { /// - On successfully serving the axon info. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to set weights on a non-existent network. /// /// * 'NotRegistered': @@ -975,7 +975,7 @@ mod dispatches { /// - On successfully registering a uid to a neuron slot on a subnetwork. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to register to a non existent network. /// /// * 'TooManyRegistrationsThisBlock': @@ -1366,7 +1366,7 @@ mod dispatches { /// - On successfully registering a child to a hotkey. /// /// # Errors: - /// * `SubNetworkDoesNotExist`: + /// * `MechanismDoesNotExist`: /// - Attempting to register to a non-existent network. /// * `RegistrationNotPermittedOnRootSubnet`: /// - Attempting to register a child on the root network. @@ -2304,8 +2304,8 @@ mod dispatches { /// * `netuid` (`u16`): /// - The u16 network identifier. /// - /// * `subid` (`u8`): - /// - The u8 subsubnet identifier. + /// * `mecid` (`u8`): + /// - The u8 mechanism identifier. /// /// * `commit` (`Vec`): /// - The encrypted compressed commit. 
@@ -2329,7 +2329,7 @@ mod dispatches { pub fn commit_timelocked_sub_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, commit: BoundedVec>, reveal_round: u64, commit_reveal_version: u16, @@ -2337,7 +2337,7 @@ mod dispatches { Self::do_commit_timelocked_sub_weights( origin, netuid, - subid, + mecid, commit, reveal_round, commit_reveal_version, diff --git a/pallets/subtensor/src/macros/errors.rs b/pallets/subtensor/src/macros/errors.rs index ed6ca3c002..b8f62dea89 100644 --- a/pallets/subtensor/src/macros/errors.rs +++ b/pallets/subtensor/src/macros/errors.rs @@ -7,8 +7,6 @@ mod errors { #[derive(PartialEq)] #[pallet::error] pub enum Error { - /// The subnet does not exist. - SubNetworkDoesNotExist, /// The root network does not exist. RootNetworkDoesNotExist, /// The user is trying to serve an axon which is not of type 4 (IPv4) or 6 (IPv6). @@ -168,7 +166,7 @@ mod errors { TxChildkeyTakeRateLimitExceeded, /// Invalid identity. InvalidIdentity, - /// Trying to register a subnet into a mechanism that does not exist. + /// Subnet mechanism does not exist. MechanismDoesNotExist, /// Trying to unstake your lock amount. 
CannotUnstakeLock, diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index 2d3ef32509..4d842d26b8 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -8,7 +8,7 @@ use pallet_commitments::GetCommitments; use substrate_fixed::types::I64F64; use substrate_fixed::types::I96F32; use subtensor_macros::freeze_struct; -use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, MechId, TaoCurrency}; #[freeze_struct("6fc49d5a7dc0e339")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -805,17 +805,17 @@ impl Pallet { metagraphs } - pub fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option> { - if Self::ensure_subsubnet_exists(netuid, subid).is_err() { + pub fn get_submetagraph(netuid: NetUid, mecid: MechId) -> Option> { + if Self::ensure_mechanism_exists(netuid, mecid).is_err() { return None; } // Get netuid metagraph let maybe_meta = Self::get_metagraph(netuid); if let Some(mut meta) = maybe_meta { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); - // Update with subsubnet information + // Update with mechanism information meta.netuid = NetUid::from(u16::from(netuid_index)).into(); meta.last_update = LastUpdate::::get(netuid_index) .into_iter() @@ -836,9 +836,9 @@ impl Pallet { let netuids = Self::get_all_subnet_netuids(); let mut metagraphs = Vec::>>::new(); for netuid in netuids.clone().iter() { - let subsub_count = u8::from(SubsubnetCountCurrent::::get(netuid)); - for subid in 0..subsub_count { - metagraphs.push(Self::get_submetagraph(*netuid, SubId::from(subid))); + let mechanism_count = u8::from(MechanismCountCurrent::::get(netuid)); + for mecid in 0..mechanism_count { + metagraphs.push(Self::get_submetagraph(*netuid, 
MechId::from(mecid))); } } metagraphs @@ -862,7 +862,7 @@ impl Pallet { pub fn get_selective_submetagraph( netuid: NetUid, - subid: SubId, + mecid: MechId, metagraph_indexes: Vec, ) -> Option> { if !Self::if_subnet_exist(netuid) { @@ -870,7 +870,7 @@ impl Pallet { } else { let mut result = SelectiveMetagraph::default(); for index in metagraph_indexes.iter() { - let value = Self::get_single_selective_submetagraph(netuid, subid, *index); + let value = Self::get_single_selective_submetagraph(netuid, mecid, *index); result.merge_value(&value, *index as usize); } Some(result) @@ -1443,12 +1443,12 @@ impl Pallet { fn get_single_selective_submetagraph( netuid: NetUid, - subid: SubId, + mecid: MechId, metagraph_index: u16, ) -> SelectiveMetagraph { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); - // Default to netuid, replace as needed for subid + // Default to netuid, replace as needed for mecid match SelectiveMetagraphIndex::from_index(metagraph_index as usize) { Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { netuid: netuid.into(), diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs index 4c1bbd0b9f..f803838743 100644 --- a/pallets/subtensor/src/staking/recycle_alpha.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -25,7 +25,7 @@ impl Pallet { ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); ensure!( @@ -93,7 +93,7 @@ impl Pallet { ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); ensure!( diff --git a/pallets/subtensor/src/staking/set_children.rs b/pallets/subtensor/src/staking/set_children.rs index c6c37f7e96..b4629ec54d 100644 --- a/pallets/subtensor/src/staking/set_children.rs +++ b/pallets/subtensor/src/staking/set_children.rs @@ -23,7 +23,7 @@ impl Pallet { /// - If 
all checks pass and setting the childkeys is scheduled. /// /// # Errors: - /// * `SubNetworkDoesNotExist`: + /// * `MechanismDoesNotExist`: /// - Attempting to register to a non-existent network. /// * `RegistrationNotPermittedOnRootSubnet`: /// - Attempting to register a child on the root network. @@ -64,7 +64,7 @@ impl Pallet { // Check that the network we are trying to create the child on exists. ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); // Check that the coldkey owns the hotkey. @@ -148,7 +148,7 @@ impl Pallet { /// - On successfully registering children to a hotkey. /// /// # Errors: - /// * `SubNetworkDoesNotExist`: + /// * `MechanismDoesNotExist`: /// - Attempting to register to a non-existent network. /// * `RegistrationNotPermittedOnRootSubnet`: /// - Attempting to register a child on the root network. diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/mechanism.rs similarity index 80% rename from pallets/subtensor/src/subnets/subsubnet.rs rename to pallets/subtensor/src/subnets/mechanism.rs index 337bf809fd..862506aef5 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/mechanism.rs @@ -6,7 +6,7 @@ use crate::epoch::run_epoch::EpochTerms; use alloc::collections::BTreeMap; use safe_math::*; use substrate_fixed::types::U64F64; -use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, MechId}; pub type LeaseId = u32; @@ -27,12 +27,12 @@ pub type BalanceOf = /// pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 4096; -// Theoretical maximum number of subsubnets per subnet -// GLOBAL_MAX_SUBNET_COUNT * MAX_SUBSUBNET_COUNT_PER_SUBNET should be 0x10000 -pub const MAX_SUBSUBNET_COUNT_PER_SUBNET: u8 = 16; +// Theoretical maximum number of mechanisms per subnet +// GLOBAL_MAX_SUBNET_COUNT * MAX_MECHANISM_COUNT_PER_SUBNET should be 
0x10000 +pub const MAX_MECHANISM_COUNT_PER_SUBNET: u8 = 16; impl Pallet { - pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUidStorageIndex { + pub fn get_mechanism_storage_index(netuid: NetUid, sub_id: MechId) -> NetUidStorageIndex { u16::from(sub_id) .saturating_mul(GLOBAL_MAX_SUBNET_COUNT) .saturating_add(u16::from(netuid)) @@ -41,7 +41,7 @@ impl Pallet { pub fn get_netuid_and_subid( netuid_index: NetUidStorageIndex, - ) -> Result<(NetUid, SubId), Error> { + ) -> Result<(NetUid, MechId), Error> { let maybe_netuid = u16::from(netuid_index).checked_rem(GLOBAL_MAX_SUBNET_COUNT); if let Some(netuid_u16) = maybe_netuid { let netuid = NetUid::from(netuid_u16); @@ -49,68 +49,68 @@ impl Pallet { // Make sure the base subnet exists ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); // Extract sub_id let sub_id_u8 = u8::try_from(u16::from(netuid_index).safe_div(GLOBAL_MAX_SUBNET_COUNT)) - .map_err(|_| Error::::SubNetworkDoesNotExist)?; - let sub_id = SubId::from(sub_id_u8); + .map_err(|_| Error::::MechanismDoesNotExist)?; + let sub_id = MechId::from(sub_id_u8); - if SubsubnetCountCurrent::::get(netuid) > sub_id { + if MechanismCountCurrent::::get(netuid) > sub_id { Ok((netuid, sub_id)) } else { - Err(Error::::SubNetworkDoesNotExist.into()) + Err(Error::::MechanismDoesNotExist.into()) } } else { - Err(Error::::SubNetworkDoesNotExist.into()) + Err(Error::::MechanismDoesNotExist.into()) } } - pub fn get_current_subsubnet_count(netuid: NetUid) -> SubId { - SubsubnetCountCurrent::::get(netuid) + pub fn get_current_mechanism_count(netuid: NetUid) -> MechId { + MechanismCountCurrent::::get(netuid) } - pub fn ensure_subsubnet_exists(netuid: NetUid, sub_id: SubId) -> DispatchResult { + pub fn ensure_mechanism_exists(netuid: NetUid, sub_id: MechId) -> DispatchResult { // Make sure the base subnet exists ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + 
Error::::MechanismDoesNotExist ); - // Make sure the subsub limit is not exceeded + // Make sure the mechanism limit is not exceeded ensure!( - SubsubnetCountCurrent::::get(netuid) > sub_id, - Error::::SubNetworkDoesNotExist + MechanismCountCurrent::::get(netuid) > sub_id, + Error::::MechanismDoesNotExist ); Ok(()) } /// Set the desired valus of sub-subnet count for a subnet identified /// by netuid - pub fn do_set_subsubnet_count(netuid: NetUid, subsubnet_count: SubId) -> DispatchResult { + pub fn do_set_mechanism_count(netuid: NetUid, mechanism_count: MechId) -> DispatchResult { // Make sure the subnet exists ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); // Count cannot be zero - ensure!(subsubnet_count > 0.into(), Error::::InvalidValue); + ensure!(mechanism_count > 0.into(), Error::::InvalidValue); // Make sure we are not exceeding the max sub-subnet count ensure!( - subsubnet_count <= MaxSubsubnetCount::::get(), + mechanism_count <= MaxMechanismCount::::get(), Error::::InvalidValue ); // Make sure we are not allowing numbers that will break the math ensure!( - subsubnet_count <= SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET), + mechanism_count <= MechId::from(MAX_MECHANISM_COUNT_PER_SUBNET), Error::::InvalidValue ); - Self::update_subsubnet_counts_if_needed(netuid, subsubnet_count); + Self::update_mechanism_counts_if_needed(netuid, mechanism_count); Ok(()) } @@ -118,14 +118,14 @@ impl Pallet { /// Update current count for a subnet identified by netuid /// - Cleans up all sub-subnet maps if count is reduced /// - pub fn update_subsubnet_counts_if_needed(netuid: NetUid, new_count: SubId) { - let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); + pub fn update_mechanism_counts_if_needed(netuid: NetUid, new_count: MechId) { + let old_count = u8::from(MechanismCountCurrent::::get(netuid)); let new_count_u8 = u8::from(new_count); if old_count != new_count_u8 { if old_count > new_count_u8 { - for 
subid in new_count_u8..old_count { + for mecid in new_count_u8..old_count { let netuid_index = - Self::get_subsubnet_storage_index(netuid, SubId::from(subid)); + Self::get_mechanism_storage_index(netuid, MechId::from(mecid)); // Cleanup Weights let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); @@ -148,10 +148,10 @@ impl Pallet { } } - SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); + MechanismCountCurrent::::insert(netuid, MechId::from(new_count)); // Reset split back to even - SubsubnetEmissionSplit::::remove(netuid); + MechanismEmissionSplit::::remove(netuid); } } @@ -159,14 +159,14 @@ impl Pallet { // Make sure the subnet exists ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); if let Some(split) = maybe_split { // Check the length ensure!(!split.is_empty(), Error::::InvalidValue); ensure!( - split.len() <= u8::from(SubsubnetCountCurrent::::get(netuid)) as usize, + split.len() <= u8::from(MechanismCountCurrent::::get(netuid)) as usize, Error::::InvalidValue ); @@ -174,20 +174,20 @@ impl Pallet { let total: u64 = split.iter().map(|s| *s as u64).sum(); ensure!(total == u16::MAX as u64, Error::::InvalidValue); - SubsubnetEmissionSplit::::insert(netuid, split); + MechanismEmissionSplit::::insert(netuid, split); } else { - SubsubnetEmissionSplit::::remove(netuid); + MechanismEmissionSplit::::remove(netuid); } Ok(()) } /// Split alpha emission in sub-subnet proportions - /// stored in SubsubnetEmissionSplit + /// stored in MechanismEmissionSplit /// pub fn split_emissions(netuid: NetUid, alpha: AlphaCurrency) -> Vec { - let subsubnet_count = u64::from(SubsubnetCountCurrent::::get(netuid)); - let maybe_split = SubsubnetEmissionSplit::::get(netuid); + let mechanism_count = u64::from(MechanismCountCurrent::::get(netuid)); + let maybe_split = MechanismEmissionSplit::::get(netuid); // Unset split means even distribution let mut result: Vec = if let Some(split) = maybe_split { @@ 
-202,16 +202,16 @@ impl Pallet { }) .collect() } else { - let per_subsubnet = u64::from(alpha).safe_div(subsubnet_count); - vec![AlphaCurrency::from(per_subsubnet); subsubnet_count as usize] + let per_mechanism = u64::from(alpha).safe_div(mechanism_count); + vec![AlphaCurrency::from(per_mechanism); mechanism_count as usize] }; - // Trim / extend and pad with zeroes if result is shorter than subsubnet_count - if result.len() != subsubnet_count as usize { - result.resize(subsubnet_count as usize, 0u64.into()); // pad with AlphaCurrency::from(0) + // Trim / extend and pad with zeroes if result is shorter than mechanism_count + if result.len() != mechanism_count as usize { + result.resize(mechanism_count as usize, 0u64.into()); // pad with AlphaCurrency::from(0) } - // If there's any rounding error or lost due to truncation emission, credit it to subsubnet 0 + // If there's any rounding error or lost due to truncation emission, credit it to mechanism 0 let rounding_err = u64::from(alpha).saturating_sub(result.iter().map(|s| u64::from(*s)).sum()); if let Some(cell) = result.first_mut() { @@ -242,7 +242,7 @@ impl Pallet { /// Runs the epoch function for each sub-subnet and consolidates hotkey_emission /// into a single vector. 
/// - pub fn epoch_with_subsubnets( + pub fn epoch_with_mechanisms( netuid: NetUid, rao_emission: AlphaCurrency, ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { @@ -250,18 +250,18 @@ impl Pallet { Self::split_emissions(netuid, rao_emission) .into_iter() .enumerate() - // Run epoch function for each subsubnet to distribute its portion of emissions + // Run epoch function for each mechanism to distribute its portion of emissions .flat_map(|(sub_id_usize, sub_emission)| { let sub_id_u8: u8 = sub_id_usize.try_into().unwrap_or_default(); - let sub_id = SubId::from(sub_id_u8); + let sub_id = MechId::from(sub_id_u8); - // Run epoch function on the subsubnet emission - let epoch_output = Self::epoch_subsubnet(netuid, sub_id, sub_emission); - Self::persist_subsub_epoch_terms(netuid, sub_id, epoch_output.as_map()); + // Run epoch function on the mechanism emission + let epoch_output = Self::epoch_mechanism(netuid, sub_id, sub_emission); + Self::persist_mechanism_epoch_terms(netuid, sub_id, epoch_output.as_map()); - // Calculate subsubnet weight from the split emission (not the other way because preserving + // Calculate mechanism weight from the split emission (not the other way because preserving // emission accuracy is the priority) - // For zero emission the first subsubnet gets full weight + // For zero emission the first mechanism gets full weight let sub_weight = U64F64::saturating_from_num(sub_emission).safe_div_or( U64F64::saturating_from_num(rao_emission), U64F64::saturating_from_num(if sub_id_u8 == 0 { 1 } else { 0 }), @@ -277,7 +277,7 @@ impl Pallet { .fold(BTreeMap::new(), |mut acc, (hotkey, (terms, sub_weight))| { acc.entry(hotkey) .and_modify(|acc_terms| { - // Server and validator emission come from subsubnet emission and need to be added up + // Server and validator emission come from mechanism emission and need to be added up acc_terms.validator_emission = acc_terms .validator_emission .saturating_add(terms.validator_emission); diff --git 
a/pallets/subtensor/src/subnets/mod.rs b/pallets/subtensor/src/subnets/mod.rs index a3705af084..e93628eef4 100644 --- a/pallets/subtensor/src/subnets/mod.rs +++ b/pallets/subtensor/src/subnets/mod.rs @@ -1,9 +1,9 @@ use super::*; pub mod leasing; +pub mod mechanism; pub mod registration; pub mod serving; pub mod subnet; -pub mod subsubnet; pub mod symbols; pub mod uids; pub mod weights; diff --git a/pallets/subtensor/src/subnets/registration.rs b/pallets/subtensor/src/subnets/registration.rs index a620755f2e..90ba2ea1aa 100644 --- a/pallets/subtensor/src/subnets/registration.rs +++ b/pallets/subtensor/src/subnets/registration.rs @@ -55,7 +55,7 @@ impl Pallet { /// - On successfully registereing a uid to a neuron slot on a subnetwork. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to registed to a non existent network. /// /// * 'TooManyRegistrationsThisBlock': @@ -80,7 +80,7 @@ impl Pallet { ); ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); // --- 3. Ensure the passed network allows registrations. @@ -193,7 +193,7 @@ impl Pallet { /// - On successfully registereing a uid to a neuron slot on a subnetwork. /// /// # Raises: - /// *'SubNetworkDoesNotExist': + /// *'MechanismDoesNotExist': /// - Attempting to registed to a non existent network. /// /// *'TooManyRegistrationsThisBlock': @@ -238,7 +238,7 @@ impl Pallet { ); ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); // --- 3. Ensure the passed network allows registrations. diff --git a/pallets/subtensor/src/subnets/serving.rs b/pallets/subtensor/src/subnets/serving.rs index ae1c97cc7c..cdaf39e51b 100644 --- a/pallets/subtensor/src/subnets/serving.rs +++ b/pallets/subtensor/src/subnets/serving.rs @@ -40,7 +40,7 @@ impl Pallet { /// - On successfully serving the axon info. 
/// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to set weights on a non-existent network. /// /// * 'NotRegistered': @@ -144,7 +144,7 @@ impl Pallet { /// - On successfully serving the axon info. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to set weights on a non-existent network. /// /// * 'NotRegistered': diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 6241c54ef7..7f9d11d7dc 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -311,7 +311,7 @@ impl Pallet { /// /// # Raises /// - /// * `Error::::SubNetworkDoesNotExist`: If the subnet does not exist. + /// * `Error::::MechanismDoesNotExist`: If the subnet does not exist. /// * `DispatchError::BadOrigin`: If the caller is not the subnet owner. /// * `Error::::FirstEmissionBlockNumberAlreadySet`: If the last emission block number has already been set. 
/// @@ -321,7 +321,7 @@ impl Pallet { pub fn do_start_call(origin: T::RuntimeOrigin, netuid: NetUid) -> DispatchResult { ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); Self::ensure_subnet_owner(origin, netuid)?; ensure!( diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index e01f17cad6..cf639f9fbf 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -25,8 +25,8 @@ impl Pallet { Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + for mecid in 0..MechanismCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); Incentive::::mutate(netuid_index, |v| Self::set_element_at(v, neuron_index, 0)); Bonds::::remove(netuid_index, neuron_uid); // Remove bonds for Validator. 
@@ -114,8 +114,8 @@ impl Pallet { Active::::mutate(netuid, |v| v.push(true)); Emission::::mutate(netuid, |v| v.push(0.into())); Consensus::::mutate(netuid, |v| v.push(0)); - for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + for mecid in 0..MechanismCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); Incentive::::mutate(netuid_index, |v| v.push(0)); LastUpdate::::mutate(netuid_index, |v| v.push(block_number)); } @@ -135,7 +135,7 @@ impl Pallet { // Reasonable limits ensure!( Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); ensure!( max_n >= MinAllowedUids::::get(netuid), @@ -178,7 +178,7 @@ impl Pallet { let mut removed_uids = BTreeSet::new(); let mut uids_left_to_process = current_n; - let subsubnets_count = SubsubnetCountCurrent::::get(netuid).into(); + let mechanisms_count = MechanismCountCurrent::::get(netuid).into(); // Iterate from the end (lowest emitters) to the beginning for i in (0..current_n).rev() { @@ -212,8 +212,8 @@ impl Pallet { #[allow(unknown_lints)] Keys::::remove(netuid, neuron_uid); BlockAtRegistration::::remove(netuid, neuron_uid); - for subid in 0..subsubnets_count { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + for mecid in 0..mechanisms_count { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); Weights::::remove(netuid_index, neuron_uid); Bonds::::remove(netuid_index, neuron_uid); } @@ -282,9 +282,9 @@ impl Pallet { ValidatorPermit::::insert(netuid, trimmed_vpermit); StakeWeight::::insert(netuid, trimmed_stake_weight); - // Update incentives/lastupdates for subsubnets - for subid in 0..subsubnets_count { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + // Update incentives/lastupdates for mechanisms + for mecid in 0..mechanisms_count { + let 
netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); let incentive = Incentive::::get(netuid_index); let lastupdate = LastUpdate::::get(netuid_index); let mut trimmed_incentive = Vec::with_capacity(trimmed_uids.len()); @@ -320,8 +320,8 @@ impl Pallet { Keys::::swap(netuid, old_neuron_uid, netuid, new_neuron_uid); BlockAtRegistration::::swap(netuid, old_neuron_uid, netuid, new_neuron_uid); - for subid in 0..subsubnets_count { - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + for mecid in 0..mechanisms_count { + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); // Swap to new position and remap all target uids Weights::::swap(netuid_index, old_neuron_uid, netuid_index, new_neuron_uid); diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index b751630d85..08cc3724d4 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -10,7 +10,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Hash}, }; use sp_std::{collections::vec_deque::VecDeque, vec}; -use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, MechId}; impl Pallet { /// ---- The implementation for committing weight hashes. 
@@ -46,29 +46,29 @@ impl Pallet { netuid: NetUid, commit_hash: H256, ) -> DispatchResult { - Self::internal_commit_weights(origin, netuid, SubId::MAIN, commit_hash) + Self::internal_commit_weights(origin, netuid, MechId::MAIN, commit_hash) } pub fn do_commit_sub_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, commit_hash: H256, ) -> DispatchResult { - Self::internal_commit_weights(origin, netuid, subid, commit_hash) + Self::internal_commit_weights(origin, netuid, mecid, commit_hash) } fn internal_commit_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, commit_hash: H256, ) -> DispatchResult { - // Ensure netuid and subid exist - Self::ensure_subsubnet_exists(netuid, subid)?; + // Ensure netuid and mecid exist + Self::ensure_mechanism_exists(netuid, mecid)?; // Calculate subnet storage index - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); // 1. Verify the caller's signature (hotkey). 
let who = ensure_signed(origin)?; @@ -267,7 +267,7 @@ impl Pallet { Self::internal_commit_timelocked_weights( origin, netuid, - SubId::MAIN, + MechId::MAIN, commit, reveal_round, commit_reveal_version, @@ -277,7 +277,7 @@ impl Pallet { pub fn do_commit_timelocked_sub_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, commit: BoundedVec>, reveal_round: u64, commit_reveal_version: u16, @@ -285,7 +285,7 @@ impl Pallet { Self::internal_commit_timelocked_weights( origin, netuid, - subid, + mecid, commit, reveal_round, commit_reveal_version, @@ -295,16 +295,16 @@ impl Pallet { pub fn internal_commit_timelocked_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, commit: BoundedVec>, reveal_round: u64, commit_reveal_version: u16, ) -> DispatchResult { - // Ensure netuid and subid exist - Self::ensure_subsubnet_exists(netuid, subid)?; + // Ensure netuid and mecid exist + Self::ensure_mechanism_exists(netuid, mecid)?; // Calculate netuid storage index - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); // 1. Verify the caller's signature (hotkey). 
let who = ensure_signed(origin)?; @@ -425,32 +425,32 @@ impl Pallet { salt: Vec, version_key: u64, ) -> DispatchResult { - Self::internal_reveal_weights(origin, netuid, SubId::MAIN, uids, values, salt, version_key) + Self::internal_reveal_weights(origin, netuid, MechId::MAIN, uids, values, salt, version_key) } pub fn do_reveal_sub_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, uids: Vec, values: Vec, salt: Vec, version_key: u64, ) -> DispatchResult { - Self::internal_reveal_weights(origin, netuid, subid, uids, values, salt, version_key) + Self::internal_reveal_weights(origin, netuid, mecid, uids, values, salt, version_key) } fn internal_reveal_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, uids: Vec, values: Vec, salt: Vec, version_key: u64, ) -> DispatchResult { // Calculate netuid storage index - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); // --- 1. Check the caller's signature (hotkey). let who = ensure_signed(origin.clone())?; @@ -528,7 +528,7 @@ impl Pallet { Self::do_set_sub_weights( origin, netuid, - subid, + mecid, uids.clone(), values.clone(), version_key, @@ -603,7 +603,7 @@ impl Pallet { version_keys: Vec, ) -> DispatchResult { // Calculate netuid storage index - let netuid_index = Self::get_subsubnet_storage_index(netuid, SubId::MAIN); + let netuid_index = Self::get_mechanism_storage_index(netuid, MechId::MAIN); // --- 1. Check that the input lists are of the same length. 
let num_reveals = uids_list.len(); @@ -740,13 +740,13 @@ impl Pallet { fn internal_set_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, uids: Vec, values: Vec, version_key: u64, ) -> dispatch::DispatchResult { // Calculate subnet storage index - let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let netuid_index = Self::get_mechanism_storage_index(netuid, mecid); // --- 1. Check the caller's signature. This is the hotkey of a registered account. let hotkey = ensure_signed(origin)?; @@ -764,7 +764,7 @@ impl Pallet { ); // --- 3. Check to see if this is a valid network and sub-subnet. - Self::ensure_subsubnet_exists(netuid, subid)?; + Self::ensure_mechanism_exists(netuid, mecid)?; // --- 4. Check to see if the number of uids is within the max allowed uids for this network. ensure!( @@ -875,7 +875,7 @@ impl Pallet { /// - On successfully setting the weights on chain. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to set weights on a non-existent network. /// /// * 'NotRegistered': @@ -915,7 +915,7 @@ impl Pallet { values: Vec, version_key: u64, ) -> dispatch::DispatchResult { - Self::internal_set_weights(origin, netuid, SubId::MAIN, uids, values, version_key) + Self::internal_set_weights(origin, netuid, MechId::MAIN, uids, values, version_key) } /// ---- The implementation for the extrinsic set_weights. @@ -927,7 +927,7 @@ impl Pallet { /// * 'netuid' (u16): /// - The u16 network identifier. /// - /// * 'subid' (u8): + /// * 'mecid' (u8): /// - The u8 identifier of sub-subnet. /// /// * 'uids' ( Vec ): @@ -944,7 +944,7 @@ impl Pallet { /// - On successfully setting the weights on chain. /// /// # Raises: - /// * 'SubNetworkDoesNotExist': + /// * 'MechanismDoesNotExist': /// - Attempting to set weights on a non-existent network. 
/// /// * 'NotRegistered': @@ -980,12 +980,12 @@ impl Pallet { pub fn do_set_sub_weights( origin: T::RuntimeOrigin, netuid: NetUid, - subid: SubId, + mecid: MechId, uids: Vec, values: Vec, version_key: u64, ) -> dispatch::DispatchResult { - Self::internal_set_weights(origin, netuid, subid, uids, values, version_key) + Self::internal_set_weights(origin, netuid, mecid, uids, values, version_key) } /// ---- The implementation for the extrinsic batch_set_weights. @@ -1113,7 +1113,7 @@ impl Pallet { } } - // --- 3. Non registered peers cant pass. Neither can non-existing subid + // --- 3. Non registered peers cant pass. Neither can non-existing mecid false } diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs index 19737f765c..fdee2182ba 100644 --- a/pallets/subtensor/src/swap/swap_hotkey.rs +++ b/pallets/subtensor/src/swap/swap_hotkey.rs @@ -2,7 +2,7 @@ use super::*; use frame_support::weights::Weight; use sp_core::Get; use substrate_fixed::types::U64F64; -use subtensor_runtime_common::{Currency, NetUid, SubId}; +use subtensor_runtime_common::{Currency, NetUid, MechId}; impl Pallet { /// Swaps the hotkey of a coldkey account. @@ -411,8 +411,8 @@ impl Pallet { // 3.5 Swap WeightCommits // WeightCommits( hotkey ) --> Vec -- the weight commits for the hotkey. 
if is_network_member { - for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { - let netuid_index = Self::get_subsubnet_storage_index(netuid, SubId::from(subid)); + for mecid in 0..MechanismCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_mechanism_storage_index(netuid, MechId::from(mecid)); if let Ok(old_weight_commits) = WeightCommits::::try_get(netuid_index, old_hotkey) { diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index 67dfe47fbe..cf11cf6190 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -62,7 +62,7 @@ fn test_do_set_child_singular_network_does_not_exist() { netuid, vec![(proportion, child)] ), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); }); } @@ -328,7 +328,7 @@ fn test_add_singular_child() { netuid, vec![(u64::MAX, child)] ), - Err(Error::::SubNetworkDoesNotExist.into()) + Err(Error::::MechanismDoesNotExist.into()) ); add_network(netuid, 1, 0); step_rate_limit(&TransactionType::SetChildren, netuid); @@ -472,7 +472,7 @@ fn test_do_set_empty_children_network_does_not_exist() { netuid, vec![] ), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); }); } @@ -601,7 +601,7 @@ fn test_do_schedule_children_multiple_network_does_not_exist() { netuid, vec![(proportion, child1)] ), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); }); } @@ -1200,7 +1200,7 @@ fn test_do_revoke_children_multiple_network_does_not_exist() { netuid, vec![(u64::MAX / 2, child1), (u64::MAX / 2, child2)] ), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); }); } diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/mechanism.rs similarity index 80% rename from pallets/subtensor/src/tests/subsubnet.rs rename to pallets/subtensor/src/tests/mechanism.rs index 8b128a7241..3ab6ad785a 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ 
b/pallets/subtensor/src/tests/mechanism.rs @@ -5,39 +5,39 @@ )] // Run all tests -// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::subsubnet --show-output +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::mechanism --show-output // Test plan: -// - [x] Netuid index math (with SubsubnetCountCurrent limiting) +// - [x] Netuid index math (with MechanismCountCurrent limiting) // - [x] Sub-subnet validity tests // - [x] do_set_desired tests // - [x] Emissions are split proportionally // - [x] Sum of split emissions is equal to rao_emission passed to epoch -// - [x] Only subnet owner or root can set desired subsubnet count (pallet admin test) -// - [x] Weights can be set by subsubnet -// - [x] Weights can be commited/revealed by subsubnet -// - [x] Weights can be commited/revealed in crv3 by subsubnet -// - [x] Prevent weight setting/commitment/revealing above subsubnet_limit_in_force -// - [x] Prevent weight commitment/revealing above subsubnet_limit_in_force -// - [x] Prevent weight commitment/revealing in crv3 above subsubnet_limit_in_force -// - [x] When a miner is deregistered, their weights are cleaned across all subsubnets -// - [x] Weight setting rate limiting is enforced by subsubnet -// - [x] Bonds are applied per subsubnet -// - [x] Incentives are per subsubnet -// - [x] Per-subsubnet incentives are distributed proportionally to miner weights -// - [x] Subsubnet limit can be set up to 8 (with admin pallet) -// - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared -// - [x] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms +// - [x] Only subnet owner or root can set desired mechanism count (pallet admin test) +// - [x] Weights can be set by mechanism +// - [x] Weights can be commited/revealed by mechanism +// - [x] Weights can be commited/revealed in crv3 by mechanism +// - [x] Prevent 
weight setting/commitment/revealing above mechanism_limit_in_force +// - [x] Prevent weight commitment/revealing above mechanism_limit_in_force +// - [x] Prevent weight commitment/revealing in crv3 above mechanism_limit_in_force +// - [x] When a miner is deregistered, their weights are cleaned across all mechanisms +// - [x] Weight setting rate limiting is enforced by mechanism +// - [x] Bonds are applied per mechanism +// - [x] Incentives are per mechanism +// - [x] Per-mechanism incentives are distributed proportionally to miner weights +// - [x] Mechanism limit can be set up to 8 (with admin pallet) +// - [x] When reduction of mechanism limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared +// - [x] Epoch terms of subnet are weighted sum (or logical OR) of all mechanism epoch terms // - [x] Subnet epoch terms persist in state -// - [x] Subsubnet epoch terms persist in state -// - [x] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake -// - [x] Miner with no weights on any subsubnet receives no reward -// - [x] SubsubnetEmissionSplit is reset on subsubnet count increase -// - [x] SubsubnetEmissionSplit is reset on subsubnet count decrease +// - [x] Mechanism epoch terms persist in state +// - [x] "Yuma Emergency Mode" (consensus sum is 0 for a mechanism), emission distributed by stake +// - [x] Miner with no weights on any mechanism receives no reward +// - [x] MechanismEmissionSplit is reset on mechanism count increase +// - [x] MechanismEmissionSplit is reset on mechanism count decrease use super::mock::*; use crate::coinbase::reveal_commits::WeightsTlockPayload; -use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; +use crate::subnets::mechanism::{GLOBAL_MAX_SUBNET_COUNT, MAX_MECHANISM_COUNT_PER_SUBNET}; use crate::*; use alloc::collections::BTreeMap; use approx::assert_abs_diff_eq; @@ -52,7 +52,7 @@ use sp_core::{H256, U256}; use 
sp_runtime::traits::{BlakeTwo256, Hash}; use sp_std::collections::vec_deque::VecDeque; use substrate_fixed::types::{I32F32, U64F64}; -use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, MechId}; use tle::{ curves::drand::TinyBLS381, ibe::fullident::Identity, stream_ciphers::AESGCMStreamCipherProvider, tlock::tle, @@ -75,9 +75,9 @@ fn test_index_from_netuid_and_subnet() { ] .iter() .for_each(|(netuid, sub_id)| { - let idx = SubtensorModule::get_subsubnet_storage_index( + let idx = SubtensorModule::get_mechanism_storage_index( NetUid::from(*netuid), - SubId::from(*sub_id), + MechId::from(*sub_id), ); let expected = *sub_id as u64 * GLOBAL_MAX_SUBNET_COUNT as u64 + *netuid as u64; assert_eq!(idx, NetUidStorageIndex::from(expected as u16)); @@ -109,16 +109,16 @@ fn test_netuid_and_subnet_from_index() { // Allow subnet ID NetworksAdded::::insert(NetUid::from(expected_netuid), true); - SubsubnetCountCurrent::::insert( + MechanismCountCurrent::::insert( NetUid::from(expected_netuid), - SubId::from(expected_subid + 1), + MechId::from(expected_subid + 1), ); - let (netuid, subid) = + let (netuid, mecid) = SubtensorModule::get_netuid_and_subid(NetUidStorageIndex::from(*netuid_index)) .unwrap(); assert_eq!(netuid, NetUid::from(expected_netuid)); - assert_eq!(subid, SubId::from(expected_subid)); + assert_eq!(mecid, MechId::from(expected_subid)); }); }); } @@ -126,98 +126,98 @@ fn test_netuid_and_subnet_from_index() { #[test] fn test_netuid_index_math_constants() { assert_eq!( - GLOBAL_MAX_SUBNET_COUNT as u64 * MAX_SUBSUBNET_COUNT_PER_SUBNET as u64, + GLOBAL_MAX_SUBNET_COUNT as u64 * MAX_MECHANISM_COUNT_PER_SUBNET as u64, 0x10000 ); } #[test] -fn ensure_subsubnet_exists_ok() { +fn ensure_mechanism_exists_ok() { new_test_ext(1).execute_with(|| { let netuid: NetUid = 3u16.into(); - let sub_id = SubId::from(1u8); + let sub_id = MechId::from(1u8); // ensure base subnet exists 
NetworksAdded::::insert(NetUid::from(netuid), true); // Allow at least 2 sub-subnets (so sub_id = 1 is valid) - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); - assert_ok!(SubtensorModule::ensure_subsubnet_exists(netuid, sub_id)); + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); + assert_ok!(SubtensorModule::ensure_mechanism_exists(netuid, sub_id)); }); } #[test] -fn ensure_subsubnet_fails_when_base_subnet_missing() { +fn ensure_mechanism_fails_when_base_subnet_missing() { new_test_ext(1).execute_with(|| { let netuid: NetUid = 7u16.into(); - let sub_id = SubId::from(0u8); + let sub_id = MechId::from(0u8); // Intentionally DO NOT create the base subnet assert_noop!( - SubtensorModule::ensure_subsubnet_exists(netuid, sub_id), - Error::::SubNetworkDoesNotExist + SubtensorModule::ensure_mechanism_exists(netuid, sub_id), + Error::::MechanismDoesNotExist ); }); } #[test] -fn ensure_subsubnet_fails_when_subid_out_of_range() { +fn ensure_mechanism_fails_when_subid_out_of_range() { new_test_ext(1).execute_with(|| { let netuid: NetUid = 9u16.into(); NetworksAdded::::insert(NetUid::from(netuid), true); // Current allowed sub-subnet count is 2 => valid sub_ids: {0, 1} - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); // sub_id == 2 is out of range (must be < 2) - let sub_id_eq = SubId::from(2u8); + let sub_id_eq = MechId::from(2u8); assert_noop!( - SubtensorModule::ensure_subsubnet_exists(netuid, sub_id_eq), - Error::::SubNetworkDoesNotExist + SubtensorModule::ensure_mechanism_exists(netuid, sub_id_eq), + Error::::MechanismDoesNotExist ); // sub_id > 2 is also out of range - let sub_id_gt = SubId::from(3u8); + let sub_id_gt = MechId::from(3u8); assert_noop!( - SubtensorModule::ensure_subsubnet_exists(netuid, sub_id_gt), - Error::::SubNetworkDoesNotExist + SubtensorModule::ensure_mechanism_exists(netuid, sub_id_gt), + Error::::MechanismDoesNotExist ); }); } #[test] -fn 
do_set_subsubnet_count_ok_minimal() { +fn do_set_mechanism_count_ok_minimal() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(3u16); NetworksAdded::::insert(NetUid::from(3u16), true); // base subnet exists - assert_ok!(SubtensorModule::do_set_subsubnet_count( + assert_ok!(SubtensorModule::do_set_mechanism_count( netuid, - SubId::from(1u8) + MechId::from(1u8) )); - assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(1u8)); + assert_eq!(MechanismCountCurrent::::get(netuid), MechId::from(1u8)); }); } #[test] -fn do_set_subsubnet_count_ok_at_effective_cap() { +fn do_set_mechanism_count_ok_at_effective_cap() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(4u16); NetworksAdded::::insert(NetUid::from(4u16), true); // base subnet exists // Effective bound is min(runtime cap, compile-time cap) - let runtime_cap = MaxSubsubnetCount::::get(); // e.g., SubId::from(8) - let compile_cap = SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET); + let runtime_cap = MaxMechanismCount::::get(); // e.g., MechId::from(8) + let compile_cap = MechId::from(MAX_MECHANISM_COUNT_PER_SUBNET); let bound = if runtime_cap <= compile_cap { runtime_cap } else { compile_cap }; - assert_ok!(SubtensorModule::do_set_subsubnet_count(netuid, bound)); - assert_eq!(SubsubnetCountCurrent::::get(netuid), bound); + assert_ok!(SubtensorModule::do_set_mechanism_count(netuid, bound)); + assert_eq!(MechanismCountCurrent::::get(netuid), bound); }); } @@ -228,8 +228,8 @@ fn do_set_fails_when_base_subnet_missing() { // No NetworksAdded insert => base subnet absent assert_noop!( - SubtensorModule::do_set_subsubnet_count(netuid, SubId::from(1u8)), - Error::::SubNetworkDoesNotExist + SubtensorModule::do_set_mechanism_count(netuid, MechId::from(1u8)), + Error::::MechanismDoesNotExist ); }); } @@ -241,7 +241,7 @@ fn do_set_fails_for_zero() { NetworksAdded::::insert(NetUid::from(9u16), true); // base subnet exists assert_noop!( - SubtensorModule::do_set_subsubnet_count(netuid, 
SubId::from(0u8)), + SubtensorModule::do_set_mechanism_count(netuid, MechId::from(0u8)), Error::::InvalidValue ); }); @@ -255,7 +255,7 @@ fn do_set_fails_when_over_runtime_cap() { // Runtime cap is 8 (per function), so 9 must fail assert_noop!( - SubtensorModule::do_set_subsubnet_count(netuid, SubId::from(9u8)), + SubtensorModule::do_set_mechanism_count(netuid, MechId::from(9u8)), Error::::InvalidValue ); }); @@ -267,16 +267,16 @@ fn do_set_fails_when_over_compile_time_cap() { let netuid = NetUid::from(12u16); NetworksAdded::::insert(NetUid::from(12u16), true); // base subnet exists - let too_big = SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET + 1); + let too_big = MechId::from(MAX_MECHANISM_COUNT_PER_SUBNET + 1); assert_noop!( - SubtensorModule::do_set_subsubnet_count(netuid, too_big), + SubtensorModule::do_set_mechanism_count(netuid, too_big), Error::::InvalidValue ); }); } #[test] -fn update_subsubnet_counts_decreases_and_cleans() { +fn update_mechanism_counts_decreases_and_cleans() { new_test_ext(1).execute_with(|| { let hotkey = U256::from(1); @@ -285,16 +285,16 @@ fn update_subsubnet_counts_decreases_and_cleans() { NetworksAdded::::insert(NetUid::from(42u16), true); // Choose counts so result is deterministic. 
- let old = SubId::from(3); - let desired = SubId::from(2u8); - SubsubnetCountCurrent::::insert(netuid, old); + let old = MechId::from(3); + let desired = MechId::from(2u8); + MechanismCountCurrent::::insert(netuid, old); // Set non-default subnet emission split - SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); + MechanismEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); - // Seed data at a kept subid (1) and a removed subid (2) - let idx_keep = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1u8)); - let idx_rm3 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); + // Seed data at a kept mecid (1) and a removed mecid (2) + let idx_keep = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1u8)); + let idx_rm3 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(2u8)); Weights::::insert(idx_keep, 0u16, vec![(1u16, 1u16)]); Incentive::::insert(idx_keep, vec![1u16]); @@ -327,10 +327,10 @@ fn update_subsubnet_counts_decreases_and_cleans() { ); // Act - SubtensorModule::update_subsubnet_counts_if_needed(netuid, desired); + SubtensorModule::update_mechanism_counts_if_needed(netuid, desired); // New count is as desired - assert_eq!(SubsubnetCountCurrent::::get(netuid), desired); + assert_eq!(MechanismCountCurrent::::get(netuid), desired); // Kept prefix intact assert_eq!(Incentive::::get(idx_keep), vec![1u16]); @@ -342,7 +342,7 @@ fn update_subsubnet_counts_decreases_and_cleans() { idx_keep, 1u64 )); - // Removed prefix (subid 3) cleared + // Removed prefix (mecid 3) cleared assert!(Weights::::iter_prefix(idx_rm3).next().is_none()); assert_eq!(Incentive::::get(idx_rm3), Vec::::new()); assert!(!LastUpdate::::contains_key(idx_rm3)); @@ -352,34 +352,34 @@ fn update_subsubnet_counts_decreases_and_cleans() { idx_rm3, 1u64 )); - // SubsubnetEmissionSplit is reset - assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); + // MechanismEmissionSplit is reset + 
assert!(MechanismEmissionSplit::::get(netuid).is_none()); }); } #[test] -fn update_subsubnet_counts_increases() { +fn update_mechanism_counts_increases() { new_test_ext(1).execute_with(|| { // Base subnet exists let netuid = NetUid::from(42u16); NetworksAdded::::insert(NetUid::from(42u16), true); // Choose counts - let old = SubId::from(1u8); - let desired = SubId::from(2u8); - SubsubnetCountCurrent::::insert(netuid, old); + let old = MechId::from(1u8); + let desired = MechId::from(2u8); + MechanismCountCurrent::::insert(netuid, old); // Set non-default subnet emission split - SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); + MechanismEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); // Act - SubtensorModule::update_subsubnet_counts_if_needed(netuid, desired); + SubtensorModule::update_mechanism_counts_if_needed(netuid, desired); // New count is as desired - assert_eq!(SubsubnetCountCurrent::::get(netuid), desired); + assert_eq!(MechanismCountCurrent::::get(netuid), desired); - // SubsubnetEmissionSplit is reset - assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); + // MechanismEmissionSplit is reset + assert!(MechanismEmissionSplit::::get(netuid).is_none()); }); } @@ -387,7 +387,7 @@ fn update_subsubnet_counts_increases() { fn split_emissions_even_division() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(5u16); - SubsubnetCountCurrent::::insert(netuid, SubId::from(5u8)); // 5 sub-subnets + MechanismCountCurrent::::insert(netuid, MechId::from(5u8)); // 5 sub-subnets let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(25u64)); assert_eq!(out, vec![AlphaCurrency::from(5u64); 5]); }); @@ -397,7 +397,7 @@ fn split_emissions_even_division() { fn split_emissions_rounding_to_first() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(6u16); - SubsubnetCountCurrent::::insert(netuid, SubId::from(4u8)); // 4 sub-subnets + MechanismCountCurrent::::insert(netuid, MechId::from(4u8)); // 4 
sub-subnets let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(10u64)); // 10 / 4 = 2, rem=2 assert_eq!( out, @@ -415,8 +415,8 @@ fn split_emissions_rounding_to_first() { fn split_emissions_fibbonacci() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(5u16); - SubsubnetCountCurrent::::insert(netuid, SubId::from(5u8)); // 5 sub-subnets - SubsubnetEmissionSplit::::insert(netuid, vec![3450, 6899, 10348, 17247, 27594]); + MechanismCountCurrent::::insert(netuid, MechId::from(5u8)); // 5 sub-subnets + MechanismEmissionSplit::::insert(netuid, vec![3450, 6899, 10348, 17247, 27594]); let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(19u64)); assert_eq!( out, @@ -431,16 +431,16 @@ fn split_emissions_fibbonacci() { }); } -/// Seeds a 2-neuron and 2-subsubnet subnet so `epoch_subsubnet` produces non-zero +/// Seeds a 2-neuron and 2-mechanism subnet so `epoch_mechanism` produces non-zero /// incentives & dividends. /// Returns the sub-subnet storage index. pub fn mock_epoch_state(netuid: NetUid, ck0: U256, hk0: U256, ck1: U256, hk1: U256) { - let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); - let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0)); + let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1)); // Base subnet exists; 2 neurons. NetworksAdded::::insert(NetUid::from(u16::from(netuid)), true); - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); SubnetworkN::::insert(netuid, 2); // Register two neurons (UID 0,1) → keys drive `get_subnetwork_n`. 
@@ -474,7 +474,7 @@ pub fn mock_epoch_state(netuid: NetUid, ck0: U256, hk0: U256, ck1: U256, hk1: U2 StakeThreshold::::put(0u64); ValidatorPermit::::insert(netuid, vec![true, true]); - // Simple weights, setting for each other on both subsubnets + // Simple weights, setting for each other on both mechanisms Weights::::insert(idx0, 0, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); Weights::::insert(idx0, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); Weights::::insert(idx1, 0, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); @@ -486,8 +486,8 @@ pub fn mock_epoch_state(netuid: NetUid, ck0: U256, hk0: U256, ck1: U256, hk1: U2 } pub fn mock_3_neurons(netuid: NetUid, hk: U256) { - let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); - let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0)); + let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1)); SubnetworkN::::insert(netuid, 3); Keys::::insert(netuid, 2u16, hk); @@ -497,11 +497,11 @@ pub fn mock_3_neurons(netuid: NetUid, hk: U256) { } #[test] -fn epoch_with_subsubnets_produces_per_subsubnet_incentive() { +fn epoch_with_mechanisms_produces_per_mechanism_incentive() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(1u16); - let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); - let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0)); + let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1)); let ck0 = U256::from(1); let hk0 = U256::from(2); let ck1 = U256::from(3); @@ -509,7 +509,7 @@ fn epoch_with_subsubnets_produces_per_subsubnet_incentive() { let emission = AlphaCurrency::from(1_000_000_000); mock_epoch_state(netuid, ck0, hk0, ck1, hk1); - SubtensorModule::epoch_with_subsubnets(netuid, emission); + 
SubtensorModule::epoch_with_mechanisms(netuid, emission); let actual_incentive_sub0 = Incentive::::get(idx0); let actual_incentive_sub1 = Incentive::::get(idx1); @@ -522,11 +522,11 @@ fn epoch_with_subsubnets_produces_per_subsubnet_incentive() { } #[test] -fn epoch_with_subsubnets_updates_bonds() { +fn epoch_with_mechanisms_updates_bonds() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(1u16); - let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); - let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0)); + let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1)); let ck0 = U256::from(1); let hk0 = U256::from(2); let ck1 = U256::from(3); @@ -535,33 +535,33 @@ fn epoch_with_subsubnets_updates_bonds() { mock_epoch_state(netuid, ck0, hk0, ck1, hk1); - // Cause bonds to be asymmetric on diff subsubnets + // Cause bonds to be asymmetric on diff mechanisms Weights::::insert(idx1, 0, vec![(0u16, 0xFFFF), (1u16, 0)]); Weights::::insert(idx1, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); - SubtensorModule::epoch_with_subsubnets(netuid, emission); + SubtensorModule::epoch_with_mechanisms(netuid, emission); let bonds_uid0_sub0 = Bonds::::get(idx0, 0); let bonds_uid1_sub0 = Bonds::::get(idx0, 1); let bonds_uid0_sub1 = Bonds::::get(idx1, 0); let bonds_uid1_sub1 = Bonds::::get(idx1, 1); - // Subsubnet 0: UID0 fully bonds to UID1, UID1 fully bonds to UID0 + // Mechanism 0: UID0 fully bonds to UID1, UID1 fully bonds to UID0 assert_eq!(bonds_uid0_sub0, vec![(1, 65535)]); assert_eq!(bonds_uid1_sub0, vec![(0, 65535)]); - // Subsubnet 1: UID0 no bond to UID1, UID1 fully bonds to UID0 + // Mechanism 1: UID0 no bond to UID1, UID1 fully bonds to UID0 assert_eq!(bonds_uid0_sub1, vec![]); assert_eq!(bonds_uid1_sub1, vec![(0, 65535)]); }); } #[test] -fn epoch_with_subsubnets_incentives_proportional_to_weights() { +fn 
epoch_with_mechanisms_incentives_proportional_to_weights() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(1u16); - let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); - let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0)); + let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1)); let ck0 = U256::from(1); let hk0 = U256::from(2); let ck1 = U256::from(3); @@ -575,11 +575,11 @@ fn epoch_with_subsubnets_incentives_proportional_to_weights() { // Need 3 neurons for this: One validator that will be setting weights to 2 miners ValidatorPermit::::insert(netuid, vec![true, false, false]); - // Set greater weight to uid1 on sub-subnet 0 and to uid2 on subsubnet 1 + // Set greater weight to uid1 on sub-subnet 0 and to uid2 on mechanism 1 Weights::::insert(idx0, 0, vec![(1u16, 0xFFFF / 5 * 4), (2u16, 0xFFFF / 5)]); Weights::::insert(idx1, 0, vec![(1u16, 0xFFFF / 5), (2u16, 0xFFFF / 5 * 4)]); - SubtensorModule::epoch_with_subsubnets(netuid, emission); + SubtensorModule::epoch_with_mechanisms(netuid, emission); let actual_incentive_sub0 = Incentive::::get(idx0); let actual_incentive_sub1 = Incentive::::get(idx1); @@ -610,11 +610,11 @@ fn epoch_with_subsubnets_incentives_proportional_to_weights() { } #[test] -fn epoch_with_subsubnets_persists_and_aggregates_all_terms() { +fn epoch_with_mechanisms_persists_and_aggregates_all_terms() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(1u16); - let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); - let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0)); + let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1)); // Three neurons: validator (uid=0) + two miners (uid=1,2) let ck0 = 
U256::from(1); @@ -632,10 +632,10 @@ fn epoch_with_subsubnets_persists_and_aggregates_all_terms() { let uid2 = 2_usize; // Two sub-subnets with non-equal split (~25% / 75%) - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); let split0 = u16::MAX / 4; let split1 = u16::MAX - split0; - SubsubnetEmissionSplit::::insert(netuid, vec![split0, split1]); + MechanismEmissionSplit::::insert(netuid, vec![split0, split1]); // One validator; skew weights differently per sub-subnet ValidatorPermit::::insert(netuid, vec![true, false, false]); @@ -649,20 +649,20 @@ fn epoch_with_subsubnets_persists_and_aggregates_all_terms() { Weights::::insert(idx1, 0, vec![(1u16, 0xFFFF / 5), (2u16, 0xFFFF / 5 * 4)]); // Per-sub emissions (and weights used for aggregation) - let subsubnet_emissions = SubtensorModule::split_emissions(netuid, emission); - let w0 = U64F64::from_num(u64::from(subsubnet_emissions[0])) + let mechanism_emissions = SubtensorModule::split_emissions(netuid, emission); + let w0 = U64F64::from_num(u64::from(mechanism_emissions[0])) / U64F64::from_num(u64::from(emission)); - let w1 = U64F64::from_num(u64::from(subsubnet_emissions[1])) + let w1 = U64F64::from_num(u64::from(mechanism_emissions[1])) / U64F64::from_num(u64::from(emission)); assert_abs_diff_eq!(w0.to_num::(), 0.25, epsilon = 0.0001); assert_abs_diff_eq!(w1.to_num::(), 0.75, epsilon = 0.0001); - // Get per-subsubnet epoch outputs to build expectations - let out0 = SubtensorModule::epoch_subsubnet(netuid, SubId::from(0), subsubnet_emissions[0]); - let out1 = SubtensorModule::epoch_subsubnet(netuid, SubId::from(1), subsubnet_emissions[1]); + // Get per-mechanism epoch outputs to build expectations + let out0 = SubtensorModule::epoch_mechanism(netuid, MechId::from(0), mechanism_emissions[0]); + let out1 = SubtensorModule::epoch_mechanism(netuid, MechId::from(1), mechanism_emissions[1]); // Now run the real aggregated path (also persists terms) - 
let agg = SubtensorModule::epoch_with_subsubnets(netuid, emission); + let agg = SubtensorModule::epoch_with_mechanisms(netuid, emission); // hotkey -> (server_emission_u64, validator_emission_u64) let agg_map: BTreeMap = agg @@ -674,7 +674,7 @@ fn epoch_with_subsubnets_persists_and_aggregates_all_terms() { let terms0 = |hk: &U256| out0.0.get(hk).unwrap(); let terms1 = |hk: &U256| out1.0.get(hk).unwrap(); - // Returned aggregated emissions match plain sums of subsubnet emissions + // Returned aggregated emissions match plain sums of mechanism emissions for hk in [&hk1, &hk2] { let (got_se, got_ve) = agg_map.get(hk).cloned().expect("present"); let t0 = terms0(hk); @@ -689,7 +689,7 @@ fn epoch_with_subsubnets_persists_and_aggregates_all_terms() { assert_abs_diff_eq!(u64::from(got_ve), exp_ve, epsilon = 1); } - // Persisted per-subsubnet Incentive vectors match per-sub terms + // Persisted per-mechanism Incentive vectors match per-sub terms let inc0 = Incentive::::get(idx0); let inc1 = Incentive::::get(idx1); let exp_inc0 = { @@ -794,11 +794,11 @@ fn epoch_with_subsubnets_persists_and_aggregates_all_terms() { } #[test] -fn epoch_with_subsubnets_no_weight_no_incentive() { +fn epoch_with_mechanisms_no_weight_no_incentive() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(1u16); - let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); - let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0)); + let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(1)); let ck0 = U256::from(1); let hk0 = U256::from(2); let ck1 = U256::from(3); @@ -816,7 +816,7 @@ fn epoch_with_subsubnets_no_weight_no_incentive() { Weights::::insert(idx0, 0, vec![(1u16, 1), (2u16, 0)]); Weights::::insert(idx1, 0, vec![(1u16, 1), (2u16, 0)]); - SubtensorModule::epoch_with_subsubnets(netuid, emission); + 
SubtensorModule::epoch_with_mechanisms(netuid, emission); let actual_incentive_sub0 = Incentive::::get(idx0); let actual_incentive_sub1 = Incentive::::get(idx1); @@ -838,7 +838,7 @@ fn neuron_dereg_cleans_weights_across_subids() { let netuid = NetUid::from(77u16); let neuron_uid: u16 = 1; // we'll deregister UID=1 // two sub-subnets - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); // Setup initial map values Emission::::insert( @@ -853,9 +853,9 @@ fn neuron_dereg_cleans_weights_across_subids() { Consensus::::insert(netuid, vec![21u16, 88u16, 44u16]); Dividends::::insert(netuid, vec![7u16, 77u16, 17u16]); - // Clearing per-subid maps + // Clearing per-mecid maps for sub in [0u8, 1u8] { - let idx = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(sub)); + let idx = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(sub)); // Incentive vector: position 1 should become 0 Incentive::::insert(idx, vec![10u16, 20u16, 30u16]); @@ -887,9 +887,9 @@ fn neuron_dereg_cleans_weights_across_subids() { let d = Dividends::::get(netuid); assert_eq!(d, vec![7, 0, 17]); - // Per-subid cleanup + // Per-mecid cleanup for sub in [0u8, 1u8] { - let idx = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(sub)); + let idx = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(sub)); // Incentive element at index 1 set to 0 let inc = Incentive::::get(idx); @@ -911,7 +911,7 @@ fn neuron_dereg_cleans_weights_across_subids() { fn clear_neuron_handles_absent_rows_gracefully() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(55u16); - SubsubnetCountCurrent::::insert(netuid, SubId::from(1u8)); // single sub-subnet + MechanismCountCurrent::::insert(netuid, MechId::from(1u8)); // single sub-subnet // Minimal vectors with non-zero at index 0 (we will clear UID=0) Emission::::insert(netuid, vec![AlphaCurrency::from(5u64)]); @@ -967,9 +967,9 @@ fn 
test_set_sub_weights_happy_path_sets_row_under_subid() { 1.into(), ); - // Have at least two sub-subnets; write under subid = 1 - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); - let subid = SubId::from(1u8); + // Have at least two sub-subnets; write under mecid = 1 + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); + let mecid = MechId::from(1u8); // Call extrinsic let dests = vec![uid2, uid3]; @@ -977,26 +977,26 @@ fn test_set_sub_weights_happy_path_sets_row_under_subid() { assert_ok!(SubtensorModule::set_sub_weights( RawOrigin::Signed(hk1).into(), netuid, - subid, + mecid, dests.clone(), weights.clone(), 0, // version_key )); - // Verify row exists under the chosen subid and not under a different subid - let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + // Verify row exists under the chosen mecid and not under a different mecid + let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, mecid); assert_eq!( Weights::::get(idx1, uid1), vec![(uid2, 88u16), (uid3, 0xFFFF)] ); - let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0u8)); + let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0u8)); assert!(Weights::::get(idx0, uid1).is_empty()); }); } #[test] -fn test_set_sub_weights_above_subsubnet_count_fails() { +fn test_set_sub_weights_above_mechanism_count_fails() { new_test_ext(0).execute_with(|| { let netuid = NetUid::from(1); let tempo: u16 = 13; @@ -1024,9 +1024,9 @@ fn test_set_sub_weights_above_subsubnet_count_fails() { 1.into(), ); - // Have exactly two sub-subnets; write under subid = 1 - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); - let subid_above = SubId::from(2u8); + // Have exactly two sub-subnets; write under mecid = 1 + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); + let subid_above = MechId::from(2u8); // Call extrinsic let dests = vec![uid2]; @@ -1040,7 +1040,7 @@ fn test_set_sub_weights_above_subsubnet_count_fails() { 
weights.clone(), 0, // version_key ), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); }); } @@ -1082,13 +1082,13 @@ fn test_commit_reveal_sub_weights_ok() { 1.into(), ); - // Ensure sub-subnet exists; write under subid = 1 - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); - let subid = SubId::from(1u8); - let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0u8)); - let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + // Ensure sub-subnet exists; write under mecid = 1 + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); + let mecid = MechId::from(1u8); + let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, MechId::from(0u8)); + let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, mecid); - // Prepare payload and commit hash (include subid!) + // Prepare payload and commit hash (include mecid!) let dests = vec![uid2, uid3]; let weights = vec![88u16, 0xFFFFu16]; let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; @@ -1106,7 +1106,7 @@ fn test_commit_reveal_sub_weights_ok() { assert_ok!(SubtensorModule::commit_sub_weights( RuntimeOrigin::signed(hk1), netuid, - subid, + mecid, commit_hash )); @@ -1115,26 +1115,26 @@ fn test_commit_reveal_sub_weights_ok() { assert_ok!(SubtensorModule::reveal_sub_weights( RuntimeOrigin::signed(hk1), netuid, - subid, + mecid, dests.clone(), weights.clone(), salt, version_key )); - // Verify weights stored under the chosen subid (normalized keeps max=0xFFFF here) + // Verify weights stored under the chosen mecid (normalized keeps max=0xFFFF here) assert_eq!( Weights::::get(idx1, uid1), vec![(uid2, 88u16), (uid3, 0xFFFFu16)] ); - // And not under a different subid + // And not under a different mecid assert!(Weights::::get(idx0, uid1).is_empty()); }); } #[test] -fn test_commit_reveal_above_subsubnet_count_fails() { +fn test_commit_reveal_above_mechanism_count_fails() { new_test_ext(1).execute_with(|| { System::set_block_number(0); @@ 
-1166,10 +1166,10 @@ fn test_commit_reveal_above_subsubnet_count_fails() { 1.into(), ); - // Ensure there are two subsubnets: 0 and 1 - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); - let subid_above = SubId::from(2u8); // non-existing sub-subnet - let idx2 = SubtensorModule::get_subsubnet_storage_index(netuid, subid_above); + // Ensure there are two mechanisms: 0 and 1 + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); + let subid_above = MechId::from(2u8); // non-existing sub-subnet + let idx2 = SubtensorModule::get_mechanism_storage_index(netuid, subid_above); // Prepare payload and commit hash let dests = vec![uid2]; @@ -1193,7 +1193,7 @@ fn test_commit_reveal_above_subsubnet_count_fails() { subid_above, commit_hash ), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); // Advance one epoch, then attempt to reveal @@ -1223,14 +1223,14 @@ fn test_reveal_crv3_commits_sub_success() { System::set_block_number(0); let netuid = NetUid::from(1); - let subid = SubId::from(1u8); // write under sub-subnet #1 + let mecid = MechId::from(1u8); // write under sub-subnet #1 let hotkey1: AccountId = U256::from(1); let hotkey2: AccountId = U256::from(2); let reveal_round: u64 = 1000; add_network(netuid, 5, 0); - // ensure we actually have subid=1 available - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + // ensure we actually have mecid=1 available + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); // Register neurons and set up configs register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000); @@ -1252,7 +1252,7 @@ fn test_reveal_crv3_commits_sub_success() { let version_key = SubtensorModule::get_weights_version_key(netuid); - // Payload (same as legacy; subid is provided to the extrinsic) + // Payload (same as legacy; mecid is provided to the extrinsic) let payload = WeightsTlockPayload { hotkey: hotkey1.encode(), values: vec![10, 20], @@ -1282,7 +1282,7 @@ fn test_reveal_crv3_commits_sub_success() { 
assert_ok!(SubtensorModule::commit_timelocked_sub_weights( RuntimeOrigin::signed(hotkey1), netuid, - subid, + mecid, commit_bytes.clone().try_into().expect("bounded"), reveal_round, SubtensorModule::get_commit_reveal_weights_version() @@ -1302,11 +1302,11 @@ fn test_reveal_crv3_commits_sub_success() { // Run epochs so the commit is processed step_epochs(3, netuid); - // Verify weights applied under the selected subid index - let idx = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + // Verify weights applied under the selected mecid index + let idx = SubtensorModule::get_mechanism_storage_index(netuid, mecid); let weights_sparse = SubtensorModule::get_weights_sparse(idx); let row = weights_sparse.get(uid1 as usize).cloned().unwrap_or_default(); - assert!(!row.is_empty(), "expected weights set for validator uid1 under subid"); + assert!(!row.is_empty(), "expected weights set for validator uid1 under mecid"); // Compare rounded normalized weights to expected proportions (like legacy test) let expected: Vec<(u16, I32F32)> = payload.uids.iter().zip(payload.values.iter()).map(|(&u,&v)|(u, I32F32::from_num(v))).collect(); @@ -1324,19 +1324,19 @@ fn test_reveal_crv3_commits_sub_success() { } #[test] -fn test_crv3_above_subsubnet_count_fails() { +fn test_crv3_above_mechanism_count_fails() { new_test_ext(100).execute_with(|| { System::set_block_number(0); let netuid = NetUid::from(1); - let subid_above = SubId::from(2u8); // non-existing sub-subnet + let subid_above = MechId::from(2u8); // non-existing sub-subnet let hotkey1: AccountId = U256::from(1); let hotkey2: AccountId = U256::from(2); let reveal_round: u64 = 1000; add_network(netuid, 5, 0); - // ensure we actually have subid=1 available - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + // ensure we actually have mecid=1 available + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); // Register neurons and set up configs register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000); 
@@ -1355,7 +1355,7 @@ fn test_crv3_above_subsubnet_count_fails() { let version_key = SubtensorModule::get_weights_version_key(netuid); - // Payload (same as legacy; subid is provided to the extrinsic) + // Payload (same as legacy; mecid is provided to the extrinsic) let payload = WeightsTlockPayload { hotkey: hotkey1.encode(), values: vec![10, 20], @@ -1391,7 +1391,7 @@ fn test_crv3_above_subsubnet_count_fails() { reveal_round, SubtensorModule::get_commit_reveal_weights_version() ), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); }); } @@ -1400,21 +1400,21 @@ fn test_crv3_above_subsubnet_count_fails() { fn test_do_commit_crv3_sub_weights_committing_too_fast() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(1); - let subid = SubId::from(1u8); + let mecid = MechId::from(1u8); let hotkey: AccountId = U256::from(1); let commit_data_1: Vec = vec![1, 2, 3]; let commit_data_2: Vec = vec![4, 5, 6]; let reveal_round: u64 = 1000; add_network(netuid, 5, 0); - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); // allow subids {0,1} + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); // allow subids {0,1} register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); SubtensorModule::set_weights_set_rate_limit(netuid, 5); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("uid"); - let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + let idx1 = SubtensorModule::get_mechanism_storage_index(netuid, mecid); SubtensorModule::set_last_update_for_uid(idx1, uid, 0); // make validator with stake @@ -1428,22 +1428,22 @@ fn test_do_commit_crv3_sub_weights_committing_too_fast() { 1.into(), ); - // first commit OK on subid=1 + // first commit OK on mecid=1 assert_ok!(SubtensorModule::commit_timelocked_sub_weights( RuntimeOrigin::signed(hotkey), netuid, - subid, + mecid, commit_data_1.clone().try_into().expect("bounded"), 
reveal_round, SubtensorModule::get_commit_reveal_weights_version() )); - // immediate second commit on SAME subid blocked + // immediate second commit on SAME mecid blocked assert_noop!( SubtensorModule::commit_timelocked_sub_weights( RuntimeOrigin::signed(hotkey), netuid, - subid, + mecid, commit_data_2.clone().try_into().expect("bounded"), reveal_round, SubtensorModule::get_commit_reveal_weights_version() @@ -1451,9 +1451,9 @@ fn test_do_commit_crv3_sub_weights_committing_too_fast() { Error::::CommittingWeightsTooFast ); - // BUT committing too soon on a DIFFERENT subid is allowed - let other_subid = SubId::from(0u8); - let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, other_subid); + // BUT committing too soon on a DIFFERENT mecid is allowed + let other_subid = MechId::from(0u8); + let idx0 = SubtensorModule::get_mechanism_storage_index(netuid, other_subid); SubtensorModule::set_last_update_for_uid(idx0, uid, 0); // baseline like above assert_ok!(SubtensorModule::commit_timelocked_sub_weights( RuntimeOrigin::signed(hotkey), @@ -1464,13 +1464,13 @@ fn test_do_commit_crv3_sub_weights_committing_too_fast() { SubtensorModule::get_commit_reveal_weights_version() )); - // still too fast on original subid after 2 blocks + // still too fast on original mecid after 2 blocks step_block(2); assert_noop!( SubtensorModule::commit_timelocked_sub_weights( RuntimeOrigin::signed(hotkey), netuid, - subid, + mecid, commit_data_2.clone().try_into().expect("bounded"), reveal_round, SubtensorModule::get_commit_reveal_weights_version() @@ -1478,12 +1478,12 @@ fn test_do_commit_crv3_sub_weights_committing_too_fast() { Error::::CommittingWeightsTooFast ); - // after enough blocks, OK again on original subid + // after enough blocks, OK again on original mecid step_block(3); assert_ok!(SubtensorModule::commit_timelocked_sub_weights( RuntimeOrigin::signed(hotkey), netuid, - subid, + mecid, commit_data_2.try_into().expect("bounded"), reveal_round, 
SubtensorModule::get_commit_reveal_weights_version() @@ -1492,15 +1492,15 @@ fn test_do_commit_crv3_sub_weights_committing_too_fast() { } #[test] -fn epoch_subsubnet_emergency_mode_distributes_by_stake() { +fn epoch_mechanism_emergency_mode_distributes_by_stake() { new_test_ext(1).execute_with(|| { // setup a single sub-subnet where consensus sum becomes 0 let netuid = NetUid::from(1u16); - let subid = SubId::from(1u8); - let idx = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + let mecid = MechId::from(1u8); + let idx = SubtensorModule::get_mechanism_storage_index(netuid, mecid); let tempo: u16 = 5; add_network(netuid, tempo, 0); - SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); // allow subids {0,1} + MechanismCountCurrent::::insert(netuid, MechId::from(2u8)); // allow subids {0,1} SubtensorModule::set_max_registrations_per_block(netuid, 4); SubtensorModule::set_target_registrations_per_interval(netuid, 4); @@ -1556,7 +1556,7 @@ fn epoch_subsubnet_emergency_mode_distributes_by_stake() { let emission = AlphaCurrency::from(1_000_000u64); // --- act: run epoch on this sub-subnet only --- - let out = SubtensorModule::epoch_subsubnet(netuid, subid, emission); + let out = SubtensorModule::epoch_mechanism(netuid, mecid, emission); // collect validator emissions per hotkey let t0 = out.0.get(&hk0).unwrap(); diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index 205b4977cd..e9c2c7eaed 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -22,7 +22,7 @@ mod serving; mod staking; mod staking2; mod subnet; -mod subsubnet; +mod mechanism; mod swap_coldkey; mod swap_hotkey; mod swap_hotkey_with_subnet; diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs index e61a7aee26..d230af6f30 100644 --- a/pallets/subtensor/src/tests/recycle_alpha.rs +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -430,7 +430,7 @@ fn 
test_recycle_errors() { 100_000.into(), 99.into() // non-existent subnet ), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); assert_noop!( @@ -502,7 +502,7 @@ fn test_burn_errors() { 100_000.into(), 99.into() // non-existent subnet ), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); assert_noop!( diff --git a/pallets/subtensor/src/tests/subnet.rs b/pallets/subtensor/src/tests/subnet.rs index b60f3ffa41..bc3941f969 100644 --- a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -52,7 +52,7 @@ fn test_do_start_call_fail_with_not_existed_subnet() { <::RuntimeOrigin>::signed(coldkey_account_id), netuid ), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); }); } diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index 190634212a..1125c9c17f 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -13,8 +13,8 @@ pub enum TransactionType { SetWeightsVersionKey, SetSNOwnerHotkey, OwnerHyperparamUpdate(Hyperparameter), - SubsubnetCountUpdate, - SubsubnetEmission, + MechanismCountUpdate, + MechanismEmission, MaxUidsTrimming, } @@ -25,8 +25,8 @@ impl TransactionType { Self::SetChildren => 150, // 30 minutes Self::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), Self::RegisterNetwork => NetworkRateLimit::::get(), - Self::SubsubnetCountUpdate => SubsubnetCountSetRateLimit::::get(), - Self::SubsubnetEmission => SubsubnetEmissionRateLimit::::get(), + Self::MechanismCountUpdate => MechanismCountSetRateLimit::::get(), + Self::MechanismEmission => MechanismEmissionRateLimit::::get(), Self::MaxUidsTrimming => MaxUidsTrimmingRateLimit::::get(), Self::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, @@ -138,8 +138,8 @@ impl From for u16 { TransactionType::SetWeightsVersionKey => 4, TransactionType::SetSNOwnerHotkey => 5, 
TransactionType::OwnerHyperparamUpdate(_) => 6, - TransactionType::SubsubnetCountUpdate => 7, - TransactionType::SubsubnetEmission => 8, + TransactionType::MechanismCountUpdate => 7, + TransactionType::MechanismEmission => 8, TransactionType::MaxUidsTrimming => 9, } } @@ -155,8 +155,8 @@ impl From for TransactionType { 4 => TransactionType::SetWeightsVersionKey, 5 => TransactionType::SetSNOwnerHotkey, 6 => TransactionType::OwnerHyperparamUpdate(Hyperparameter::Unknown), - 7 => TransactionType::SubsubnetCountUpdate, - 8 => TransactionType::SubsubnetEmission, + 7 => TransactionType::MechanismCountUpdate, + 8 => TransactionType::MechanismEmission, 9 => TransactionType::MaxUidsTrimming, _ => TransactionType::Unknown, } diff --git a/pallets/swap/src/pallet/mod.rs b/pallets/swap/src/pallet/mod.rs index 442c4852aa..554c7aeead 100644 --- a/pallets/swap/src/pallet/mod.rs +++ b/pallets/swap/src/pallet/mod.rs @@ -265,7 +265,7 @@ mod pallet { ReservesTooLow, /// The subnet does not exist. - SubNetworkDoesNotExist, + MechanismDoesNotExist, /// User liquidity operations are disabled for this subnet UserLiquidityDisabled, @@ -294,7 +294,7 @@ mod pallet { // Ensure that the subnet exists. ensure!( T::SubnetInfo::exists(netuid.into()), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); ensure!(rate <= T::MaxFeeRate::get(), Error::::FeeRateTooHigh); @@ -331,7 +331,7 @@ mod pallet { ensure!( T::SubnetInfo::exists(netuid.into()), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); EnabledUserLiquidity::::insert(netuid, enable); @@ -366,7 +366,7 @@ mod pallet { // Ensure that the subnet exists. ensure!( T::SubnetInfo::exists(netuid.into()), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); ensure!( @@ -434,7 +434,7 @@ mod pallet { // Ensure that the subnet exists. 
ensure!( T::SubnetInfo::exists(netuid.into()), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); // Remove liquidity @@ -494,7 +494,7 @@ mod pallet { // Ensure that the subnet exists. ensure!( T::SubnetInfo::exists(netuid.into()), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); ensure!( diff --git a/pallets/swap/src/pallet/tests.rs b/pallets/swap/src/pallet/tests.rs index 396bd656be..e9336bac26 100644 --- a/pallets/swap/src/pallet/tests.rs +++ b/pallets/swap/src/pallet/tests.rs @@ -141,7 +141,7 @@ mod dispatchables { NON_EXISTENT_NETUID.into(), true ), - Error::::SubNetworkDoesNotExist + Error::::MechanismDoesNotExist ); }); } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 9eb2f8dbe9..dcdd85bfcf 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1057,10 +1057,10 @@ pub struct ResetBondsOnCommit; impl OnMetadataCommitment for ResetBondsOnCommit { #[cfg(not(feature = "runtime-benchmarks"))] fn on_metadata_commitment(netuid: NetUid, address: &AccountId) { - // Reset bonds for each subsubnet of this subnet - let subsub_count = SubtensorModule::get_current_subsubnet_count(netuid); - for subid in 0..u8::from(subsub_count) { - let netuid_index = SubtensorModule::get_subsubnet_storage_index(netuid, subid.into()); + // Reset bonds for each mechanism of this subnet + let mechanism_count = SubtensorModule::get_current_mechanism_count(netuid); + for mecid in 0..u8::from(mechanism_count) { + let netuid_index = SubtensorModule::get_mechanism_storage_index(netuid, mecid.into()); let _ = SubtensorModule::do_reset_bonds(netuid_index, address); } } @@ -2334,8 +2334,8 @@ impl_runtime_apis! 
{ SubtensorModule::get_metagraph(netuid) } - fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option> { - SubtensorModule::get_submetagraph(netuid, subid) + fn get_submetagraph(netuid: NetUid, mecid: MechId) -> Option> { + SubtensorModule::get_submetagraph(netuid, mecid) } fn get_subnet_state(netuid: NetUid) -> Option> { @@ -2358,8 +2358,8 @@ impl_runtime_apis! { SubtensorModule::get_selective_metagraph(netuid, metagraph_indexes) } - fn get_selective_submetagraph(netuid: NetUid, subid: SubId, metagraph_indexes: Vec) -> Option> { - SubtensorModule::get_selective_submetagraph(netuid, subid, metagraph_indexes) + fn get_selective_submetagraph(netuid: NetUid, mecid: MechId, metagraph_indexes: Vec) -> Option> { + SubtensorModule::get_selective_submetagraph(netuid, mecid, metagraph_indexes) } } From 7f4053935039ab22d06117a2e8f9c4a180bf7677 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 17 Sep 2025 12:27:19 -0400 Subject: [PATCH 265/379] Set mechanism limit to 2 --- pallets/admin-utils/src/lib.rs | 2 +- pallets/admin-utils/src/tests/mod.rs | 6 +++--- pallets/subtensor/rpc/src/lib.rs | 2 +- pallets/subtensor/runtime-api/src/lib.rs | 2 +- pallets/subtensor/src/coinbase/reveal_commits.rs | 2 +- pallets/subtensor/src/epoch/run_epoch.rs | 2 +- pallets/subtensor/src/lib.rs | 8 ++++---- pallets/subtensor/src/rpc_info/metagraph.rs | 2 +- pallets/subtensor/src/subnets/mechanism.rs | 2 +- pallets/subtensor/src/subnets/weights.rs | 12 ++++++++++-- pallets/subtensor/src/swap/swap_hotkey.rs | 2 +- pallets/subtensor/src/tests/mechanism.rs | 13 +++++++++---- pallets/subtensor/src/tests/mod.rs | 2 +- 13 files changed, 35 insertions(+), 22 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index ab9bcc0e24..f53d386f93 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -28,7 +28,7 @@ pub mod pallet { use pallet_subtensor::utils::rate_limiting::{Hyperparameter, TransactionType}; use sp_runtime::BoundedVec; 
use substrate_fixed::types::I96F32; - use subtensor_runtime_common::{NetUid, MechId, TaoCurrency}; + use subtensor_runtime_common::{MechId, NetUid, TaoCurrency}; /// The main data structure of the module. #[pallet::pallet] diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 033c711902..0e0232859b 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -14,7 +14,7 @@ use pallet_subtensor::{Event, utils::rate_limiting::TransactionType}; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{Get, Pair, U256, ed25519}; use substrate_fixed::types::I96F32; -use subtensor_runtime_common::{Currency, NetUid, MechId, TaoCurrency}; +use subtensor_runtime_common::{Currency, MechId, NetUid, TaoCurrency}; use crate::Error; use crate::pallet::PrecompileEnable; @@ -2325,8 +2325,8 @@ fn test_sudo_set_max_burn() { fn test_sudo_set_mechanism_count() { new_test_ext().execute_with(|| { let netuid = NetUid::from(1); - let ss_count_ok = MechId::from(8); - let ss_count_bad = MechId::from(9); + let ss_count_ok = MaxMechanismCount::::get(); + let ss_count_bad = MechId::from(u8::from(ss_count_ok) + 1); let sn_owner = U256::from(1324); add_network(netuid, 10); diff --git a/pallets/subtensor/rpc/src/lib.rs b/pallets/subtensor/rpc/src/lib.rs index 3ecdbf7464..44386b7aa4 100644 --- a/pallets/subtensor/rpc/src/lib.rs +++ b/pallets/subtensor/rpc/src/lib.rs @@ -9,7 +9,7 @@ use jsonrpsee::{ use sp_blockchain::HeaderBackend; use sp_runtime::{AccountId32, traits::Block as BlockT}; use std::sync::Arc; -use subtensor_runtime_common::{NetUid, MechId, TaoCurrency}; +use subtensor_runtime_common::{MechId, NetUid, TaoCurrency}; use sp_api::ProvideRuntimeApi; diff --git a/pallets/subtensor/runtime-api/src/lib.rs b/pallets/subtensor/runtime-api/src/lib.rs index e25dcc535e..86ab497c50 100644 --- a/pallets/subtensor/runtime-api/src/lib.rs +++ b/pallets/subtensor/runtime-api/src/lib.rs @@ -12,7 +12,7 @@ use 
pallet_subtensor::rpc_info::{ subnet_info::{SubnetHyperparams, SubnetHyperparamsV2, SubnetInfo, SubnetInfov2}, }; use sp_runtime::AccountId32; -use subtensor_runtime_common::{AlphaCurrency, NetUid, MechId, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, MechId, NetUid, TaoCurrency}; // Here we declare the runtime API. It is implemented it the `impl` block in // src/neuron_info.rs, src/subnet_info.rs, and src/delegate_info.rs diff --git a/pallets/subtensor/src/coinbase/reveal_commits.rs b/pallets/subtensor/src/coinbase/reveal_commits.rs index 3ddcb79f88..442f9d840f 100644 --- a/pallets/subtensor/src/coinbase/reveal_commits.rs +++ b/pallets/subtensor/src/coinbase/reveal_commits.rs @@ -3,7 +3,7 @@ use ark_serialize::CanonicalDeserialize; use codec::Decode; use frame_support::{dispatch, traits::OriginTrait}; use scale_info::prelude::collections::VecDeque; -use subtensor_runtime_common::{NetUid, MechId}; +use subtensor_runtime_common::{MechId, NetUid}; use tle::{ curves::drand::TinyBLS381, stream_ciphers::AESGCMStreamCipherProvider, diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 4dd949d3eb..7ab4446c3e 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -6,7 +6,7 @@ use safe_math::*; use sp_std::collections::btree_map::IntoIter; use sp_std::vec; use substrate_fixed::types::{I32F32, I64F64, I96F32}; -use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, MechId}; +use subtensor_runtime_common::{AlphaCurrency, MechId, NetUid, NetUidStorageIndex}; #[derive(Debug, Default)] pub struct EpochTerms { diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index f65191062b..e40542f79f 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -87,7 +87,7 @@ pub mod pallet { use substrate_fixed::types::{I96F32, U64F64}; use subtensor_macros::freeze_struct; use subtensor_runtime_common::{ - AlphaCurrency, 
Currency, NetUid, NetUidStorageIndex, MechId, TaoCurrency, + AlphaCurrency, Currency, MechId, NetUid, NetUidStorageIndex, TaoCurrency, }; #[cfg(not(feature = "std"))] @@ -1844,15 +1844,15 @@ pub mod pallet { #[pallet::type_value] /// -- ITEM (Maximum number of sub-subnets) pub fn MaxMechanismCount() -> MechId { - MechId::from(8) + MechId::from(2) } #[pallet::type_value] - /// -- ITEM (Rate limit for subsubnet count updates) + /// -- ITEM (Rate limit for mechanism count updates) pub fn MechanismCountSetRateLimit() -> u64 { prod_or_fast!(7_200, 1) } #[pallet::type_value] - /// -- ITEM (Rate limit for subsubnet emission distribution updates) + /// -- ITEM (Rate limit for mechanism emission distribution updates) pub fn MechanismEmissionRateLimit() -> u64 { prod_or_fast!(7_200, 1) } diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index 4d842d26b8..61351afb00 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -8,7 +8,7 @@ use pallet_commitments::GetCommitments; use substrate_fixed::types::I64F64; use substrate_fixed::types::I96F32; use subtensor_macros::freeze_struct; -use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, MechId, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, MechId, NetUid, NetUidStorageIndex, TaoCurrency}; #[freeze_struct("6fc49d5a7dc0e339")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] diff --git a/pallets/subtensor/src/subnets/mechanism.rs b/pallets/subtensor/src/subnets/mechanism.rs index 862506aef5..f8fa76ad51 100644 --- a/pallets/subtensor/src/subnets/mechanism.rs +++ b/pallets/subtensor/src/subnets/mechanism.rs @@ -6,7 +6,7 @@ use crate::epoch::run_epoch::EpochTerms; use alloc::collections::BTreeMap; use safe_math::*; use substrate_fixed::types::U64F64; -use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, MechId}; +use subtensor_runtime_common::{AlphaCurrency, 
MechId, NetUid, NetUidStorageIndex}; pub type LeaseId = u32; diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index 08cc3724d4..4dfd8b5e7e 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -10,7 +10,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Hash}, }; use sp_std::{collections::vec_deque::VecDeque, vec}; -use subtensor_runtime_common::{NetUid, NetUidStorageIndex, MechId}; +use subtensor_runtime_common::{MechId, NetUid, NetUidStorageIndex}; impl Pallet { /// ---- The implementation for committing weight hashes. @@ -425,7 +425,15 @@ impl Pallet { salt: Vec, version_key: u64, ) -> DispatchResult { - Self::internal_reveal_weights(origin, netuid, MechId::MAIN, uids, values, salt, version_key) + Self::internal_reveal_weights( + origin, + netuid, + MechId::MAIN, + uids, + values, + salt, + version_key, + ) } pub fn do_reveal_sub_weights( diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs index fdee2182ba..4509c57864 100644 --- a/pallets/subtensor/src/swap/swap_hotkey.rs +++ b/pallets/subtensor/src/swap/swap_hotkey.rs @@ -2,7 +2,7 @@ use super::*; use frame_support::weights::Weight; use sp_core::Get; use substrate_fixed::types::U64F64; -use subtensor_runtime_common::{Currency, NetUid, MechId}; +use subtensor_runtime_common::{Currency, MechId, NetUid}; impl Pallet { /// Swaps the hotkey of a coldkey account. 
diff --git a/pallets/subtensor/src/tests/mechanism.rs b/pallets/subtensor/src/tests/mechanism.rs index 3ab6ad785a..933bf574fd 100644 --- a/pallets/subtensor/src/tests/mechanism.rs +++ b/pallets/subtensor/src/tests/mechanism.rs @@ -52,7 +52,7 @@ use sp_core::{H256, U256}; use sp_runtime::traits::{BlakeTwo256, Hash}; use sp_std::collections::vec_deque::VecDeque; use substrate_fixed::types::{I32F32, U64F64}; -use subtensor_runtime_common::{NetUid, NetUidStorageIndex, MechId}; +use subtensor_runtime_common::{MechId, NetUid, NetUidStorageIndex}; use tle::{ curves::drand::TinyBLS381, ibe::fullident::Identity, stream_ciphers::AESGCMStreamCipherProvider, tlock::tle, @@ -197,7 +197,10 @@ fn do_set_mechanism_count_ok_minimal() { MechId::from(1u8) )); - assert_eq!(MechanismCountCurrent::::get(netuid), MechId::from(1u8)); + assert_eq!( + MechanismCountCurrent::::get(netuid), + MechId::from(1u8) + ); }); } @@ -658,8 +661,10 @@ fn epoch_with_mechanisms_persists_and_aggregates_all_terms() { assert_abs_diff_eq!(w1.to_num::(), 0.75, epsilon = 0.0001); // Get per-mechanism epoch outputs to build expectations - let out0 = SubtensorModule::epoch_mechanism(netuid, MechId::from(0), mechanism_emissions[0]); - let out1 = SubtensorModule::epoch_mechanism(netuid, MechId::from(1), mechanism_emissions[1]); + let out0 = + SubtensorModule::epoch_mechanism(netuid, MechId::from(0), mechanism_emissions[0]); + let out1 = + SubtensorModule::epoch_mechanism(netuid, MechId::from(1), mechanism_emissions[1]); // Now run the real aggregated path (also persists terms) let agg = SubtensorModule::epoch_with_mechanisms(netuid, emission); diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index e9c2c7eaed..b9f4ff5366 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -10,6 +10,7 @@ mod epoch; mod evm; mod leasing; mod math; +mod mechanism; mod migration; mod mock; mod move_stake; @@ -22,7 +23,6 @@ mod serving; mod staking; mod 
staking2; mod subnet; -mod mechanism; mod swap_coldkey; mod swap_hotkey; mod swap_hotkey_with_subnet; From 0d421e51ae663a0fe98bb84ccf948fa3d31241cf Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 17 Sep 2025 12:33:37 -0400 Subject: [PATCH 266/379] Rename submetagraph to mechagraph in RPC calls --- pallets/subtensor/rpc/src/lib.rs | 24 ++++++++++----------- pallets/subtensor/runtime-api/src/lib.rs | 6 +++--- pallets/subtensor/src/rpc_info/metagraph.rs | 12 +++++------ runtime/src/lib.rs | 12 +++++------ 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/pallets/subtensor/rpc/src/lib.rs b/pallets/subtensor/rpc/src/lib.rs index 44386b7aa4..2c57d0cae4 100644 --- a/pallets/subtensor/rpc/src/lib.rs +++ b/pallets/subtensor/rpc/src/lib.rs @@ -72,10 +72,10 @@ pub trait SubtensorCustomApi { fn get_all_metagraphs(&self, at: Option) -> RpcResult>; #[method(name = "subnetInfo_getMetagraph")] fn get_metagraph(&self, netuid: NetUid, at: Option) -> RpcResult>; - #[method(name = "subnetInfo_getAllSubMetagraphs")] - fn get_all_submetagraphs(&self, at: Option) -> RpcResult>; - #[method(name = "subnetInfo_getSubMetagraph")] - fn get_submetagraph( + #[method(name = "subnetInfo_getAllMechagraphs")] + fn get_all_mechagraphs(&self, at: Option) -> RpcResult>; + #[method(name = "subnetInfo_getMechagraph")] + fn get_mechagraph( &self, netuid: NetUid, mecid: MechId, @@ -92,8 +92,8 @@ pub trait SubtensorCustomApi { metagraph_index: Vec, at: Option, ) -> RpcResult>; - #[method(name = "subnetInfo_getSelectiveSubMetagraph")] - fn get_selective_submetagraph( + #[method(name = "subnetInfo_getSelectiveMechagraph")] + fn get_selective_mechagraph( &self, netuid: NetUid, mecid: MechId, @@ -336,11 +336,11 @@ where } } - fn get_all_submetagraphs(&self, at: Option<::Hash>) -> RpcResult> { + fn get_all_mechagraphs(&self, at: Option<::Hash>) -> RpcResult> { let api = self.client.runtime_api(); let at = at.unwrap_or_else(|| self.client.info().best_hash); - match 
api.get_all_submetagraphs(at) { + match api.get_all_mechagraphs(at) { Ok(result) => Ok(result.encode()), Err(e) => Err(Error::RuntimeError(format!("Unable to get metagraps: {e:?}")).into()), } @@ -379,7 +379,7 @@ where } } - fn get_submetagraph( + fn get_mechagraph( &self, netuid: NetUid, mecid: MechId, @@ -387,7 +387,7 @@ where ) -> RpcResult> { let api = self.client.runtime_api(); let at = at.unwrap_or_else(|| self.client.info().best_hash); - match api.get_submetagraph(at, netuid, mecid) { + match api.get_mechagraph(at, netuid, mecid) { Ok(result) => Ok(result.encode()), Err(e) => Err(Error::RuntimeError(format!( "Unable to get dynamic subnets info: {e:?}" @@ -472,7 +472,7 @@ where } } - fn get_selective_submetagraph( + fn get_selective_mechagraph( &self, netuid: NetUid, mecid: MechId, @@ -482,7 +482,7 @@ where let api = self.client.runtime_api(); let at = at.unwrap_or_else(|| self.client.info().best_hash); - match api.get_selective_submetagraph(at, netuid, mecid, metagraph_index) { + match api.get_selective_mechagraph(at, netuid, mecid, metagraph_index) { Ok(result) => Ok(result.encode()), Err(e) => { Err(Error::RuntimeError(format!("Unable to get selective metagraph: {e:?}")).into()) diff --git a/pallets/subtensor/runtime-api/src/lib.rs b/pallets/subtensor/runtime-api/src/lib.rs index 86ab497c50..dc9b4244c0 100644 --- a/pallets/subtensor/runtime-api/src/lib.rs +++ b/pallets/subtensor/runtime-api/src/lib.rs @@ -40,12 +40,12 @@ sp_api::decl_runtime_apis! 
{ fn get_all_dynamic_info() -> Vec>>; fn get_all_metagraphs() -> Vec>>; fn get_metagraph(netuid: NetUid) -> Option>; - fn get_all_submetagraphs() -> Vec>>; - fn get_submetagraph(netuid: NetUid, mecid: MechId) -> Option>; + fn get_all_mechagraphs() -> Vec>>; + fn get_mechagraph(netuid: NetUid, mecid: MechId) -> Option>; fn get_dynamic_info(netuid: NetUid) -> Option>; fn get_subnet_state(netuid: NetUid) -> Option>; fn get_selective_metagraph(netuid: NetUid, metagraph_indexes: Vec) -> Option>; - fn get_selective_submetagraph(netuid: NetUid, mecid: MechId, metagraph_indexes: Vec) -> Option>; + fn get_selective_mechagraph(netuid: NetUid, mecid: MechId, metagraph_indexes: Vec) -> Option>; } pub trait StakeInfoRuntimeApi { diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index 61351afb00..d0d7b1b94e 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -805,7 +805,7 @@ impl Pallet { metagraphs } - pub fn get_submetagraph(netuid: NetUid, mecid: MechId) -> Option> { + pub fn get_mechagraph(netuid: NetUid, mecid: MechId) -> Option> { if Self::ensure_mechanism_exists(netuid, mecid).is_err() { return None; } @@ -832,13 +832,13 @@ impl Pallet { } } - pub fn get_all_submetagraphs() -> Vec>> { + pub fn get_all_mechagraphs() -> Vec>> { let netuids = Self::get_all_subnet_netuids(); let mut metagraphs = Vec::>>::new(); for netuid in netuids.clone().iter() { let mechanism_count = u8::from(MechanismCountCurrent::::get(netuid)); for mecid in 0..mechanism_count { - metagraphs.push(Self::get_submetagraph(*netuid, MechId::from(mecid))); + metagraphs.push(Self::get_mechagraph(*netuid, MechId::from(mecid))); } } metagraphs @@ -860,7 +860,7 @@ impl Pallet { } } - pub fn get_selective_submetagraph( + pub fn get_selective_mechagraph( netuid: NetUid, mecid: MechId, metagraph_indexes: Vec, @@ -870,7 +870,7 @@ impl Pallet { } else { let mut result = SelectiveMetagraph::default(); for 
index in metagraph_indexes.iter() { - let value = Self::get_single_selective_submetagraph(netuid, mecid, *index); + let value = Self::get_single_selective_mechagraph(netuid, mecid, *index); result.merge_value(&value, *index as usize); } Some(result) @@ -1441,7 +1441,7 @@ impl Pallet { } } - fn get_single_selective_submetagraph( + fn get_single_selective_mechagraph( netuid: NetUid, mecid: MechId, metagraph_index: u16, diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index dcdd85bfcf..b0abf24710 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -2334,8 +2334,8 @@ impl_runtime_apis! { SubtensorModule::get_metagraph(netuid) } - fn get_submetagraph(netuid: NetUid, mecid: MechId) -> Option> { - SubtensorModule::get_submetagraph(netuid, mecid) + fn get_mechagraph(netuid: NetUid, mecid: MechId) -> Option> { + SubtensorModule::get_mechagraph(netuid, mecid) } fn get_subnet_state(netuid: NetUid) -> Option> { @@ -2346,8 +2346,8 @@ impl_runtime_apis! { SubtensorModule::get_all_metagraphs() } - fn get_all_submetagraphs() -> Vec>> { - SubtensorModule::get_all_submetagraphs() + fn get_all_mechagraphs() -> Vec>> { + SubtensorModule::get_all_mechagraphs() } fn get_all_dynamic_info() -> Vec>> { @@ -2358,8 +2358,8 @@ impl_runtime_apis! 
{ SubtensorModule::get_selective_metagraph(netuid, metagraph_indexes) } - fn get_selective_submetagraph(netuid: NetUid, mecid: MechId, metagraph_indexes: Vec) -> Option> { - SubtensorModule::get_selective_submetagraph(netuid, mecid, metagraph_indexes) + fn get_selective_mechagraph(netuid: NetUid, mecid: MechId, metagraph_indexes: Vec) -> Option> { + SubtensorModule::get_selective_mechagraph(netuid, mecid, metagraph_indexes) } } From ab23480f919863dbc75c9ca80b825794ac44634e Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 17 Sep 2025 10:44:41 -0700 Subject: [PATCH 267/379] continue on error --- pallets/swap/src/pallet/impls.rs | 44 +++++++++++++++++--------------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index c3652b10c9..ce4336df78 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1215,9 +1215,6 @@ impl Pallet { /// Dissolve all LPs and clean state. pub fn do_dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult { - let user_lp_enabled = - >::is_user_liquidity_enabled(netuid); - if SwapV3Initialized::::get(netuid) { // 1) Snapshot (owner, position_id). struct CloseItem { @@ -1236,21 +1233,28 @@ impl Pallet { .sort_by(|a, b| (a.owner == protocol_account).cmp(&(b.owner == protocol_account))); for CloseItem { owner, pos_id } in to_close.into_iter() { - let rm = Self::do_remove_liquidity(netuid, &owner, pos_id)?; - - // τ: refund **principal only** (no τ fees). - if rm.tao > TaoCurrency::ZERO { - T::BalanceOps::increase_balance(&owner, rm.tao); - } - - if owner != protocol_account { - // Principal reserves decrease - T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao); - - // Burn α (principal + fees) from provided reserves; do not credit to users. 
- let alpha_burn = rm.alpha.saturating_add(rm.fee_alpha); - if alpha_burn > AlphaCurrency::ZERO { - T::BalanceOps::decrease_provided_alpha_reserve(netuid, alpha_burn); + match Self::do_remove_liquidity(netuid, &owner, pos_id) { + Ok(rm) => { + if rm.tao > TaoCurrency::ZERO { + T::BalanceOps::increase_balance(&owner, rm.tao); + } + if owner != protocol_account { + T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao); + let alpha_burn = rm.alpha.saturating_add(rm.fee_alpha); + if alpha_burn > AlphaCurrency::ZERO { + T::BalanceOps::decrease_provided_alpha_reserve(netuid, alpha_burn); + } + } + } + Err(e) => { + log::debug!( + "dissolve_all_lp: force-closing failed position: netuid={:?}, owner={:?}, pos_id={:?}, err={:?}", + netuid, + owner, + pos_id, + e + ); + continue; } } } @@ -1277,7 +1281,7 @@ impl Pallet { EnabledUserLiquidity::::remove(netuid); log::debug!( - "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V3, user_lp_enabled={user_lp_enabled}, positions closed; τ principal refunded; α burned; state cleared" + "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V3, positions closed; τ principal refunded; α burned; state cleared" ); return Ok(()); @@ -1305,7 +1309,7 @@ impl Pallet { EnabledUserLiquidity::::remove(netuid); log::debug!( - "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V2-or-nonV3, user_lp_enabled={user_lp_enabled}, state_cleared" + "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V2-or-nonV3, state_cleared" ); Ok(()) From 4ce7b38cdcb25c9d2e627be3df68ea81d59f8492 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 17 Sep 2025 10:54:52 -0700 Subject: [PATCH 268/379] clippy --- pallets/swap/src/pallet/impls.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index ce4336df78..c0a109bfb5 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs 
@@ -1248,11 +1248,7 @@ impl Pallet { } Err(e) => { log::debug!( - "dissolve_all_lp: force-closing failed position: netuid={:?}, owner={:?}, pos_id={:?}, err={:?}", - netuid, - owner, - pos_id, - e + "dissolve_all_lp: force-closing failed position: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, err={e:?}" ); continue; } From d79c235728dbe23bd5f11b8c5e8d0b82924082bc Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 17 Sep 2025 15:08:04 -0400 Subject: [PATCH 269/379] fix typos, clpy --- .../subtensor/src/coinbase/run_coinbase.rs | 6 +-- pallets/subtensor/src/lib.rs | 43 +++---------------- pallets/subtensor/src/staking/helpers.rs | 3 +- 3 files changed, 9 insertions(+), 43 deletions(-) diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index ee0e50b96e..c4a499c933 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -487,8 +487,6 @@ impl Pallet { } } - let maybe_owner_hotkey = SubnetOwnerHotkey::::try_get(netuid); - // Distribute mining incentives. 
let subnet_owner_coldkey = SubnetOwner::::get(netuid); let owner_hotkeys = Self::get_owner_hotkeys(netuid, &subnet_owner_coldkey); @@ -503,11 +501,11 @@ impl Pallet { ); // Check if we should recycle or burn the incentive match RecycleOrBurn::::try_get(netuid) { - Ok(RecycleOrBurn::Recycle) => { + Ok(RecycleOrBurnEnum::Recycle) => { log::debug!("recycling {incentive:?}"); Self::recycle_subnet_alpha(netuid, incentive); } - Ok(RecycleOrBurn::Burn) | Err(_) => { + Ok(RecycleOrBurnEnum::Burn) | Err(_) => { log::debug!("burning {incentive:?}"); Self::burn_subnet_alpha(netuid, incentive); } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 26c0808e9a..e6472da315 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -306,43 +306,12 @@ pub mod pallet { } /// Enum for recycle or burn for the owner_uid(s) - /// - /// Can specify - #[derive(TypeInfo, Clone, PartialEq, Eq, Debug)] - #[default = Self::Burn(U16::MAX)] // default to burn everything + #[derive(TypeInfo, Encode, Decode, Clone, PartialEq, Eq, Debug)] pub enum RecycleOrBurnEnum { - Burn(u16), // u16-normalized weight - Recycle(u16), - } - impl codec::EncodeLike for RecycleOrBurnEnum { - fn encode_to(&self, e: &mut E) -> Result<(), E::Error> { - match self { - Self::Burn(weight) => { - e.encode_u8(0)?; - e.encode_u16(*weight) - } - Self::Recycle(weight) => { - e.encode_u8(1)?; - e.encode_u16(*weight) - } - } - } - } - impl codec::DecodeLike for RecycleOrBurnEnum { - fn decode(d: &mut D) -> Result { - let tag = d.read_byte()?; - match tag { - 0 => { - let weight = d.read_u16()?; - Ok(Self::Burn(weight)) - } - 1 => { - let weight = d.read_u16()?; - Ok(Self::Recycle(weight)) - } - _ => Err(codec::Error::from("invalid tag")), - } - } + /// Burn the miner emission sent to the burn UID + Burn, + /// Recycle the miner emission sent to the recycle UID + Recycle, } /// ============================ @@ -585,7 +554,7 @@ pub mod pallet { #[pallet::type_value] /// Default value 
for recycle or burn. pub fn DefaultRecycleOrBurn() -> RecycleOrBurnEnum { - RecycleOrBurnEnum::Burn(U16::MAX) // default to burn + RecycleOrBurnEnum::Burn // default to burn } #[pallet::type_value] /// Default value for network rate limit. diff --git a/pallets/subtensor/src/staking/helpers.rs b/pallets/subtensor/src/staking/helpers.rs index 85789aa5ea..1625afa811 100644 --- a/pallets/subtensor/src/staking/helpers.rs +++ b/pallets/subtensor/src/staking/helpers.rs @@ -329,8 +329,7 @@ impl Pallet { }); } - pub fn burn_subnet_alpha(netuid: NetUid, amount: AlphaCurrency) { + pub fn burn_subnet_alpha(_netuid: NetUid, _amount: AlphaCurrency) { // Do nothing; TODO: record burned alpha in a tracker - return; } } From c19aceef0c7626900fcca9314889901bf9891488 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 17 Sep 2025 15:08:15 -0400 Subject: [PATCH 270/379] use helper in burn ext --- pallets/subtensor/src/staking/recycle_alpha.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs index 4c1bbd0b9f..f4c35e53c4 100644 --- a/pallets/subtensor/src/staking/recycle_alpha.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -126,7 +126,7 @@ impl Pallet { &hotkey, &coldkey, netuid, amount, ); - // This is a burn, so we don't need to update AlphaOut. 
+ Self::burn_subnet_alpha(netuid, amount); // Deposit event Self::deposit_event(Event::AlphaBurned( From 1d4c5cf1b1eb17bd58356709e550b8433a3b2f7d Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 17 Sep 2025 15:08:43 -0400 Subject: [PATCH 271/379] rename burn_tokens -> recycle_tao --- pallets/subtensor/src/subnets/subnet.rs | 2 +- pallets/subtensor/src/swap/swap_coldkey.rs | 4 ++-- pallets/subtensor/src/utils/misc.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index 6241c54ef7..e293f36d68 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -194,7 +194,7 @@ impl Pallet { SubnetOwnerHotkey::::insert(netuid_to_register, hotkey.clone()); if actual_tao_lock_amount_less_pool_tao > TaoCurrency::ZERO { - Self::burn_tokens(actual_tao_lock_amount_less_pool_tao); + Self::recycle_tao(actual_tao_lock_amount_less_pool_tao); } if actual_tao_lock_amount > TaoCurrency::ZERO && pool_initial_tao > TaoCurrency::ZERO { diff --git a/pallets/subtensor/src/swap/swap_coldkey.rs b/pallets/subtensor/src/swap/swap_coldkey.rs index f7f9997183..8180650bf8 100644 --- a/pallets/subtensor/src/swap/swap_coldkey.rs +++ b/pallets/subtensor/src/swap/swap_coldkey.rs @@ -62,10 +62,10 @@ impl Pallet { Error::::NotEnoughBalanceToPaySwapColdKey ); - // 7. Remove and burn the swap cost from the old coldkey's account + // 7. Remove and recycle the swap cost from the old coldkey's account let actual_burn_amount = Self::remove_balance_from_coldkey_account(old_coldkey, swap_cost.into())?; - Self::burn_tokens(actual_burn_amount); + Self::recycle_tao(actual_burn_amount); // 8. 
Update the weight for the balance operations weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 7ab96d65cf..87001f5f2c 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -372,7 +372,7 @@ impl Pallet { // ======================== // === Token Management === // ======================== - pub fn burn_tokens(amount: TaoCurrency) { + pub fn recycle_tao(amount: TaoCurrency) { TotalIssuance::::put(TotalIssuance::::get().saturating_sub(amount)); } pub fn increase_issuance(amount: TaoCurrency) { From 0472cfe967810955d23f0d59f4b2373d7a26d267 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 17 Sep 2025 15:17:56 -0400 Subject: [PATCH 272/379] add rate limit and admin utils call --- pallets/admin-utils/src/lib.rs | 32 ++++++++++++++++++++ pallets/subtensor/src/utils/rate_limiting.rs | 1 + 2 files changed, 33 insertions(+) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 5569e286b9..c8885b0a0f 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1493,6 +1493,38 @@ pub mod pallet { res } + /// Set the behaviour of the "burn" UID(s) for a given subnet. + /// If set to `Burn`, the miner emission sent to the burn UID(s) will be burned. + /// If set to `Recycle`, the miner emission sent to the burn UID(s) will be recycled. + /// + /// # Parameters + /// - `origin`: The origin of the call, which must be the root account or subnet owner. + /// - `netuid`: The unique identifier for the subnet. + /// - `recycle_or_burn`: The desired behaviour of the "burn" UID(s) for the subnet. 
+ /// + #[pallet::call_index(80)] + #[pallet::weight((1_000_000, DispatchClass::Normal, Pays::Yes))] // TODO: add proper weights + pub fn sudo_set_recycle_or_burn( + origin: OriginFor, + netuid: NetUid, + recycle_or_burn: RecycleOrBurnEnum, + ) -> DispatchResult { + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[Hyperparameter::RecycleOrBurn.into()], + )?; + let res = pallet_subtensor::Pallet::::set_recycle_or_burn(netuid, recycle_or_burn); + if res.is_ok() { + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::RecycleOrBurn.into()], + ); + } + res + } + /// Toggles the enablement of an EVM precompile. /// /// # Arguments diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index 190634212a..b877e39bc0 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -196,6 +196,7 @@ pub enum Hyperparameter { Yuma3Enabled = 21, BondsResetEnabled = 22, ImmuneNeuronLimit = 23, + RecycleOrBurn = 24, } impl Pallet { From 542964d8f70a106370ef4beac6377312a4ee23ef Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 17 Sep 2025 15:47:36 -0400 Subject: [PATCH 273/379] fix references --- pallets/subtensor/src/swap/swap_hotkey.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs index 19737f765c..16b8229136 100644 --- a/pallets/subtensor/src/swap/swap_hotkey.rs +++ b/pallets/subtensor/src/swap/swap_hotkey.rs @@ -97,11 +97,11 @@ impl Pallet { weight.saturating_accrue(T::DbWeight::get().reads_writes(3, 0)); // 14. Remove the swap cost from the coldkey's account - let actual_burn_amount = + let actual_recycle_amount = Self::remove_balance_from_coldkey_account(&coldkey, swap_cost.into())?; - // 18. Burn the tokens - Self::burn_tokens(actual_burn_amount); + // 18. 
Recycle the tokens + Self::recycle_tao(actual_recycle_amount); weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 2)); // 19. Perform the hotkey swap @@ -296,11 +296,11 @@ impl Pallet { ); // 5. Remove the swap cost from the coldkey's account - let actual_burn_amount = Self::remove_balance_from_coldkey_account(coldkey, swap_cost)?; + let actual_recycle_amount = Self::remove_balance_from_coldkey_account(coldkey, swap_cost)?; weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 0)); - // 6. Burn the tokens - Self::burn_tokens(actual_burn_amount); + // 6. Recycle the tokens + Self::recycle_tao(actual_recycle_amount); weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); // 7. Swap owner. From 32cd5a4ee311ab6243a67c3497159e42e505fb40 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 17 Sep 2025 15:53:49 -0400 Subject: [PATCH 274/379] add decode derive, fixes --- pallets/admin-utils/src/lib.rs | 2 +- pallets/subtensor/src/lib.rs | 2 +- pallets/subtensor/src/utils/misc.rs | 6 ++++++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index c8885b0a0f..1341a92ac8 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1507,7 +1507,7 @@ pub mod pallet { pub fn sudo_set_recycle_or_burn( origin: OriginFor, netuid: NetUid, - recycle_or_burn: RecycleOrBurnEnum, + recycle_or_burn: pallet_subtensor::RecycleOrBurnEnum, ) -> DispatchResult { let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( origin, diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 08f506cda3..7bf88594eb 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -306,7 +306,7 @@ pub mod pallet { } /// Enum for recycle or burn for the owner_uid(s) - #[derive(TypeInfo, Encode, Decode, Clone, PartialEq, Eq, Debug)] + #[derive(TypeInfo, Encode, Decode, DecodeWithMemTracking, Clone, PartialEq, Eq, 
Debug)] pub enum RecycleOrBurnEnum { /// Burn the miner emission sent to the burn UID Burn, diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 799fe2536d..b0a981f75d 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -393,6 +393,12 @@ impl Pallet { total_subnet_locked.into() } + pub fn set_recycle_or_burn(netuid: NetUid, recycle_or_burn: RecycleOrBurnEnum) { + RecycleOrBurn::::insert(netuid, recycle_or_burn); + + Ok(()) + } + // ======================== // ========= Sudo ========= // ======================== From 0ffb346667a6259add86b68a5983334aef94842e Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 17 Sep 2025 15:55:05 -0400 Subject: [PATCH 275/379] remove Ok from setter --- pallets/admin-utils/src/lib.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 1341a92ac8..b46c397fbe 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1514,15 +1514,15 @@ pub mod pallet { netuid, &[Hyperparameter::RecycleOrBurn.into()], )?; - let res = pallet_subtensor::Pallet::::set_recycle_or_burn(netuid, recycle_or_burn); - if res.is_ok() { - pallet_subtensor::Pallet::::record_owner_rl( - maybe_owner, - netuid, - &[Hyperparameter::RecycleOrBurn.into()], - ); - } - res + + pallet_subtensor::Pallet::::set_recycle_or_burn(netuid, recycle_or_burn); + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[Hyperparameter::RecycleOrBurn.into()], + ); + + Ok(()) } /// Toggles the enablement of an EVM precompile. 
From c6742745b47802c7c40769cbbbc0620c9938a9e4 Mon Sep 17 00:00:00 2001 From: Cameron Fairchild Date: Wed, 17 Sep 2025 15:58:24 -0400 Subject: [PATCH 276/379] remove return value --- pallets/subtensor/src/utils/misc.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index b0a981f75d..9ca7e361cc 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -395,8 +395,6 @@ impl Pallet { pub fn set_recycle_or_burn(netuid: NetUid, recycle_or_burn: RecycleOrBurnEnum) { RecycleOrBurn::::insert(netuid, recycle_or_burn); - - Ok(()) } // ======================== From ff26cbcbac457a28852c13b51078ed05307b1f66 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 17 Sep 2025 13:40:19 -0700 Subject: [PATCH 277/379] remove migrate_restore_subnet_locked --- pallets/subtensor/src/macros/hooks.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index a3cb7a692f..13713354c8 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -145,9 +145,7 @@ mod hooks { // Migrate Subnet Limit .saturating_add(migrations::migrate_subnet_limit_to_default::migrate_subnet_limit_to_default::()) // Migrate Lock Reduction Interval - .saturating_add(migrations::migrate_network_lock_reduction_interval::migrate_network_lock_reduction_interval::()) - // Migrate subnet locked balances - .saturating_add(migrations::migrate_subnet_locked::migrate_restore_subnet_locked::()); + .saturating_add(migrations::migrate_network_lock_reduction_interval::migrate_network_lock_reduction_interval::()); weight } From 82725326fea84fbcef75583f05ab1ab2cf2b409f Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 17 Sep 2025 14:45:51 -0700 Subject: [PATCH 278/379] set locked balances for 65-128 
--- pallets/subtensor/src/macros/hooks.rs | 4 +- .../src/migrations/migrate_subnet_locked.rs | 129 +++++++------- pallets/subtensor/src/tests/migration.rs | 161 +++++++++--------- 3 files changed, 146 insertions(+), 148 deletions(-) diff --git a/pallets/subtensor/src/macros/hooks.rs b/pallets/subtensor/src/macros/hooks.rs index 13713354c8..a3cb7a692f 100644 --- a/pallets/subtensor/src/macros/hooks.rs +++ b/pallets/subtensor/src/macros/hooks.rs @@ -145,7 +145,9 @@ mod hooks { // Migrate Subnet Limit .saturating_add(migrations::migrate_subnet_limit_to_default::migrate_subnet_limit_to_default::()) // Migrate Lock Reduction Interval - .saturating_add(migrations::migrate_network_lock_reduction_interval::migrate_network_lock_reduction_interval::()); + .saturating_add(migrations::migrate_network_lock_reduction_interval::migrate_network_lock_reduction_interval::()) + // Migrate subnet locked balances + .saturating_add(migrations::migrate_subnet_locked::migrate_restore_subnet_locked::()); weight } diff --git a/pallets/subtensor/src/migrations/migrate_subnet_locked.rs b/pallets/subtensor/src/migrations/migrate_subnet_locked.rs index e72881ea7d..a430993f9d 100644 --- a/pallets/subtensor/src/migrations/migrate_subnet_locked.rs +++ b/pallets/subtensor/src/migrations/migrate_subnet_locked.rs @@ -19,71 +19,72 @@ pub fn migrate_restore_subnet_locked() -> Weight { return weight; } - // (netuid, locked_rao) pairs taken from the historical snapshot (block #4_828_623). + // Snapshot: NetworkLastLockCost at (registration_block + 1) for each netuid. 
const SUBNET_LOCKED: &[(u16, u64)] = &[ - (2, 976_893_069_056), - (3, 2_569_362_397_490), - (4, 1_928_551_593_932), - (5, 1_712_540_082_588), - (6, 1_495_929_556_770), - (7, 1_011_702_451_936), - (8, 337_484_391_024), - (9, 381_240_180_320), - (10, 1_253_515_128_353), - (11, 1_453_924_672_132), - (12, 100_000_000_000), - (13, 100_000_000_000), - (14, 1_489_714_521_808), - (15, 1_784_089_225_496), - (16, 889_176_219_484), - (17, 1_266_310_122_772), - (18, 222_355_058_433), - (19, 100_000_000_000), - (20, 100_000_000_000), - (21, 885_096_322_978), - (22, 100_000_000_000), - (23, 100_000_000_000), - (24, 5_146_073_854_481), - (25, 1_782_920_948_214), - (26, 153_583_865_248), - (27, 201_344_183_084), - (28, 901_455_879_445), - (29, 175_000_001_600), - (30, 1_419_730_660_074), - (31, 319_410_100_502), - (32, 2_016_397_028_246), - (33, 1_626_477_274_174), - (34, 1_455_297_496_345), - (35, 1_191_275_979_639), - (36, 1_097_008_574_216), - (37, 864_664_455_362), - (38, 1_001_936_494_076), - (39, 1_366_096_404_884), - (40, 100_000_000_000), - (41, 535_937_523_200), - (42, 1_215_698_423_344), - (43, 1_641_308_676_800), - (44, 1_514_636_189_434), - (45, 1_605_608_381_438), - (46, 1_095_943_027_350), - (47, 1_499_235_469_986), - (48, 1_308_073_720_362), - (49, 1_222_672_092_068), - (50, 2_628_355_421_561), - (51, 1_520_860_720_561), - (52, 1_794_457_248_725), - (53, 1_721_472_811_492), - (54, 2_048_900_691_868), - (55, 1_278_597_446_119), - (56, 2_016_045_544_480), - (57, 1_920_563_399_676), - (58, 2_246_525_691_504), - (59, 1_776_159_384_888), - (60, 2_173_138_865_414), - (61, 1_435_634_867_728), - (62, 2_061_282_563_888), - (63, 3_008_967_320_998), - (64, 2_099_236_359_026), + (65, 37_274_536_408), + (66, 65_230_444_016), + (67, 114_153_284_032), + (68, 199_768_252_064), + (69, 349_594_445_728), + (70, 349_412_366_216), + (71, 213_408_488_702), + (72, 191_341_473_067), + (73, 246_711_333_592), + (74, 291_874_466_228), + (75, 247_485_227_056), + (76, 291_241_991_316), + (77, 
303_154_601_714), + (78, 287_407_417_932), + (79, 254_935_051_664), + (80, 255_413_055_349), + (81, 249_790_431_509), + (82, 261_343_249_180), + (83, 261_361_408_796), + (84, 201_938_003_214), + (85, 264_805_234_604), + (86, 223_171_973_880), + (87, 180_397_358_280), + (88, 270_596_039_760), + (89, 286_399_608_951), + (90, 267_684_201_301), + (91, 284_637_542_762), + (92, 288_373_410_868), + (93, 290_836_604_849), + (94, 270_861_792_144), + (95, 210_595_055_304), + (96, 315_263_727_200), + (97, 158_244_884_792), + (98, 168_102_223_900), + (99, 252_153_339_800), + (100, 378_230_014_000), + (101, 205_977_765_866), + (102, 149_434_017_849), + (103, 135_476_471_008), + (104, 147_970_415_680), + (105, 122_003_668_139), + (106, 133_585_556_570), + (107, 200_137_144_216), + (108, 106_767_623_816), + (109, 124_280_483_748), + (110, 186_420_726_696), + (111, 249_855_564_892), + (112, 196_761_272_984), + (113, 147_120_048_727), + (114, 84_021_895_534), + (115, 98_002_215_656), + (116, 89_944_262_256), + (117, 107_183_582_952), + (118, 110_644_724_664), + (119, 99_380_483_902), + (120, 138_829_019_156), + (121, 111_988_743_976), + (122, 130_264_686_152), + (123, 118_034_291_488), + (124, 79_312_501_676), + (125, 43_214_310_704), + (126, 64_755_449_962), + (127, 97_101_698_382), + (128, 145_645_807_991), ]; let mut inserted: u32 = 0; diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index 1e58d2f7ab..816c87837e 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -1816,104 +1816,102 @@ fn test_migrate_network_lock_reduction_interval_and_decay() { } #[test] -fn test_migrate_restore_subnet_locked_feb1_2025() { - use sp_runtime::traits::SaturatedConversion; // only for NetUid -> u16 when reading back - use std::collections::BTreeMap; - - use crate::{HasMigrationRun, SubnetLocked, TaoCurrency}; - - // NOTE: Ensure the migration uses `TaoCurrency::from(rao_u64)` and a `&[(u16, u64)]` 
snapshot. +fn test_migrate_restore_subnet_locked_65_128() { + use sp_runtime::traits::SaturatedConversion; new_test_ext(0).execute_with(|| { - // ── pre ────────────────────────────────────────────────────────────── let name = b"migrate_restore_subnet_locked".to_vec(); assert!( !HasMigrationRun::::get(name.clone()), "HasMigrationRun should be false before migration" ); - // Snapshot at block #4_828_623 (2025-02-01 00:00:00Z), RAO as u64. + // Expected snapshot for netuids 65..128. const EXPECTED: &[(u16, u64)] = &[ - (2, 976_893_069_056), - (3, 2_569_362_397_490), - (4, 1_928_551_593_932), - (5, 1_712_540_082_588), - (6, 1_495_929_556_770), - (7, 1_011_702_451_936), - (8, 337_484_391_024), - (9, 381_240_180_320), - (10, 1_253_515_128_353), - (11, 1_453_924_672_132), - (12, 100_000_000_000), - (13, 100_000_000_000), - (14, 1_489_714_521_808), - (15, 1_784_089_225_496), - (16, 889_176_219_484), - (17, 1_266_310_122_772), - (18, 222_355_058_433), - (19, 100_000_000_000), - (20, 100_000_000_000), - (21, 885_096_322_978), - (22, 100_000_000_000), - (23, 100_000_000_000), - (24, 5_146_073_854_481), - (25, 1_782_920_948_214), - (26, 153_583_865_248), - (27, 201_344_183_084), - (28, 901_455_879_445), - (29, 175_000_001_600), - (30, 1_419_730_660_074), - (31, 319_410_100_502), - (32, 2_016_397_028_246), - (33, 1_626_477_274_174), - (34, 1_455_297_496_345), - (35, 1_191_275_979_639), - (36, 1_097_008_574_216), - (37, 864_664_455_362), - (38, 1_001_936_494_076), - (39, 1_366_096_404_884), - (40, 100_000_000_000), - (41, 535_937_523_200), - (42, 1_215_698_423_344), - (43, 1_641_308_676_800), - (44, 1_514_636_189_434), - (45, 1_605_608_381_438), - (46, 1_095_943_027_350), - (47, 1_499_235_469_986), - (48, 1_308_073_720_362), - (49, 1_222_672_092_068), - (50, 2_628_355_421_561), - (51, 1_520_860_720_561), - (52, 1_794_457_248_725), - (53, 1_721_472_811_492), - (54, 2_048_900_691_868), - (55, 1_278_597_446_119), - (56, 2_016_045_544_480), - (57, 1_920_563_399_676), - (58, 
2_246_525_691_504), - (59, 1_776_159_384_888), - (60, 2_173_138_865_414), - (61, 1_435_634_867_728), - (62, 2_061_282_563_888), - (63, 3_008_967_320_998), - (64, 2_099_236_359_026), + (65, 37_274_536_408), + (66, 65_230_444_016), + (67, 114_153_284_032), + (68, 199_768_252_064), + (69, 349_594_445_728), + (70, 349_412_366_216), + (71, 213_408_488_702), + (72, 191_341_473_067), + (73, 246_711_333_592), + (74, 291_874_466_228), + (75, 247_485_227_056), + (76, 291_241_991_316), + (77, 303_154_601_714), + (78, 287_407_417_932), + (79, 254_935_051_664), + (80, 255_413_055_349), + (81, 249_790_431_509), + (82, 261_343_249_180), + (83, 261_361_408_796), + (84, 201_938_003_214), + (85, 264_805_234_604), + (86, 223_171_973_880), + (87, 180_397_358_280), + (88, 270_596_039_760), + (89, 286_399_608_951), + (90, 267_684_201_301), + (91, 284_637_542_762), + (92, 288_373_410_868), + (93, 290_836_604_849), + (94, 270_861_792_144), + (95, 210_595_055_304), + (96, 315_263_727_200), + (97, 158_244_884_792), + (98, 168_102_223_900), + (99, 252_153_339_800), + (100, 378_230_014_000), + (101, 205_977_765_866), + (102, 149_434_017_849), + (103, 135_476_471_008), + (104, 147_970_415_680), + (105, 122_003_668_139), + (106, 133_585_556_570), + (107, 200_137_144_216), + (108, 106_767_623_816), + (109, 124_280_483_748), + (110, 186_420_726_696), + (111, 249_855_564_892), + (112, 196_761_272_984), + (113, 147_120_048_727), + (114, 84_021_895_534), + (115, 98_002_215_656), + (116, 89_944_262_256), + (117, 107_183_582_952), + (118, 110_644_724_664), + (119, 99_380_483_902), + (120, 138_829_019_156), + (121, 111_988_743_976), + (122, 130_264_686_152), + (123, 118_034_291_488), + (124, 79_312_501_676), + (125, 43_214_310_704), + (126, 64_755_449_962), + (127, 97_101_698_382), + (128, 145_645_807_991), ]; - // ── run migration ──────────────────────────────────────────────────── + // Run migration let weight = crate::migrations::migrate_subnet_locked::migrate_restore_subnet_locked::(); 
assert!(!weight.is_zero(), "migration weight should be > 0"); - // ── validate: build a (u16 -> u64) map directly from storage iterator ─ + // Read back storage as (u16 -> u64) let actual: BTreeMap = SubnetLocked::::iter() .map(|(k, v)| (k.saturated_into::(), u64::from(v))) .collect(); let expected: BTreeMap = EXPECTED.iter().copied().collect(); - // 1) exact content match (keys and values) - assert_eq!(actual, expected, "SubnetLocked map mismatch with snapshot"); + // 1) exact content + assert_eq!( + actual, expected, + "SubnetLocked map mismatch for 65..128 snapshot" + ); - // 2) count and total sum match expectations + // 2) count and total let expected_len = expected.len(); let expected_sum: u128 = expected.values().map(|v| *v as u128).sum(); @@ -1923,22 +1921,19 @@ fn test_migrate_restore_subnet_locked_feb1_2025() { assert_eq!(count_after, expected_len, "entry count mismatch"); assert_eq!(sum_after, expected_sum, "total RAO sum mismatch"); - // ── migration flag ─────────────────────────────────────────────────── + // 3) migration flag set assert!( HasMigrationRun::::get(name.clone()), "HasMigrationRun should be true after migration" ); - // ── idempotence: re-running does not change storage ───────────────── - let before = actual; - + // 4) idempotence + let before = actual.clone(); let _again = crate::migrations::migrate_subnet_locked::migrate_restore_subnet_locked::(); - let after: BTreeMap = SubnetLocked::::iter() .map(|(k, v)| (k.saturated_into::(), u64::from(v))) .collect(); - assert_eq!( before, after, "re-running the migration should not change storage" From 9945e3deb4add33ea4f43e881d88f69cae4b979c Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 19:45:57 -0700 Subject: [PATCH 279/379] initial --- .github/workflows/docker-localnet.yml | 96 +++++++++++++++++++++------ Dockerfile-localnet | 24 +++++-- 2 files changed, 95 insertions(+), 25 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml 
index 13068682c7..faa2374403 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -15,7 +15,7 @@ on: - main - testnet - devnet - + concurrency: group: docker-localnet-${{ github.ref }} cancel-in-progress: true @@ -27,29 +27,88 @@ permissions: security-events: write jobs: - publish: - runs-on: SubtensorCI - + setup: + runs-on: ubuntu-latest + outputs: + tag: ${{ steps.vars.outputs.tag }} + ref: ${{ steps.vars.outputs.ref }} + latest_tag: ${{ steps.vars.outputs.latest_tag }} steps: - - name: Determine Docker tag and ref - id: tag + - id: vars run: | branch_or_tag="${{ github.event.inputs.branch-or-tag || github.ref_name }}" - echo "Determined branch or tag: $branch_or_tag" - echo "tag=$branch_or_tag" >> $GITHUB_ENV - echo "ref=$branch_or_tag" >> $GITHUB_ENV + echo "tag=$branch_or_tag" >> $GITHUB_OUTPUT + echo "ref=$branch_or_tag" >> $GITHUB_OUTPUT - # Check if this is a tagged release (not devnet-ready/devnet/testnet) if [[ "$branch_or_tag" != "devnet-ready" ]]; then - echo "latest_tag=true" >> $GITHUB_ENV + echo "latest_tag=true" >> $GITHUB_OUTPUT + else + echo "latest_tag=false" >> $GITHUB_OUTPUT + fi + + # build artifacts for fast-runtime and non-fast-runtime + build: + needs: setup + strategy: + matrix: + runtime: ["fast-runtime", "non-fast-runtime"] + runs-on: [self-hosted, type-cax41, image-arm-app-docker-ce] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ needs.setup.outputs.ref }} + + - name: Patch limits for local run + run: | + chmod +x ./scripts/localnet_patch.sh + ./scripts/localnet_patch.sh + + - name: Build binaries + run: | + if [ "${{ matrix.runtime }}" = "fast-runtime" ]; then + ./scripts/localnet.sh --build-only else - echo "latest_tag=false" >> $GITHUB_ENV + ./scripts/localnet.sh False --build-only fi + - name: Prepare artifacts for upload + run: | + mkdir -p upload + cp -v snapshot.json upload/ || true + cp -v scripts/localnet.sh upload/ || true + cp -v target/${{ 
matrix.runtime }}/release/node-subtensor upload/node-subtensor || true + cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm upload/ || true + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: artifacts-${{ matrix.runtime }} + path: upload/ + + # collect all artifacts and publish them to docker repo + publish: + needs: [setup, build] + runs-on: SubtensorCI + + steps: - name: Checkout code uses: actions/checkout@v4 with: - ref: ${{ env.ref }} + ref: ${{ needs.setup.outputs.ref }} + + - name: Download fast-runtime artifacts + uses: actions/download-artifact@v4 + with: + name: artifacts-fast-runtime + path: bin/fast-runtime + + - name: Download non-fast-runtime artifacts + uses: actions/download-artifact@v4 + with: + name: artifacts-non-fast-runtime + path: bin/non-fast-runtime - name: Show current Git branch run: | @@ -71,20 +130,17 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Patch non-fast-block node - run: | - chmod +x ./scripts/localnet_patch.sh - ./scripts/localnet_patch.sh - - name: Build and push Docker image uses: docker/build-push-action@v6 with: context: . 
file: Dockerfile-localnet + build-args: | + BUILT_IN_CI="Boom shakalaka" push: true platforms: linux/amd64,linux/arm64 tags: | - ghcr.io/${{ github.repository }}-localnet:${{ env.tag }} - ${{ env.latest_tag == 'true' && format('ghcr.io/{0}-localnet:latest', github.repository) || '' }} + ghcr.io/${{ github.repository }}-localnet:${{ needs.setup.outputs.tag }} + ${{ needs.setup.outputs.latest_tag == 'true' && format('ghcr.io/{0}-localnet:latest', github.repository) || '' }} cache-from: type=gha cache-to: type=gha,mode=max diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 491362e293..4522c6080c 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -6,6 +6,10 @@ SHELL ["/bin/bash", "-c"] # Set noninteractive mode for apt-get ARG DEBIAN_FRONTEND=noninteractive +# Set default to 0 (if not overridden by build arg) +ARG BUILT_IN_CI=0 +ENV BUILT_IN_CI=${BUILT_IN_CI} + LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ ai.opentensor.image.vendor="Opentensor Foundation" \ ai.opentensor.image.title="opentensor/subtensor-localnet" \ @@ -27,13 +31,23 @@ WORKDIR /build RUN set -o pipefail && curl https://sh.rustup.rs -sSf | sh -s -- -y ENV PATH="/root/.cargo/bin:${PATH}" RUN rustup toolchain install 1.88.0 --profile minimal -RUN rustup default 1.88.0'' +RUN rustup default 1.88.0 RUN rustup target add wasm32v1-none -## Build fast-runtime node -RUN ./scripts/localnet.sh --build-only -# Build non-fast-runtime -RUN ./scripts/localnet.sh False --build-only + +# Echo the value +RUN echo "BUILT_IN_CI=$BUILT_IN_CI" +RUN if [ -z "$BUILT_IN_CI" ]; then \ + ./scripts/localnet.sh --build-only && \ + ./scripts/localnet.sh False --build-only ; \ + else \ + echo "BUILT_IN_CI is set → skipping builds."; \ + fi + +### Build fast-runtime node +#RUN ./scripts/localnet.sh --build-only +## Build non-fast-runtime +#RUN ./scripts/localnet.sh False --build-only # Verify the binaries was produced RUN test -e /build/target/fast-runtime/release/node-subtensor From 
de54e3c4ac2664078451bffe3ce706b1d045ebd6 Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 20:40:52 -0700 Subject: [PATCH 280/379] add install_build_env.sh and install rust if required --- .github/workflows/docker-localnet.yml | 5 +++ Dockerfile-localnet | 25 ++++++-------- scripts/install_build_env.sh | 50 +++++++++++++++++++++++++++ 3 files changed, 65 insertions(+), 15 deletions(-) create mode 100644 scripts/install_build_env.sh diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index faa2374403..4db6516993 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -60,6 +60,11 @@ jobs: with: ref: ${{ needs.setup.outputs.ref }} + - name: Install rust + dependencies + run: | + chmod +x ./scripts/install_build_env.sh + ./scripts/install_build_env.sh + - name: Patch limits for local run run: | chmod +x ./scripts/localnet_patch.sh diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 4522c6080c..3ae1c09932 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -16,32 +16,27 @@ LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ ai.opentensor.image.description="Opentensor Subtensor Blockchain" \ ai.opentensor.image.documentation="https://docs.bittensor.com" -# Set up Rust environment -ENV RUST_BACKTRACE=1 - -RUN sed -i 's|http://archive.ubuntu.com/ubuntu|http://mirrors.edge.kernel.org/ubuntu|g' /etc/apt/sources.list -RUN apt-get update -RUN apt-get install -y curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev - -# Copy entire repository +# Copy repo first (you want this *before* RUN to enable layer cache reuse) COPY . 
/build WORKDIR /build -# Install Rust -RUN set -o pipefail && curl https://sh.rustup.rs -sSf | sh -s -- -y +# Set up env var +ARG BUILT_IN_CI +ENV BUILT_IN_CI=${BUILT_IN_CI} +ENV RUST_BACKTRACE=1 ENV PATH="/root/.cargo/bin:${PATH}" -RUN rustup toolchain install 1.88.0 --profile minimal -RUN rustup default 1.88.0 -RUN rustup target add wasm32v1-none - # Echo the value RUN echo "BUILT_IN_CI=$BUILT_IN_CI" +# Install deps if $BUILT_IN_CI wasn't passed RUN if [ -z "$BUILT_IN_CI" ]; then \ + echo "[*] Installing env and building binaries..." && \ + chmod +x ./scripts/install_build_env.sh && \ + ./scripts/install_build_env.sh && \ ./scripts/localnet.sh --build-only && \ ./scripts/localnet.sh False --build-only ; \ else \ - echo "BUILT_IN_CI is set → skipping builds."; \ + echo "[*] BUILT_IN_CI is set → skipping install + build."; \ fi ### Build fast-runtime node diff --git a/scripts/install_build_env.sh b/scripts/install_build_env.sh new file mode 100644 index 0000000000..b1aeac6386 --- /dev/null +++ b/scripts/install_build_env.sh @@ -0,0 +1,50 @@ +#!/bin/bash +set -e + +echo "[*] Detecting platform..." +UNAME_OUT="$(uname -s)" +case "${UNAME_OUT}" in + Linux*) OS=Linux;; + Darwin*) OS=Mac;; + *) OS="UNKNOWN:${UNAME_OUT}" +esac + +echo "[+] Platform: $OS" + +if [ "$OS" = "Linux" ]; then + echo "[+] Installing dependencies on Linux..." + sudo sed -i 's|http://archive.ubuntu.com/ubuntu|http://mirrors.edge.kernel.org/ubuntu|g' /etc/apt/sources.list || true + sudo apt-get update + sudo apt-get install -y curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev + +elif [ "$OS" = "Mac" ]; then + echo "[+] Installing dependencies on macOS..." + # Check if brew is installed + if ! command -v brew &> /dev/null; then + echo "[!] Homebrew not found. Installing..." 
+ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + eval "$(/opt/homebrew/bin/brew shellenv)" + fi + + brew install protobuf openssl llvm pkg-config + + LDFLAGS="-L$(brew --prefix openssl)/lib" + export LDFLAGS + + CPPFLAGS="-I$(brew --prefix openssl)/include" + export CPPFLAGS + +else + echo "[!] Unsupported OS: $OS" + exit 1 +fi + +echo "[+] Installing Rust toolchain..." +curl https://sh.rustup.rs -sSf | sh -s -- -y + +# Activate rust in shell +source "$HOME/.cargo/env" || export PATH="$HOME/.cargo/bin:$PATH" + +rustup toolchain install 1.88.0 --profile minimal +rustup default 1.88.0 +rustup target add wasm32v1-none From 1f5a3e183b395c1a8c030658b3879440ec5b42e8 Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 20:51:12 -0700 Subject: [PATCH 281/379] opps sudo --- Dockerfile-localnet | 4 ---- scripts/install_build_env.sh | 6 +++--- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 3ae1c09932..9c0fbfc589 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -6,10 +6,6 @@ SHELL ["/bin/bash", "-c"] # Set noninteractive mode for apt-get ARG DEBIAN_FRONTEND=noninteractive -# Set default to 0 (if not overridden by build arg) -ARG BUILT_IN_CI=0 -ENV BUILT_IN_CI=${BUILT_IN_CI} - LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ ai.opentensor.image.vendor="Opentensor Foundation" \ ai.opentensor.image.title="opentensor/subtensor-localnet" \ diff --git a/scripts/install_build_env.sh b/scripts/install_build_env.sh index b1aeac6386..1c76515dff 100644 --- a/scripts/install_build_env.sh +++ b/scripts/install_build_env.sh @@ -13,9 +13,9 @@ echo "[+] Platform: $OS" if [ "$OS" = "Linux" ]; then echo "[+] Installing dependencies on Linux..." 
- sudo sed -i 's|http://archive.ubuntu.com/ubuntu|http://mirrors.edge.kernel.org/ubuntu|g' /etc/apt/sources.list || true - sudo apt-get update - sudo apt-get install -y curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev + sed -i 's|http://archive.ubuntu.com/ubuntu|http://mirrors.edge.kernel.org/ubuntu|g' /etc/apt/sources.list || true + apt-get update + apt-get install -y curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev elif [ "$OS" = "Mac" ]; then echo "[+] Installing dependencies on macOS..." From 7a7eda7f50d795ab8737591fe67e3e7412fe7aef Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 21:00:05 -0700 Subject: [PATCH 282/379] changing the runner for a quick test --- .github/workflows/docker-localnet.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 4db6516993..fd269b3183 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -52,7 +52,8 @@ jobs: strategy: matrix: runtime: ["fast-runtime", "non-fast-runtime"] - runs-on: [self-hosted, type-cax41, image-arm-app-docker-ce] +# runs-on: [self-hosted, type-cax41, image-arm-app-docker-ce] + runs-on: SubtensorCI steps: - name: Checkout code @@ -95,7 +96,7 @@ jobs: # collect all artifacts and publish them to docker repo publish: needs: [setup, build] - runs-on: SubtensorCI + runs-on: [self-hosted, type-cax11] steps: - name: Checkout code From f4374d92ff058c9200a33b031f51be0fa7d3de03 Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 21:07:34 -0700 Subject: [PATCH 283/379] sudo --- scripts/install_build_env.sh | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/scripts/install_build_env.sh b/scripts/install_build_env.sh index 1c76515dff..5df411b090 100644 --- a/scripts/install_build_env.sh +++ b/scripts/install_build_env.sh @@ -11,14 
+11,34 @@ esac echo "[+] Platform: $OS" +# Determine if we have root privileges +if [ "$(id -u)" -eq 0 ]; then + SUDO="" +else + if command -v sudo &>/dev/null; then + SUDO="sudo" + else + SUDO="" + fi +fi + +# Linux system dependencies if [ "$OS" = "Linux" ]; then echo "[+] Installing dependencies on Linux..." - sed -i 's|http://archive.ubuntu.com/ubuntu|http://mirrors.edge.kernel.org/ubuntu|g' /etc/apt/sources.list || true - apt-get update - apt-get install -y curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev + if [ -z "$SUDO" ] && [ "$(id -u)" -ne 0 ]; then + echo "[!] Warning: No sudo and not root. Skipping apt install." + else + $SUDO sed -i 's|http://archive.ubuntu.com/ubuntu|http://mirrors.edge.kernel.org/ubuntu|g' /etc/apt/sources.list || true + $SUDO apt-get update + $SUDO apt-get install -y --no-install-recommends \ + curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev + fi + +# macOS system dependencies elif [ "$OS" = "Mac" ]; then echo "[+] Installing dependencies on macOS..." + # Check if brew is installed if ! command -v brew &> /dev/null; then echo "[!] Homebrew not found. Installing..." @@ -48,3 +68,5 @@ source "$HOME/.cargo/env" || export PATH="$HOME/.cargo/bin:$PATH" rustup toolchain install 1.88.0 --profile minimal rustup default 1.88.0 rustup target add wasm32v1-none + +echo "[✓] Environment setup complete." 
\ No newline at end of file From 66e6c7e7e654f08df50b63cf43c7d96751cc004d Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 21:10:00 -0700 Subject: [PATCH 284/379] sudo for self-hosted runner + install ca-certificates --- Dockerfile-localnet | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 9c0fbfc589..581f13fe5e 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -22,6 +22,9 @@ ENV BUILT_IN_CI=${BUILT_IN_CI} ENV RUST_BACKTRACE=1 ENV PATH="/root/.cargo/bin:${PATH}" +## Ubdate certificates +RUN apt-get update && apt-get install -y ca-certificates + # Echo the value RUN echo "BUILT_IN_CI=$BUILT_IN_CI" # Install deps if $BUILT_IN_CI wasn't passed @@ -62,9 +65,6 @@ RUN chmod +x /scripts/localnet.sh COPY --from=builder /build/target/fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm COPY --from=builder /build/target/non-fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/non-fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm -## Ubdate certificates -RUN apt-get update && apt-get install -y ca-certificates - # Do not build (just run) ENV BUILD_BINARY=0 # Switch to local run with IP 0.0.0.0 within docker image From e28d777f953814933761e1e3a535e71db4b118eb Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 21:13:10 -0700 Subject: [PATCH 285/379] replace runners --- .github/workflows/docker-localnet.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index fd269b3183..2c1dc935c9 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -52,8 +52,8 @@ jobs: strategy: matrix: runtime: ["fast-runtime", "non-fast-runtime"] -# runs-on: [self-hosted, 
type-cax41, image-arm-app-docker-ce] - runs-on: SubtensorCI + runs-on: [self-hosted, type-cax41] +# runs-on: SubtensorCI steps: - name: Checkout code @@ -96,7 +96,8 @@ jobs: # collect all artifacts and publish them to docker repo publish: needs: [setup, build] - runs-on: [self-hosted, type-cax11] +# runs-on: [self-hosted, type-cax11] + runs-on: [self-hosted, type-cax41, image-arm-app-docker-ce] steps: - name: Checkout code From aed1973b7af964b67f4f0ad6dac1db4df747779f Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 21:20:04 -0700 Subject: [PATCH 286/379] export PATH="$HOME/.cargo/bin:$PATH" --- .github/workflows/docker-localnet.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 2c1dc935c9..1625847da9 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -73,6 +73,7 @@ jobs: - name: Build binaries run: | + export PATH="$HOME/.cargo/bin:$PATH" if [ "${{ matrix.runtime }}" = "fast-runtime" ]; then ./scripts/localnet.sh --build-only else From 365dedc78f13748c34f15d37898fe1ef8d0bf1ac Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 21:50:22 -0700 Subject: [PATCH 287/379] add certificate in the end --- Dockerfile-localnet | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 581f13fe5e..0666f841c9 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -65,6 +65,9 @@ RUN chmod +x /scripts/localnet.sh COPY --from=builder /build/target/fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm COPY --from=builder /build/target/non-fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/non-fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm +## Ubdate certificates for inner image +RUN apt-get 
update && apt-get install -y ca-certificates + # Do not build (just run) ENV BUILD_BINARY=0 # Switch to local run with IP 0.0.0.0 within docker image From 7dd0e058cc4173da46f089813d236cf6aee235eb Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 22:27:32 -0700 Subject: [PATCH 288/379] no so much power --- .github/workflows/docker-localnet.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 1625847da9..9410c2155d 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -52,7 +52,7 @@ jobs: strategy: matrix: runtime: ["fast-runtime", "non-fast-runtime"] - runs-on: [self-hosted, type-cax41] + runs-on: [self-hosted, type-cax31] # runs-on: SubtensorCI steps: @@ -98,7 +98,7 @@ jobs: publish: needs: [setup, build] # runs-on: [self-hosted, type-cax11] - runs-on: [self-hosted, type-cax41, image-arm-app-docker-ce] + runs-on: [self-hosted, type-cax31, image-arm-app-docker-ce] steps: - name: Checkout code From e3b45cc935b8066f4a1f8dc4e51b2f1da1436e73 Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 22:47:02 -0700 Subject: [PATCH 289/379] type-ccx33 --- .github/workflows/docker-localnet.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 9410c2155d..f50da404ef 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -52,7 +52,7 @@ jobs: strategy: matrix: runtime: ["fast-runtime", "non-fast-runtime"] - runs-on: [self-hosted, type-cax31] + runs-on: [self-hosted, type-ccx33] # runs-on: SubtensorCI steps: @@ -97,8 +97,8 @@ jobs: # collect all artifacts and publish them to docker repo publish: needs: [setup, build] -# runs-on: [self-hosted, type-cax11] - runs-on: [self-hosted, type-cax31, image-arm-app-docker-ce] + + runs-on: [self-hosted, type-ccx33, 
image-arm-app-docker-ce] steps: - name: Checkout code From 3efe19a221be815e6882061138a6ddc25bd03260 Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 22:51:19 -0700 Subject: [PATCH 290/379] cax41 --- .github/workflows/docker-localnet.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index f50da404ef..7fb6e546d1 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -52,7 +52,7 @@ jobs: strategy: matrix: runtime: ["fast-runtime", "non-fast-runtime"] - runs-on: [self-hosted, type-ccx33] + runs-on: [self-hosted, cax41] # runs-on: SubtensorCI steps: @@ -98,7 +98,7 @@ jobs: publish: needs: [setup, build] - runs-on: [self-hosted, type-ccx33, image-arm-app-docker-ce] + runs-on: [self-hosted, cax41, image-arm-app-docker-ce] steps: - name: Checkout code From 1df420d3ecd43ade459678474456e1225439212c Mon Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 23:04:12 -0700 Subject: [PATCH 291/379] diff logic for id: vars --- .github/workflows/docker-localnet.yml | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 7fb6e546d1..1da80fa1db 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -36,9 +36,16 @@ jobs: steps: - id: vars run: | - branch_or_tag="${{ github.event.inputs.branch-or-tag || github.ref_name }}" - echo "tag=$branch_or_tag" >> $GITHUB_OUTPUT - echo "ref=$branch_or_tag" >> $GITHUB_OUTPUT + tag="${{ github.event.inputs.branch-or-tag }}" + ref="${{ github.ref_name }}" + + if [[ -n "$tag" ]]; then + echo "tag=$tag" >> $GITHUB_OUTPUT + else + echo "tag=$ref" >> $GITHUB_OUTPUT + fi + + echo "ref=$ref" >> $GITHUB_OUTPUT if [[ "$branch_or_tag" != "devnet-ready" ]]; then echo "latest_tag=true" >> $GITHUB_OUTPUT From a287983c21765788b4452ab97b76ac4b3c7a159d Mon 
Sep 17 00:00:00 2001 From: Roman Date: Wed, 17 Sep 2025 23:27:37 -0700 Subject: [PATCH 292/379] equal --- .github/workflows/docker-localnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 1da80fa1db..3187f09e19 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -105,7 +105,7 @@ jobs: publish: needs: [setup, build] - runs-on: [self-hosted, cax41, image-arm-app-docker-ce] + runs-on: [self-hosted, cax41] steps: - name: Checkout code From a23a86c846f7d566edbe4ef21f576e9b59ed9c00 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 00:12:14 -0700 Subject: [PATCH 293/379] add `Reconstruct target layout` step --- .github/workflows/docker-localnet.yml | 42 +++++++++++++++++++-------- 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 3187f09e19..2a4d571537 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -6,7 +6,7 @@ on: workflow_dispatch: inputs: branch-or-tag: - description: "Branch or tag to use for the Docker image tag and ref to checkout (optional)" + description: "Ветвь или тег для использования в качестве тега образа Docker (optional)." 
required: false default: "" push: @@ -36,21 +36,20 @@ jobs: steps: - id: vars run: | - tag="${{ github.event.inputs.branch-or-tag }}" - ref="${{ github.ref_name }}" - - if [[ -n "$tag" ]]; then - echo "tag=$tag" >> $GITHUB_OUTPUT + if [[ "${{ github.event_name }}" == "pull_request" ]]; then + echo "ref=${{ github.head_ref }}" >> $GITHUB_ENV + echo "tag=${{ github.base_ref }}" >> $GITHUB_ENV else - echo "tag=$ref" >> $GITHUB_OUTPUT + tag="${{ github.event.inputs.branch-or-tag || github.ref_name }}" + echo "ref=${{ github.ref_name }}" >> $GITHUB_ENV + echo "tag=$tag" >> $GITHUB_ENV fi - echo "ref=$ref" >> $GITHUB_OUTPUT - - if [[ "$branch_or_tag" != "devnet-ready" ]]; then - echo "latest_tag=true" >> $GITHUB_OUTPUT + # Check if this is a tagged release (not devnet-ready/devnet/testnet) + if [[ "$tag" != "devnet-ready" ]]; then + echo "latest_tag=true" >> $GITHUB_ENV else - echo "latest_tag=false" >> $GITHUB_OUTPUT + echo "latest_tag=false" >> $GITHUB_ENV fi # build artifacts for fast-runtime and non-fast-runtime @@ -125,6 +124,25 @@ jobs: name: artifacts-non-fast-runtime path: bin/non-fast-runtime + - name: Reconstruct target layout + run: | + # Copy snapshot + script + cp bin/fast-runtime/snapshot.json . 
+ cp bin/fast-runtime/localnet.sh scripts/localnet.sh + chmod +x scripts/localnet.sh + + # Reconstruct fast-runtime + mkdir -p target/fast-runtime/release/wbuild/node-subtensor-runtime + cp bin/fast-runtime/node-subtensor target/fast-runtime/release/node-subtensor + cp bin/fast-runtime/node_subtensor_runtime.compact.compressed.wasm \ + target/fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm + + # Reconstruct non-fast-runtime + mkdir -p target/non-fast-runtime/release/wbuild/node-subtensor-runtime + cp bin/non-fast-runtime/node-subtensor target/non-fast-runtime/release/node-subtensor + cp bin/non-fast-runtime/node_subtensor_runtime.compact.compressed.wasm \ + target/non-fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm + - name: Show current Git branch run: | echo "===============================" From 4682d552359e75a8b2c48aa4a6c21aec3ada4232 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 00:15:34 -0700 Subject: [PATCH 294/379] cleanup --- .github/workflows/docker-localnet.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 2a4d571537..81372806a4 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -6,7 +6,7 @@ on: workflow_dispatch: inputs: branch-or-tag: - description: "Ветвь или тег для использования в качестве тега образа Docker (optional)." + description: "The branch or tag to use as the Docker image tag (optional)." 
required: false default: "" push: @@ -59,7 +59,6 @@ jobs: matrix: runtime: ["fast-runtime", "non-fast-runtime"] runs-on: [self-hosted, cax41] -# runs-on: SubtensorCI steps: - name: Checkout code @@ -100,7 +99,7 @@ jobs: name: artifacts-${{ matrix.runtime }} path: upload/ - # collect all artifacts and publish them to docker repo + # Collect all artifacts and publish them to docker repo publish: needs: [setup, build] @@ -124,6 +123,7 @@ jobs: name: artifacts-non-fast-runtime path: bin/non-fast-runtime + # Reconstruct target layout for fast-runtime and non-fast-runtime as required by Dockerfile-localnet - name: Reconstruct target layout run: | # Copy snapshot + script From f1248a5c4f57b22cd6fbc6fa10bb13160d4992e7 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 00:19:32 -0700 Subject: [PATCH 295/379] add WARNING --- scripts/install_build_env.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/scripts/install_build_env.sh b/scripts/install_build_env.sh index 5df411b090..ab18da5134 100644 --- a/scripts/install_build_env.sh +++ b/scripts/install_build_env.sh @@ -1,4 +1,17 @@ #!/bin/bash + +echo "" +echo "######################################################################" +echo "### WARNING: DO NOT MODIFY THIS SCRIPT UNLESS YOU KNOW WHY! ###" +echo "### ###" +echo "### This script is used by: ###" +echo "### • .github/workflows/docker-localnet.yml ###" +echo "### • Dockerfile-localnet ###" +echo "### ###" +echo "### Any changes may break CI builds or local Docker environments. ###" +echo "######################################################################" +echo "" + set -e echo "[*] Detecting platform..." 
From efd2eaa6bde5fc884d5deb7c5820b16fe4c004c7 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 00:39:49 -0700 Subject: [PATCH 296/379] $GITHUB_ENV -> GITHUB_OUTPUT --- .github/workflows/docker-localnet.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 81372806a4..88b76744ff 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -34,22 +34,22 @@ jobs: ref: ${{ steps.vars.outputs.ref }} latest_tag: ${{ steps.vars.outputs.latest_tag }} steps: - - id: vars + - name: Determine Docker tag and ref + id: vars run: | if [[ "${{ github.event_name }}" == "pull_request" ]]; then - echo "ref=${{ github.head_ref }}" >> $GITHUB_ENV - echo "tag=${{ github.base_ref }}" >> $GITHUB_ENV + echo "ref=${{ github.head_ref }}" >> $GITHUB_OUTPUT + echo "tag=${{ github.base_ref }}" >> $GITHUB_OUTPUT else tag="${{ github.event.inputs.branch-or-tag || github.ref_name }}" - echo "ref=${{ github.ref_name }}" >> $GITHUB_ENV - echo "tag=$tag" >> $GITHUB_ENV + echo "ref=${{ github.ref_name }}" >> $GITHUB_OUTPUT + echo "tag=$tag" >> $GITHUB_OUTPUT fi - - # Check if this is a tagged release (not devnet-ready/devnet/testnet) + if [[ "$tag" != "devnet-ready" ]]; then - echo "latest_tag=true" >> $GITHUB_ENV + echo "latest_tag=true" >> $GITHUB_OUTPUT else - echo "latest_tag=false" >> $GITHUB_ENV + echo "latest_tag=false" >> $GITHUB_OUTPUT fi # build artifacts for fast-runtime and non-fast-runtime From 58a6a6b386bd2c9c0500b489bee7a33462af414a Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 01:16:06 -0700 Subject: [PATCH 297/379] add build to publish --- .github/workflows/docker-localnet.yml | 7 +++++++ Dockerfile-localnet | 5 ----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 88b76744ff..e8c8648ccc 100644 --- 
a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -143,6 +143,13 @@ jobs: cp bin/non-fast-runtime/node_subtensor_runtime.compact.compressed.wasm \ target/non-fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm + - name: Recreate /build directory for Docker COPY + run: | + mkdir -p build + mv target build/ + mv scripts build/ + mv snapshot.json build/ + - name: Show current Git branch run: | echo "===============================" diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 0666f841c9..7f4466e7c1 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -38,11 +38,6 @@ RUN if [ -z "$BUILT_IN_CI" ]; then \ echo "[*] BUILT_IN_CI is set → skipping install + build."; \ fi -### Build fast-runtime node -#RUN ./scripts/localnet.sh --build-only -## Build non-fast-runtime -#RUN ./scripts/localnet.sh False --build-only - # Verify the binaries was produced RUN test -e /build/target/fast-runtime/release/node-subtensor RUN test -e /build/target/non-fast-runtime/release/node-subtensor From 3cff7110215f9a7036e121718ca7899c8de3f520 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 01:53:40 -0700 Subject: [PATCH 298/379] debug. where is the folder? 
:D --- .github/workflows/docker-localnet.yml | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index e8c8648ccc..0ab4ad5db1 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -102,8 +102,10 @@ jobs: # Collect all artifacts and publish them to docker repo publish: needs: [setup, build] - runs-on: [self-hosted, cax41] + defaults: + run: + working-directory: ${{ github.workspace }} steps: - name: Checkout code @@ -170,6 +172,18 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} + - name: Print full workspace directory tree + run: | + echo "::group::GITHUB_WORKSPACE = $GITHUB_WORKSPACE" + find "$GITHUB_WORKSPACE" -type f | sort + echo "::endgroup::" + + - name: Debug build context structure + run: | + echo "::group::ls -R ./build" + ls -R ./build + echo "::endgroup::" + - name: Build and push Docker image uses: docker/build-push-action@v6 with: From de351bf369f7348015d6a0f775777e057ca41210 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 02:39:35 -0700 Subject: [PATCH 299/379] boom paths refactoring --- .github/workflows/docker-localnet.yml | 52 ++++++--------------------- 1 file changed, 11 insertions(+), 41 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 0ab4ad5db1..41beb6f682 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -90,8 +90,10 @@ jobs: mkdir -p upload cp -v snapshot.json upload/ || true cp -v scripts/localnet.sh upload/ || true - cp -v target/${{ matrix.runtime }}/release/node-subtensor upload/node-subtensor || true - cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm upload/ || true + + mkdir -p upload/target/${{ matrix.runtime }}/release/ || true + cp -v target/${{ matrix.runtime 
}}/release/node-subtensor upload/target/${{ matrix.runtime }}/release/node-subtensor || true + cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm upload/target/${{ matrix.runtime }}/release/node_subtensor_runtime.compact.compressed.wasm || true - name: Upload artifacts uses: actions/upload-artifact@v4 @@ -117,40 +119,20 @@ jobs: uses: actions/download-artifact@v4 with: name: artifacts-fast-runtime - path: bin/fast-runtime + path: . - name: Download non-fast-runtime artifacts uses: actions/download-artifact@v4 with: name: artifacts-non-fast-runtime - path: bin/non-fast-runtime + path: . - # Reconstruct target layout for fast-runtime and non-fast-runtime as required by Dockerfile-localnet - - name: Reconstruct target layout - run: | - # Copy snapshot + script - cp bin/fast-runtime/snapshot.json . - cp bin/fast-runtime/localnet.sh scripts/localnet.sh - chmod +x scripts/localnet.sh - - # Reconstruct fast-runtime - mkdir -p target/fast-runtime/release/wbuild/node-subtensor-runtime - cp bin/fast-runtime/node-subtensor target/fast-runtime/release/node-subtensor - cp bin/fast-runtime/node_subtensor_runtime.compact.compressed.wasm \ - target/fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm - - # Reconstruct non-fast-runtime - mkdir -p target/non-fast-runtime/release/wbuild/node-subtensor-runtime - cp bin/non-fast-runtime/node-subtensor target/non-fast-runtime/release/node-subtensor - cp bin/non-fast-runtime/node_subtensor_runtime.compact.compressed.wasm \ - target/non-fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm - - - name: Recreate /build directory for Docker COPY + # to be make sure + - name: Print full workspace directory tree run: | - mkdir -p build - mv target build/ - mv scripts build/ - mv snapshot.json build/ + echo "::group::GITHUB_WORKSPACE = $GITHUB_WORKSPACE" + find "$GITHUB_WORKSPACE" -type f 
| sort + echo "::endgroup::" - name: Show current Git branch run: | @@ -172,18 +154,6 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Print full workspace directory tree - run: | - echo "::group::GITHUB_WORKSPACE = $GITHUB_WORKSPACE" - find "$GITHUB_WORKSPACE" -type f | sort - echo "::endgroup::" - - - name: Debug build context structure - run: | - echo "::group::ls -R ./build" - ls -R ./build - echo "::endgroup::" - - name: Build and push Docker image uses: docker/build-push-action@v6 with: From 916be5391765e182dcc036e12294c3835c92de45 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 02:51:38 -0700 Subject: [PATCH 300/379] more --- .github/workflows/docker-localnet.yml | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 41beb6f682..5d497861bb 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -87,13 +87,15 @@ jobs: - name: Prepare artifacts for upload run: | - mkdir -p upload - cp -v snapshot.json upload/ || true - cp -v scripts/localnet.sh upload/ || true + mkdir -p build/scripts/ + cp -v snapshot.json build/ || true + cp -v scripts/localnet.sh build/scripts/localnet.sh || true - mkdir -p upload/target/${{ matrix.runtime }}/release/ || true - cp -v target/${{ matrix.runtime }}/release/node-subtensor upload/target/${{ matrix.runtime }}/release/node-subtensor || true - cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm upload/target/${{ matrix.runtime }}/release/node_subtensor_runtime.compact.compressed.wasm || true + mkdir -p build/target/${{ matrix.runtime }}/release/ || true + cp -v target/${{ matrix.runtime }}/release/node-subtensor build/target/${{ matrix.runtime }}/release/node-subtensor || true + + mkdir -p build/target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/ || 
true + cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm build/target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm || true - name: Upload artifacts uses: actions/upload-artifact@v4 From 51ba708a9140cdce127ec4af6f227373da95814d Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 03:22:40 -0700 Subject: [PATCH 301/379] use the entire path --- .github/workflows/docker-localnet.yml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 5d497861bb..fa98486bc8 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -87,21 +87,22 @@ jobs: - name: Prepare artifacts for upload run: | - mkdir -p build/scripts/ + mkdir -p ${{ github.workspace }}/build/scripts/ cp -v snapshot.json build/ || true - cp -v scripts/localnet.sh build/scripts/localnet.sh || true + cp -v scripts/localnet.sh ${{ github.workspace }}/build/scripts/localnet.sh || true - mkdir -p build/target/${{ matrix.runtime }}/release/ || true - cp -v target/${{ matrix.runtime }}/release/node-subtensor build/target/${{ matrix.runtime }}/release/node-subtensor || true + mkdir -p ${{ github.workspace }}/build/target/${{ matrix.runtime }}/release/ || true + cp -v target/${{ matrix.runtime }}/release/node-subtensor ${{ github.workspace }}/build/target/${{ matrix.runtime }}/release/node-subtensor || true mkdir -p build/target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/ || true - cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm build/target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm || true + cp -v target/${{ matrix.runtime 
}}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm ${{ github.workspace }}/build/target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm || true - name: Upload artifacts uses: actions/upload-artifact@v4 with: name: artifacts-${{ matrix.runtime }} - path: upload/ + path: ${{ github.workspace }}/upload/**/* + if-no-files-found: error # Collect all artifacts and publish them to docker repo publish: From 79afca57429a93508c94d00e09736c168ddae1fc Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 04:04:18 -0700 Subject: [PATCH 302/379] missed `build` --- .github/workflows/docker-localnet.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index fa98486bc8..c4d9626370 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -87,21 +87,21 @@ jobs: - name: Prepare artifacts for upload run: | - mkdir -p ${{ github.workspace }}/build/scripts/ + mkdir -p build/scripts/ cp -v snapshot.json build/ || true - cp -v scripts/localnet.sh ${{ github.workspace }}/build/scripts/localnet.sh || true + cp -v scripts/localnet.sh build/scripts/localnet.sh || true - mkdir -p ${{ github.workspace }}/build/target/${{ matrix.runtime }}/release/ || true - cp -v target/${{ matrix.runtime }}/release/node-subtensor ${{ github.workspace }}/build/target/${{ matrix.runtime }}/release/node-subtensor || true + mkdir -p build/target/${{ matrix.runtime }}/release/ || true + cp -v target/${{ matrix.runtime }}/release/node-subtensor build/target/${{ matrix.runtime }}/release/node-subtensor || true mkdir -p build/target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/ || true - cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm ${{ github.workspace }}/build/target/${{ 
matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm || true + cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm build/target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm || true - name: Upload artifacts uses: actions/upload-artifact@v4 with: name: artifacts-${{ matrix.runtime }} - path: ${{ github.workspace }}/upload/**/* + path: build/ if-no-files-found: error # Collect all artifacts and publish them to docker repo From 6ca26983431e3ff43c1cdc88ea6bbda13521ebc4 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 04:28:08 -0700 Subject: [PATCH 303/379] Download path --- .github/workflows/docker-localnet.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index c4d9626370..eda82c63d2 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -122,13 +122,13 @@ jobs: uses: actions/download-artifact@v4 with: name: artifacts-fast-runtime - path: . + path: build/ - name: Download non-fast-runtime artifacts uses: actions/download-artifact@v4 with: name: artifacts-non-fast-runtime - path: . 
+ path: build/ # to be make sure - name: Print full workspace directory tree From 5bb5cfb4636ba353a8e6c62f87d61913a4b7e3a1 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 04:54:52 -0700 Subject: [PATCH 304/379] replace verify order --- Dockerfile-localnet | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 7f4466e7c1..6c7180a76b 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -38,10 +38,6 @@ RUN if [ -z "$BUILT_IN_CI" ]; then \ echo "[*] BUILT_IN_CI is set → skipping install + build."; \ fi -# Verify the binaries was produced -RUN test -e /build/target/fast-runtime/release/node-subtensor -RUN test -e /build/target/non-fast-runtime/release/node-subtensor - FROM $BASE_IMAGE AS subtensor-localnet # Copy binaries @@ -56,6 +52,10 @@ COPY --from=builder /build/snapshot.json /snapshot.json COPY --from=builder /build/scripts/localnet.sh scripts/localnet.sh RUN chmod +x /scripts/localnet.sh +# Verify the binaries was produced +RUN test -e /target/target/fast-runtime/release/node-subtensor +RUN test -e /target/target/non-fast-runtime/release/node-subtensor + # Copy WebAssembly artifacts COPY --from=builder /build/target/fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm COPY --from=builder /build/target/non-fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/non-fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm From d79637c6ea741e176bb6d6f0ec51cbd66b254b6c Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 04:55:43 -0700 Subject: [PATCH 305/379] ubuntu-latest runner for last step --- .github/workflows/docker-localnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 
eda82c63d2..2ecb843407 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -107,7 +107,7 @@ jobs: # Collect all artifacts and publish them to docker repo publish: needs: [setup, build] - runs-on: [self-hosted, cax41] + runs-on: ubuntu-latest defaults: run: working-directory: ${{ github.workspace }} From 74d75087b3d7544b429560d2a6a615419e121c53 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 18 Sep 2025 09:00:52 -0700 Subject: [PATCH 306/379] Update dispatches.rs --- pallets/subtensor/src/macros/dispatches.rs | 35 ++++++++++++++-------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index d6a199b0f1..991a6aefe8 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -457,18 +457,18 @@ mod dispatches { /// * `TooManyUnrevealedCommits`: /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. 
/// - #[pallet::call_index(99)] - #[pallet::weight((Weight::from_parts(77_750_000, 0) - .saturating_add(T::DbWeight::get().reads(9_u64)) - .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] - pub fn commit_crv3_weights( - origin: T::RuntimeOrigin, - netuid: NetUid, - commit: BoundedVec>, - reveal_round: u64, - ) -> DispatchResult { - Self::do_commit_timelocked_weights(origin, netuid, commit, reveal_round, 4) - } + // #[pallet::call_index(99)] + // #[pallet::weight((Weight::from_parts(77_750_000, 0) + // .saturating_add(T::DbWeight::get().reads(9_u64)) + // .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + // pub fn commit_crv3_weights( + // origin: T::RuntimeOrigin, + // netuid: NetUid, + // commit: BoundedVec>, + // reveal_round: u64, + // ) -> DispatchResult { + // Self::do_commit_timelocked_weights(origin, netuid, commit, reveal_round, 4) + // } /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed for mechanisms. 
/// @@ -2358,5 +2358,16 @@ mod dispatches { commit_reveal_version, ) } + + /// Remove a subnetwork + /// The caller must be root + #[pallet::call_index(120)] + #[pallet::weight((Weight::from_parts(119_000_000, 0) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(31)), DispatchClass::Operational, Pays::No))] + pub fn root_dissolve_network(origin: OriginFor, netuid: NetUid) -> DispatchResult { + ensure_root(origin)?; + Self::do_dissolve_network(netuid) + } } } From 4f66cec716c34da3cc378a3180e7fdc5188edf35 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Thu, 18 Sep 2025 10:41:14 -0700 Subject: [PATCH 307/379] bump spec --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index b6857cf7ae..254bec73a3 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 317, + spec_version: 318, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From babd79ed78ece0cc1a90e558481e6d20b1cb3c26 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 12:36:37 -0700 Subject: [PATCH 308/379] temporarily disable cache + Verify the binaries was produced --- .github/workflows/docker-localnet.yml | 7 +++++-- Dockerfile-localnet | 10 +++++----- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 2ecb843407..c71fefc690 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -130,6 +130,9 @@ jobs: name: artifacts-non-fast-runtime path: build/ + - name: Print downloaded artifact tree + run: find build/ + # to be make sure - name: Print full workspace directory tree run: | @@ -169,5 +172,5 @@ jobs: tags: | ghcr.io/${{ github.repository }}-localnet:${{ needs.setup.outputs.tag }} ${{ needs.setup.outputs.latest_tag == 'true' && format('ghcr.io/{0}-localnet:latest', github.repository) || '' }} - cache-from: type=gha - cache-to: type=gha,mode=max +# cache-from: type=gha +# cache-to: type=gha,mode=max diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 6c7180a76b..983c3bcd05 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -38,6 +38,10 @@ RUN if [ -z "$BUILT_IN_CI" ]; then \ echo "[*] BUILT_IN_CI is set → skipping install + build."; \ fi +# Verify the binaries was produced +RUN test -e /build/target/fast-runtime/release/node-subtensor +RUN test -e /build/target/non-fast-runtime/release/node-subtensor + FROM $BASE_IMAGE AS subtensor-localnet # Copy binaries @@ -52,15 +56,11 @@ COPY --from=builder /build/snapshot.json /snapshot.json COPY --from=builder /build/scripts/localnet.sh scripts/localnet.sh RUN chmod +x /scripts/localnet.sh -# Verify the binaries was produced -RUN test -e /target/target/fast-runtime/release/node-subtensor -RUN test -e 
/target/target/non-fast-runtime/release/node-subtensor - # Copy WebAssembly artifacts COPY --from=builder /build/target/fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm COPY --from=builder /build/target/non-fast-runtime/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm target/non-fast-runtime/release/node_subtensor_runtime.compact.compressed.wasm -## Ubdate certificates for inner image +# Update certificates for next layer RUN apt-get update && apt-get install -y ca-certificates # Do not build (just run) From 4520bdf324b2480f29ff2ae61e3451494b99be2d Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 18 Sep 2025 19:48:00 +0000 Subject: [PATCH 309/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 991a6aefe8..282c0d8c2d 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -1314,8 +1314,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) - .saturating_add(T::DbWeight::get().reads(37_u64)) - .saturating_add(T::DbWeight::get().writes(51_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(39_u64)) + .saturating_add(T::DbWeight::get().writes(57_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1601,8 +1601,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) - .saturating_add(T::DbWeight::get().reads(36_u64)) - 
.saturating_add(T::DbWeight::get().writes(50_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(38_u64)) + .saturating_add(T::DbWeight::get().writes(56_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, From e03cb4f78a82b1712f7494fa94d9b49db11972cf Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 13:00:02 -0700 Subject: [PATCH 310/379] del path from Download --- .github/workflows/docker-localnet.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index c71fefc690..a55a524179 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -122,13 +122,13 @@ jobs: uses: actions/download-artifact@v4 with: name: artifacts-fast-runtime - path: build/ +# path: build/ - name: Download non-fast-runtime artifacts uses: actions/download-artifact@v4 with: name: artifacts-non-fast-runtime - path: build/ +# path: build/ - name: Print downloaded artifact tree run: find build/ From 9746aa79497ebf7aa4e5fd7f52f7119390b9a065 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 13:23:38 -0700 Subject: [PATCH 311/379] path: . --- .github/workflows/docker-localnet.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index a55a524179..aa281abd69 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -122,13 +122,13 @@ jobs: uses: actions/download-artifact@v4 with: name: artifacts-fast-runtime -# path: build/ + path: . - name: Download non-fast-runtime artifacts uses: actions/download-artifact@v4 with: name: artifacts-non-fast-runtime -# path: build/ + path: . 
- name: Print downloaded artifact tree run: find build/ From 45cc05140578b68300af6d846d784d9c8615561c Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 13:32:17 -0700 Subject: [PATCH 312/379] ${{ github.workspace }} --- .github/workflows/docker-localnet.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index aa281abd69..10d8557eaa 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -122,13 +122,13 @@ jobs: uses: actions/download-artifact@v4 with: name: artifacts-fast-runtime - path: . + path: ${{ github.workspace }} - name: Download non-fast-runtime artifacts uses: actions/download-artifact@v4 with: name: artifacts-non-fast-runtime - path: . + path: ${{ github.workspace }} - name: Print downloaded artifact tree run: find build/ From 85d1e4ec1e35338b734e25968121d665078bd4d2 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 13:52:17 -0700 Subject: [PATCH 313/379] ops old debug --- .github/workflows/docker-localnet.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 10d8557eaa..584d447f22 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -130,9 +130,6 @@ jobs: name: artifacts-non-fast-runtime path: ${{ github.workspace }} - - name: Print downloaded artifact tree - run: find build/ - # to be make sure - name: Print full workspace directory tree run: | From 37f7b284d308e550affe8e4b1234108804c1cb33 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 16:59:38 -0400 Subject: [PATCH 314/379] Cleanup math: Remove recursion, indexing, assertions, and zipping. Fix get_last_update. 
--- pallets/subtensor/src/epoch/math.rs | 1325 ++++++++++---------- pallets/subtensor/src/epoch/run_epoch.rs | 2 +- pallets/subtensor/src/subnets/mechanism.rs | 9 + pallets/subtensor/src/tests/math.rs | 183 +-- pallets/subtensor/src/utils/misc.rs | 12 +- 5 files changed, 653 insertions(+), 878 deletions(-) diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index 11930bf26e..6288ac14ae 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -1,96 +1,79 @@ // we get a compiler warning for this , even though the trait is used in the // quantile function. use crate::alloc::borrow::ToOwned; -#[allow(unused)] -use num_traits::float::Float; use safe_math::*; -use sp_runtime::traits::{CheckedAdd, Saturating}; -use sp_std::cmp::Ordering; +use sp_runtime::traits::CheckedAdd; use sp_std::vec; use substrate_fixed::transcendental::{exp, ln}; use substrate_fixed::types::{I32F32, I64F64}; -// TODO: figure out what cfg gate this needs to not be a warning in rustc -#[allow(unused)] use sp_std::vec::Vec; -#[allow(dead_code)] +pub fn get_safe(slice: &[T], idx: usize) -> T { + slice.get(idx).copied().unwrap_or_default() +} + pub fn fixed(val: f32) -> I32F32 { I32F32::saturating_from_num(val) } -#[allow(dead_code)] pub fn fixed_to_u16(x: I32F32) -> u16 { x.saturating_to_num::() } -#[allow(dead_code)] pub fn fixed_to_u64(x: I32F32) -> u64 { x.saturating_to_num::() } -#[allow(dead_code)] pub fn fixed64_to_u64(x: I64F64) -> u64 { x.saturating_to_num::() } -#[allow(dead_code)] pub fn fixed64_to_fixed32(x: I64F64) -> I32F32 { I32F32::saturating_from_num(x) } -#[allow(dead_code)] pub fn fixed32_to_fixed64(x: I32F32) -> I64F64 { I64F64::saturating_from_num(x) } -#[allow(dead_code)] pub fn u16_to_fixed(x: u16) -> I32F32 { I32F32::saturating_from_num(x) } -#[allow(dead_code)] pub fn u16_proportion_to_fixed(x: u16) -> I32F32 { I32F32::saturating_from_num(x).safe_div(I32F32::saturating_from_num(u16::MAX)) } 
-#[allow(dead_code)] pub fn fixed_to_fixed_u16_proportion(x: I32F32) -> I32F32 { x.safe_div(I32F32::saturating_from_num(u16::MAX)) } -#[allow(dead_code)] pub fn fixed_proportion_to_u16(x: I32F32) -> u16 { fixed_to_u16(x.saturating_mul(I32F32::saturating_from_num(u16::MAX))) } -#[allow(dead_code)] pub fn vec_fixed32_to_u64(vec: Vec) -> Vec { vec.into_iter().map(fixed_to_u64).collect() } -#[allow(dead_code)] pub fn vec_fixed64_to_fixed32(vec: Vec) -> Vec { vec.into_iter().map(fixed64_to_fixed32).collect() } -#[allow(dead_code)] pub fn vec_fixed32_to_fixed64(vec: Vec) -> Vec { vec.into_iter().map(fixed32_to_fixed64).collect() } -#[allow(dead_code)] pub fn vec_fixed64_to_u64(vec: Vec) -> Vec { vec.into_iter().map(fixed64_to_u64).collect() } -#[allow(dead_code)] pub fn vec_fixed_proportions_to_u16(vec: Vec) -> Vec { vec.into_iter().map(fixed_proportion_to_u16).collect() } -#[allow(dead_code)] // Max-upscale vector and convert to u16 so max_value = u16::MAX. Assumes non-negative normalized input. pub fn vec_max_upscale_to_u16(vec: &[I32F32]) -> Vec { let u16_max: I32F32 = I32F32::saturating_from_num(u16::MAX); @@ -136,7 +119,6 @@ pub fn vec_max_upscale_to_u16(vec: &[I32F32]) -> Vec { } } -#[allow(dead_code)] // Max-upscale u16 vector and convert to u16 so max_value = u16::MAX. Assumes u16 vector input. pub fn vec_u16_max_upscale_to_u16(vec: &[u16]) -> Vec { let vec_fixed: Vec = vec @@ -146,7 +128,6 @@ pub fn vec_u16_max_upscale_to_u16(vec: &[u16]) -> Vec { vec_max_upscale_to_u16(&vec_fixed) } -#[allow(dead_code)] // Checks if u16 vector, when normalized, has a max value not greater than a u16 ratio max_limit. 
pub fn check_vec_max_limited(vec: &[u16], max_limit: u16) -> bool { let max_limit_fixed: I32F32 = @@ -160,12 +141,10 @@ pub fn check_vec_max_limited(vec: &[u16], max_limit: u16) -> bool { max_value.is_none_or(|v| *v <= max_limit_fixed) } -#[allow(dead_code)] pub fn sum(x: &[I32F32]) -> I32F32 { x.iter().sum() } -#[allow(dead_code)] // Sums a Vector of type that has CheckedAdd trait. // Returns None if overflow occurs during sum using T::checked_add. // Returns Some(T::default()) if input vector is empty. @@ -184,14 +163,12 @@ where } // Return true when vector sum is zero. -#[allow(dead_code)] pub fn is_zero(vector: &[I32F32]) -> bool { let vector_sum: I32F32 = sum(vector); vector_sum == I32F32::saturating_from_num(0) } // Exp safe function with I32F32 output of I32F32 input. -#[allow(dead_code)] pub fn exp_safe(input: I32F32) -> I32F32 { let min_input: I32F32 = I32F32::saturating_from_num(-20); // <= 1/exp(-20) = 485 165 195,4097903 let max_input: I32F32 = I32F32::saturating_from_num(20); // <= exp(20) = 485 165 195,4097903 @@ -218,7 +195,6 @@ pub fn exp_safe(input: I32F32) -> I32F32 { } // Sigmoid safe function with I32F32 output of I32F32 input with offset kappa and (recommended) scaling 0 < rho <= 40. -#[allow(dead_code)] pub fn sigmoid_safe(input: I32F32, rho: I32F32, kappa: I32F32) -> I32F32 { let one: I32F32 = I32F32::saturating_from_num(1); let offset: I32F32 = input.saturating_sub(kappa); // (input - kappa) @@ -231,7 +207,6 @@ pub fn sigmoid_safe(input: I32F32, rho: I32F32, kappa: I32F32) -> I32F32 { } // Returns a bool vector where an item is true if the vector item is in topk values. 
-#[allow(dead_code, clippy::indexing_slicing)] pub fn is_topk(vector: &[I32F32], k: usize) -> Vec { let n: usize = vector.len(); let mut result: Vec = vec![true; n]; @@ -239,15 +214,16 @@ pub fn is_topk(vector: &[I32F32], k: usize) -> Vec { return result; } let mut idxs: Vec = (0..n).collect(); - idxs.sort_by_key(|&idx| &vector[idx]); // ascending stable sort + idxs.sort_by_key(|&idx| get_safe(vector, idx)); // ascending stable sort for &idx in idxs.iter().take(n.saturating_sub(k)) { - result[idx] = false; + if let Some(cell) = result.get_mut(idx) { + *cell = false; + } } result } // Returns a bool vector where an item is true if the vector item is in topk values and is non-zero. -#[allow(dead_code, clippy::indexing_slicing)] pub fn is_topk_nonzero(vector: &[I32F32], k: usize) -> Vec { let n: usize = vector.len(); let mut result: Vec = vector.iter().map(|&elem| elem != I32F32::from(0)).collect(); @@ -255,15 +231,16 @@ pub fn is_topk_nonzero(vector: &[I32F32], k: usize) -> Vec { return result; } let mut idxs: Vec = (0..n).collect(); - idxs.sort_by_key(|&idx| &vector[idx]); // ascending stable sort + idxs.sort_by_key(|&idx| get_safe(vector, idx)); // ascending stable sort (no indexing) for &idx in idxs.iter().take(n.saturating_sub(k)) { - result[idx] = false; + if let Some(cell) = result.get_mut(idx) { + *cell = false; // no indexing + } } result } // Returns a normalized (sum to 1 except 0) copy of the input vector. -#[allow(dead_code)] pub fn normalize(x: &[I32F32]) -> Vec { let x_sum: I32F32 = sum(x); if x_sum != I32F32::saturating_from_num(0.0_f32) { @@ -274,7 +251,6 @@ pub fn normalize(x: &[I32F32]) -> Vec { } // Normalizes (sum to 1 except 0) the input vector directly in-place. 
-#[allow(dead_code)] pub fn inplace_normalize(x: &mut [I32F32]) { let x_sum: I32F32 = x.iter().sum(); if x_sum == I32F32::saturating_from_num(0.0_f32) { @@ -285,7 +261,6 @@ pub fn inplace_normalize(x: &mut [I32F32]) { } // Normalizes (sum to 1 except 0) the input vector directly in-place, using the sum arg. -#[allow(dead_code)] pub fn inplace_normalize_using_sum(x: &mut [I32F32], x_sum: I32F32) { if x_sum == I32F32::saturating_from_num(0.0_f32) { return; @@ -295,7 +270,6 @@ pub fn inplace_normalize_using_sum(x: &mut [I32F32], x_sum: I32F32) { } // Normalizes (sum to 1 except 0) the I64F64 input vector directly in-place. -#[allow(dead_code)] pub fn inplace_normalize_64(x: &mut [I64F64]) { let x_sum: I64F64 = x.iter().sum(); if x_sum == I64F64::saturating_from_num(0) { @@ -306,7 +280,6 @@ pub fn inplace_normalize_64(x: &mut [I64F64]) { } /// Normalizes (sum to 1 except 0) each row (dim=0) of a I64F64 matrix in-place. -#[allow(dead_code)] pub fn inplace_row_normalize_64(x: &mut [Vec]) { for row in x { let row_sum: I64F64 = row.iter().sum(); @@ -318,23 +291,18 @@ pub fn inplace_row_normalize_64(x: &mut [Vec]) { } /// Returns x / y for input vectors x and y, if y == 0 return 0. -#[allow(dead_code)] pub fn vecdiv(x: &[I32F32], y: &[I32F32]) -> Vec { - assert_eq!(x.len(), y.len()); - x.iter() - .zip(y) - .map(|(x_i, y_i)| { - if *y_i != 0 { - x_i.safe_div(*y_i) - } else { - I32F32::saturating_from_num(0) - } - }) - .collect() + let zero = I32F32::saturating_from_num(0); + + let mut out = Vec::with_capacity(x.len()); + for (i, x_i) in x.iter().enumerate() { + let y_i = y.get(i).copied().unwrap_or(zero); + out.push(x_i.safe_div(y_i)); + } + out } // Normalizes (sum to 1 except 0) each row (dim=0) of a matrix in-place. 
-#[allow(dead_code)] pub fn inplace_row_normalize(x: &mut [Vec]) { for row in x { let row_sum: I32F32 = row.iter().sum(); @@ -346,7 +314,6 @@ pub fn inplace_row_normalize(x: &mut [Vec]) { } // Normalizes (sum to 1 except 0) each row (dim=0) of a sparse matrix in-place. -#[allow(dead_code)] pub fn inplace_row_normalize_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>]) { for sparse_row in sparse_matrix.iter_mut() { let row_sum: I32F32 = sparse_row.iter().map(|(_j, value)| *value).sum(); @@ -359,7 +326,6 @@ pub fn inplace_row_normalize_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>]) { } // Sum across each row (dim=0) of a matrix. -#[allow(dead_code)] pub fn row_sum(x: &[Vec]) -> Vec { if let Some(first_row) = x.first() { if first_row.is_empty() { @@ -370,7 +336,6 @@ pub fn row_sum(x: &[Vec]) -> Vec { } // Sum across each row (dim=0) of a sparse matrix. -#[allow(dead_code)] pub fn row_sum_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec { sparse_matrix .iter() @@ -378,213 +343,205 @@ pub fn row_sum_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec { .collect() } -// Sum across each column (dim=1) of a matrix. -#[allow(dead_code)] -pub fn col_sum(x: &[Vec]) -> Vec { - let Some(first_row) = x.first() else { - return vec![]; - }; - let cols = first_row.len(); - if cols == 0 { - return vec![]; - } - x.iter().fold( - vec![I32F32::saturating_from_num(0); cols], - |acc, next_row| { - acc.into_iter() - .zip(next_row) - .map(|(acc_elem, next_elem)| acc_elem.saturating_add(*next_elem)) - .collect() - }, - ) -} - -// Sum across each column (dim=1) of a sparse matrix. 
-#[allow(dead_code, clippy::indexing_slicing)] -pub fn col_sum_sparse(sparse_matrix: &[Vec<(u16, I32F32)>], columns: u16) -> Vec { - let mut result: Vec = vec![I32F32::saturating_from_num(0); columns as usize]; - for sparse_row in sparse_matrix { - for (j, value) in sparse_row { - result[*j as usize] = result[*j as usize].saturating_add(*value); - } - } - result -} - // Normalizes (sum to 1 except 0) each column (dim=1) of a sparse matrix in-place. -#[allow(dead_code, clippy::indexing_slicing)] pub fn inplace_col_normalize_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>], columns: u16) { - let mut col_sum: Vec = vec![I32F32::saturating_from_num(0.0); columns as usize]; // assume square matrix, rows=cols + let zero = I32F32::saturating_from_num(0.0); + let mut col_sum: Vec = vec![zero; columns as usize]; + + // Pass 1: accumulate column sums. for sparse_row in sparse_matrix.iter() { - for (j, value) in sparse_row.iter() { - col_sum[*j as usize] = col_sum[*j as usize].saturating_add(*value); + for &(j, value) in sparse_row.iter() { + if let Some(sum) = col_sum.get_mut(j as usize) { + *sum = sum.saturating_add(value); + } } } - for sparse_row in sparse_matrix { - for (j, value) in sparse_row { - if col_sum[*j as usize] == I32F32::saturating_from_num(0.0_f32) { - continue; + + // Pass 2: normalize by column sums where non-zero. + for sparse_row in sparse_matrix.iter_mut() { + for (j, value) in sparse_row.iter_mut() { + let denom = col_sum.get(*j as usize).copied().unwrap_or(zero); + if denom != zero { + *value = value.safe_div(denom); } - *value = value.safe_div(col_sum[*j as usize]); } } } // Normalizes (sum to 1 except 0) each column (dim=1) of a matrix in-place. -#[allow(dead_code)] +// If a row is shorter/longer than the accumulator, pad with zeroes accordingly. 
pub fn inplace_col_normalize(x: &mut [Vec]) { - let Some(first_row) = x.first() else { - return; - }; - if first_row.is_empty() { + let zero = I32F32::saturating_from_num(0.0); + + // Build column sums; treat missing entries as zero, but don't modify rows. + let mut col_sums: Vec = Vec::new(); + for row in x.iter() { + if col_sums.len() < row.len() { + col_sums.resize(row.len(), zero); + } + let mut sums_it = col_sums.iter_mut(); + for v in row.iter() { + if let Some(sum) = sums_it.next() { + *sum = sum.saturating_add(*v); + } else { + break; + } + } + } + + if col_sums.is_empty() { return; } - let cols = first_row.len(); - let col_sums = x - .iter_mut() - .fold(vec![I32F32::saturating_from_num(0.0); cols], |acc, row| { - row.iter_mut() - .zip(acc) - .map(|(&mut m_val, acc_val)| acc_val.saturating_add(m_val)) - .collect() - }); - x.iter_mut().for_each(|row| { - row.iter_mut() - .zip(&col_sums) - .filter(|(_, col_sum)| **col_sum != I32F32::saturating_from_num(0_f32)) - .for_each(|(m_val, col_sum)| { - *m_val = m_val.safe_div(*col_sum); - }); - }); + + // Normalize only existing elements in each row. + for row in x.iter_mut() { + let mut sums_it = col_sums.iter(); + for m in row.iter_mut() { + if let Some(sum) = sums_it.next() { + if *sum != zero { + *m = m.safe_div(*sum); + } + } else { + break; + } + } + } } // Max-upscale each column (dim=1) of a sparse matrix in-place. 
-#[allow(dead_code, clippy::indexing_slicing)] pub fn inplace_col_max_upscale_sparse(sparse_matrix: &mut [Vec<(u16, I32F32)>], columns: u16) { - let mut col_max: Vec = vec![I32F32::saturating_from_num(0.0); columns as usize]; // assume square matrix, rows=cols + let zero = I32F32::saturating_from_num(0.0); + let mut col_max: Vec = vec![zero; columns as usize]; + + // Pass 1: compute per-column max for sparse_row in sparse_matrix.iter() { for (j, value) in sparse_row.iter() { - if col_max[*j as usize] < *value { - col_max[*j as usize] = *value; + if let Some(m) = col_max.get_mut(*j as usize) { + if *m < *value { + *m = *value; + } } } } - for sparse_row in sparse_matrix { - for (j, value) in sparse_row { - if col_max[*j as usize] == I32F32::saturating_from_num(0.0_f32) { - continue; + + // Pass 2: divide each nonzero entry by its column max + for sparse_row in sparse_matrix.iter_mut() { + for (j, value) in sparse_row.iter_mut() { + let m = col_max.get(*j as usize).copied().unwrap_or(zero); + if m != zero { + *value = value.safe_div(m); } - *value = value.safe_div(col_max[*j as usize]); } } } // Max-upscale each column (dim=1) of a matrix in-place. -#[allow(dead_code)] pub fn inplace_col_max_upscale(x: &mut [Vec]) { - let Some(first_row) = x.first() else { - return; - }; - if first_row.is_empty() { + let zero = I32F32::saturating_from_num(0.0); + + // Find the widest row to size the column-max buffer; don't modify rows. 
+ let max_cols = x.iter().map(|r| r.len()).max().unwrap_or(0); + if max_cols == 0 { return; } - let cols = first_row.len(); - let col_maxes = x.iter_mut().fold( - vec![I32F32::saturating_from_num(0_f32); cols], - |acc, row| { - row.iter_mut() - .zip(acc) - .map(|(m_val, acc_val)| acc_val.max(*m_val)) - .collect() - }, - ); - x.iter_mut().for_each(|row| { - row.iter_mut() - .zip(&col_maxes) - .filter(|(_, col_max)| **col_max != I32F32::saturating_from_num(0)) - .for_each(|(m_val, col_max)| { - *m_val = m_val.safe_div(*col_max); - }); - }); + + // Pass 1: compute per-column maxima across existing entries only. + let mut col_maxes = vec![zero; max_cols]; + for row in x.iter() { + let mut max_it = col_maxes.iter_mut(); + for v in row.iter() { + if let Some(m) = max_it.next() { + if *m < *v { + *m = *v; + } + } else { + break; + } + } + } + + // Pass 2: divide each existing entry by its column max (if non-zero). + for row in x.iter_mut() { + let mut max_it = col_maxes.iter(); + for val in row.iter_mut() { + if let Some(&m) = max_it.next() { + if m != zero { + *val = val.safe_div(m); + } + } else { + break; + } + } + } } // Apply mask to vector, mask=true will mask out, i.e. set to 0. -#[allow(dead_code)] pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) { if mask.is_empty() { return; } - assert_eq!(mask.len(), vector.len()); let zero: I32F32 = I32F32::saturating_from_num(0.0); - mask.iter() - .zip(vector) - .filter(|(m, _)| **m) - .for_each(|(_, v_elem)| { - *v_elem = zero; - }); + for (i, v) in vector.iter_mut().enumerate() { + if *mask.get(i).unwrap_or(&true) { + *v = zero; + } + } } // Apply mask to matrix, mask=true will mask out, i.e. set to 0. 
-#[allow(dead_code)] -pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut Vec>) { +pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut [Vec]) { let Some(first_row) = mask.first() else { return; }; if first_row.is_empty() { return; } - assert_eq!(mask.len(), matrix.len()); let zero: I32F32 = I32F32::saturating_from_num(0.0); - mask.iter().zip(matrix).for_each(|(mask_row, matrix_row)| { - mask_row - .iter() - .zip(matrix_row) - .filter(|(mask_elem, _)| **mask_elem) - .for_each(|(_, matrix_elem)| { - *matrix_elem = zero; - }); - }); + for (r, row) in matrix.iter_mut().enumerate() { + let mask_row_opt = mask.get(r); + for (c, val) in row.iter_mut().enumerate() { + let should_zero = mask_row_opt + .and_then(|mr| mr.get(c)) + .copied() + .unwrap_or(true); + if should_zero { + *val = zero; + } + } + } } // Apply row mask to matrix, mask=true will mask out, i.e. set to 0. -#[allow(dead_code)] pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { let Some(first_row) = matrix.first() else { return; }; let cols = first_row.len(); - assert_eq!(mask.len(), matrix.len()); let zero: I32F32 = I32F32::saturating_from_num(0); - matrix - .iter_mut() - .zip(mask) - .for_each(|(row_elem, mask_row)| { - if *mask_row { - *row_elem = vec![zero; cols]; - } - }); + for (r, row) in matrix.iter_mut().enumerate() { + if mask.get(r).copied().unwrap_or(true) { + *row = vec![zero; cols]; + } + } } // Apply column mask to matrix, mask=true will mask out, i.e. set to 0. // Assumes each column has the same length. 
-#[allow(dead_code)] pub fn inplace_mask_cols(mask: &[bool], matrix: &mut [Vec]) { - let Some(first_row) = matrix.first() else { + if matrix.is_empty() { return; }; - assert_eq!(mask.len(), first_row.len()); let zero: I32F32 = I32F32::saturating_from_num(0); - matrix.iter_mut().for_each(|row_elem| { - row_elem.iter_mut().zip(mask).for_each(|(elem, mask_col)| { - if *mask_col { + for row in matrix.iter_mut() { + for (c, elem) in row.iter_mut().enumerate() { + if mask.get(c).copied().unwrap_or(true) { *elem = zero; } - }); - }); + } + } } // Mask out the diagonal of the input matrix in-place. -#[allow(dead_code)] pub fn inplace_mask_diag(matrix: &mut [Vec]) { let Some(first_row) = matrix.first() else { return; @@ -592,7 +549,18 @@ pub fn inplace_mask_diag(matrix: &mut [Vec]) { if first_row.is_empty() { return; } - assert_eq!(matrix.len(), first_row.len()); + // Weights that we use this function for are always a square matrix. + // If something not square is passed to this function, it's safe to return + // with no action. Log error if this happens. + if matrix.len() != first_row.len() { + log::error!( + "inplace_mask_diag: matrix.len {:?} != first_row.len {:?}", + matrix.len(), + first_row.len() + ); + return; + } + let zero: I32F32 = I32F32::saturating_from_num(0.0); matrix.iter_mut().enumerate().for_each(|(idx, row)| { let Some(elem) = row.get_mut(idx) else { @@ -604,27 +572,29 @@ pub fn inplace_mask_diag(matrix: &mut [Vec]) { } // Remove cells from sparse matrix where the mask function of a scalar and a vector is true. 
-#[allow(dead_code, clippy::indexing_slicing)] pub fn scalar_vec_mask_sparse_matrix( sparse_matrix: &[Vec<(u16, I32F32)>], scalar: u64, vector: &[u64], mask_fn: &dyn Fn(u64, u64) -> bool, ) -> Vec> { - let n: usize = sparse_matrix.len(); - let mut result: Vec> = vec![vec![]; n]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row { - if !mask_fn(scalar, vector[*j as usize]) { - result[i].push((*j, *value)); + let mut result: Vec> = Vec::with_capacity(sparse_matrix.len()); + + for row in sparse_matrix.iter() { + let mut out_row: Vec<(u16, I32F32)> = Vec::with_capacity(row.len()); + for &(j, value) in row.iter() { + let vj = vector.get(j as usize).copied().unwrap_or(0); + if !mask_fn(scalar, vj) { + out_row.push((j, value)); } } + result.push(out_row); } + result } // Mask out the diagonal of the input matrix in-place, except for the diagonal entry at except_index. -#[allow(dead_code)] pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index: u16) { let Some(first_row) = matrix.first() else { return; @@ -632,7 +602,10 @@ pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index: if first_row.is_empty() { return; } - assert_eq!(matrix.len(), first_row.len()); + if matrix.len() != first_row.len() { + log::error!("inplace_mask_diag_except_index: input matrix is not square"); + return; + } let diag_at_index = matrix .get(except_index as usize) @@ -651,26 +624,22 @@ pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index: } // Return a new sparse matrix that replaces masked rows with an empty vector placeholder. 
-#[allow(dead_code)] pub fn mask_rows_sparse( mask: &[bool], sparse_matrix: &[Vec<(u16, I32F32)>], ) -> Vec> { - assert_eq!(sparse_matrix.len(), mask.len()); - mask.iter() - .zip(sparse_matrix) - .map(|(mask_elem, sparse_row)| { - if *mask_elem { - vec![] - } else { - sparse_row.clone() - } - }) - .collect() + let mut out = Vec::with_capacity(sparse_matrix.len()); + for (i, sparse_row) in sparse_matrix.iter().enumerate() { + if mask.get(i).copied().unwrap_or(true) { + out.push(Vec::new()); + } else { + out.push(sparse_row.clone()); + } + } + out } // Return a new sparse matrix with a masked out diagonal of input sparse matrix. -#[allow(dead_code)] pub fn mask_diag_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec> { sparse_matrix .iter() @@ -687,7 +656,6 @@ pub fn mask_diag_sparse(sparse_matrix: &[Vec<(u16, I32F32)>]) -> Vec], except_index: u16, @@ -709,27 +677,29 @@ pub fn mask_diag_sparse_except_index( } // Remove cells from sparse matrix where the mask function of two vectors is true. -#[allow(dead_code, clippy::indexing_slicing)] pub fn vec_mask_sparse_matrix( sparse_matrix: &[Vec<(u16, I32F32)>], first_vector: &[u64], second_vector: &[u64], mask_fn: &dyn Fn(u64, u64) -> bool, ) -> Vec> { - let n: usize = sparse_matrix.len(); - let mut result: Vec> = vec![vec![]; n]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row { - if !mask_fn(first_vector[i], second_vector[*j as usize]) { - result[i].push((*j, *value)); + let mut result: Vec> = Vec::with_capacity(sparse_matrix.len()); + let mut fv_it = first_vector.iter(); + for row in sparse_matrix.iter() { + let fv = fv_it.next().copied().unwrap_or(0); + let mut out_row: Vec<(u16, I32F32)> = Vec::with_capacity(row.len()); + for &(j, val) in row.iter() { + let sv = second_vector.get(j as usize).copied().unwrap_or(0); + if !mask_fn(fv, sv) { + out_row.push((j, val)); } } + result.push(out_row); } result } // Row-wise matrix-vector hadamard product. 
-#[allow(dead_code)] pub fn row_hadamard(matrix: &[Vec], vector: &[I32F32]) -> Vec> { let Some(first_row) = matrix.first() else { return vec![vec![]]; @@ -737,37 +707,43 @@ pub fn row_hadamard(matrix: &[Vec], vector: &[I32F32]) -> Vec], vector: &[I32F32], ) -> Vec> { - sparse_matrix - .iter() - .zip(vector) - .map(|(sparse_row, vec_val)| { - sparse_row - .iter() - .map(|(j, value)| (*j, value.saturating_mul(*vec_val))) - .collect() - }) - .collect() + let mut out = Vec::with_capacity(sparse_matrix.len()); + let mut vec_it = vector.iter(); + + for sparse_row in sparse_matrix.iter() { + let Some(&scale) = vec_it.next() else { break }; + let mut new_row = Vec::with_capacity(sparse_row.len()); + for &(j, val) in sparse_row.iter() { + new_row.push((j, val.saturating_mul(scale))); + } + out.push(new_row); + } + + out } // Row-wise matrix-vector product, column-wise sum: result_j = SUM(i) vector_i * matrix_ij. -#[allow(dead_code)] pub fn matmul(matrix: &[Vec], vector: &[I32F32]) -> Vec { let Some(first_row) = matrix.first() else { return vec![]; @@ -776,52 +752,30 @@ pub fn matmul(matrix: &[Vec], vector: &[I32F32]) -> Vec { if cols == 0 { return vec![]; } - assert!(matrix.len() == vector.len()); - matrix.iter().zip(vector).fold( - vec![I32F32::saturating_from_num(0_f32); cols], - |acc, (row, vec_val)| { - row.iter() - .zip(acc) - .map(|(m_val, acc_val)| { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - acc_val.saturating_add(vec_val.saturating_mul(*m_val)) - }) - .collect() - }, - ) -} -// Row-wise matrix-vector product, column-wise sum: result_j = SUM(i) vector_i * matrix_ij. 
-#[allow(dead_code)] -pub fn matmul_64(matrix: &[Vec], vector: &[I64F64]) -> Vec { - let Some(first_row) = matrix.first() else { - return vec![]; - }; - let cols = first_row.len(); - if cols == 0 { - return vec![]; + let zero = I32F32::saturating_from_num(0.0); + let mut acc = vec![zero; cols]; + + let mut vec_it = vector.iter(); + for row in matrix.iter() { + // Use 0 if the vector ran out (rows beyond vector length contribute nothing). + let scale = vec_it.next().copied().unwrap_or(zero); + + let mut acc_it = acc.iter_mut(); + for m_val in row.iter() { + if let Some(a) = acc_it.next() { + *a = a.saturating_add(scale.saturating_mul(*m_val)); + } else { + // Ignore elements beyond the accumulator width (first row’s length). + break; + } + } } - assert!(matrix.len() == vector.len()); - matrix.iter().zip(vector).fold( - vec![I64F64::saturating_from_num(0.0); cols], - |acc, (row, vec_val)| { - row.iter() - .zip(acc) - .map(|(m_val, acc_val)| { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - acc_val.saturating_add(vec_val.saturating_mul(*m_val)) - }) - .collect() - }, - ) + + acc } // Column-wise matrix-vector product, row-wise sum: result_i = SUM(j) vector_j * matrix_ij. 
-#[allow(dead_code)] pub fn matmul_transpose(matrix: &[Vec], vector: &[I32F32]) -> Vec { let Some(first_row) = matrix.first() else { return vec![]; @@ -829,143 +783,112 @@ pub fn matmul_transpose(matrix: &[Vec], vector: &[I32F32]) -> Vec], vector: &[I32F32], columns: u16, ) -> Vec { - let mut result: Vec = vec![I32F32::saturating_from_num(0.0); columns as usize]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { - // Compute ranks: r_j = SUM(i) w_ij * s_i - // Compute trust scores: t_j = SUM(i) w_ij * s_i - // result_j = SUM(i) vector_i * matrix_ij - result[*j as usize] = - result[*j as usize].saturating_add(vector[i].saturating_mul(*value)); + let zero = I32F32::saturating_from_num(0.0); + let mut result = vec![zero; columns as usize]; + + let mut vec_it = vector.iter(); + for row in sparse_matrix.iter() { + let scale = vec_it.next().copied().unwrap_or(zero); + for &(j, val) in row.iter() { + if let Some(r) = result.get_mut(j as usize) { + *r = r.saturating_add(scale.saturating_mul(val)); + } } } + result } // Column-wise sparse_matrix-vector product, row-wise sum: result_i = SUM(j) vector_j * matrix_ij. 
-#[allow(dead_code, clippy::indexing_slicing)] pub fn matmul_transpose_sparse( sparse_matrix: &[Vec<(u16, I32F32)>], vector: &[I32F32], ) -> Vec { - let mut result: Vec = vec![I32F32::saturating_from_num(0.0); sparse_matrix.len()]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { - // Compute dividends: d_j = SUM(i) b_ji * inc_i - // result_j = SUM(i) vector_i * matrix_ji - // result_i = SUM(j) vector_j * matrix_ij - result[i] = result[i].saturating_add(vector[*j as usize].saturating_mul(*value)); + let zero = I32F32::saturating_from_num(0.0); + let mut result = vec![zero; sparse_matrix.len()]; + + let mut out_it = result.iter_mut(); + for row in sparse_matrix.iter() { + let Some(out_cell) = out_it.next() else { break }; + let mut acc = zero; + for &(j, val) in row.iter() { + let v = vector.get(j as usize).copied().unwrap_or(zero); + acc = acc.saturating_add(v.saturating_mul(val)); } + *out_cell = acc; } + result } // Set inplace matrix values above column threshold to threshold value. -#[allow(dead_code)] pub fn inplace_col_clip(x: &mut [Vec], col_threshold: &[I32F32]) { - x.iter_mut().for_each(|row| { - row.iter_mut() - .zip(col_threshold) - .for_each(|(value, threshold)| { - *value = *threshold.min(value); - }); - }); + for row in x.iter_mut() { + let mut thr_it = col_threshold.iter(); + for value in row.iter_mut() { + if let Some(th) = thr_it.next() { + // Clip: value = min(value, threshold) + *value = *th.min(&*value); + } else { + // No more thresholds; stop for this row. + break; + } + } + } } // Return sparse matrix with values above column threshold set to threshold value. 
-#[allow(dead_code, clippy::indexing_slicing)] pub fn col_clip_sparse( sparse_matrix: &[Vec<(u16, I32F32)>], col_threshold: &[I32F32], ) -> Vec> { - let mut result: Vec> = vec![vec![]; sparse_matrix.len()]; - for (i, sparse_row) in sparse_matrix.iter().enumerate() { - for (j, value) in sparse_row.iter() { - if col_threshold[*j as usize] < *value { - if 0 < col_threshold[*j as usize] { - result[i].push((*j, col_threshold[*j as usize])); + let zero = I32F32::saturating_from_num(0.0); + let mut result = Vec::with_capacity(sparse_matrix.len()); + + for row in sparse_matrix.iter() { + let mut out_row: Vec<(u16, I32F32)> = Vec::with_capacity(row.len()); + for &(j, val) in row.iter() { + let th = col_threshold.get(j as usize).copied().unwrap_or(zero); + if th < val { + if th > zero { + // clip down to threshold, but drop if threshold <= 0 + out_row.push((j, th)); } } else { - result[i].push((*j, *value)); + // keep original + out_row.push((j, val)); } } + result.push(out_row); } - result -} -// Set matrix values below threshold to lower, and equal-above to upper. -#[allow(dead_code)] -pub fn clip( - x: &[Vec], - threshold: I32F32, - upper: I32F32, - lower: I32F32, -) -> Vec> { - x.iter() - .map(|row| { - row.iter() - .map(|elem| if *elem >= threshold { upper } else { lower }) - .collect() - }) - .collect() -} - -// Set inplace matrix values below threshold to lower, and equal-above to upper. -#[allow(dead_code)] -pub fn inplace_clip(x: &mut [Vec], threshold: I32F32, upper: I32F32, lower: I32F32) { - x.iter_mut().for_each(|row| { - row.iter_mut().for_each(|elem| { - *elem = if *elem >= threshold { upper } else { lower }; - }); - }); -} - -// Set sparse matrix values below threshold to lower, and equal-above to upper. -// Does not add missing elements (0 value assumed) when lower!=0. 
-#[allow(dead_code)] -pub fn clip_sparse( - sparse_matrix: &[Vec<(u16, I32F32)>], - threshold: I32F32, - upper: I32F32, - lower: I32F32, -) -> Vec> { - sparse_matrix - .iter() - .map(|row| { - row.iter() - .map(|(j, value)| { - if *value < threshold { - (*j, lower) - } else { - (*j, upper) - } - }) - .collect() - }) - .collect() + result } // Stake-weighted median score finding algorithm, based on a mid pivot binary search. @@ -995,144 +918,199 @@ pub fn clip_sparse( // * 'median': ( I32F32 ): // - median via random pivot binary search. // -#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median( stake: &[I32F32], score: &[I32F32], partition_idx: &[usize], minority: I32F32, - partition_lo: I32F32, - partition_hi: I32F32, + mut partition_lo: I32F32, + mut partition_hi: I32F32, ) -> I32F32 { - let n = partition_idx.len(); - if n == 0 { - return I32F32::saturating_from_num(0); - } - if n == 1 { - return score[partition_idx[0]]; + let zero = I32F32::saturating_from_num(0.0); + if stake.len() != score.len() { + log::error!( + "weighted_median stake and score have different lengths: {:?} != {:?}", + stake.len(), + score.len() + ); + return zero; } - assert!(stake.len() == score.len()); - let mid_idx: usize = n.safe_div(2); - let pivot: I32F32 = score[partition_idx[mid_idx]]; - let mut lo_stake: I32F32 = I32F32::saturating_from_num(0); - let mut hi_stake: I32F32 = I32F32::saturating_from_num(0); + let mut current_partition_index: Vec = partition_idx.to_vec(); + let mut iteration_counter: usize = 0; + let iteration_limit = partition_idx.len(); let mut lower: Vec = vec![]; let mut upper: Vec = vec![]; - for &idx in partition_idx { - if score[idx] == pivot { - continue; + + loop { + let n = current_partition_index.len(); + if n == 0 { + return zero; } - if score[idx] < pivot { - lo_stake = lo_stake.saturating_add(stake[idx]); - lower.push(idx); - } else { - hi_stake = hi_stake.saturating_add(stake[idx]); - upper.push(idx); + if n == 1 { + if let Some(&only_idx) 
= current_partition_index.first() { + return get_safe::(score, only_idx); + } else { + return zero; + } } - } - if (partition_lo.saturating_add(lo_stake) <= minority) - && (minority < partition_hi.saturating_sub(hi_stake)) - { - return pivot; - } else if (minority < partition_lo.saturating_add(lo_stake)) && (!lower.is_empty()) { - return weighted_median( - stake, - score, - &lower, - minority, - partition_lo, - partition_lo.saturating_add(lo_stake), - ); - } else if (partition_hi.saturating_sub(hi_stake) <= minority) && (!upper.is_empty()) { - return weighted_median( - stake, + let mid_idx: usize = n.safe_div(2); + let pivot: I32F32 = get_safe::( score, - &upper, - minority, - partition_hi.saturating_sub(hi_stake), - partition_hi, + current_partition_index.get(mid_idx).copied().unwrap_or(0), ); + let mut lo_stake: I32F32 = I32F32::saturating_from_num(0); + let mut hi_stake: I32F32 = I32F32::saturating_from_num(0); + + for idx in current_partition_index.clone() { + if get_safe::(score, idx) == pivot { + continue; + } + if get_safe::(score, idx) < pivot { + lo_stake = lo_stake.saturating_add(get_safe::(stake, idx)); + lower.push(idx); + } else { + hi_stake = hi_stake.saturating_add(get_safe::(stake, idx)); + upper.push(idx); + } + } + if (minority < partition_lo.saturating_add(lo_stake)) && (!lower.is_empty()) { + current_partition_index = lower.clone(); + partition_hi = partition_lo.saturating_add(lo_stake); + } else if (partition_hi.saturating_sub(hi_stake) <= minority) && (!upper.is_empty()) { + current_partition_index = upper.clone(); + partition_lo = partition_hi.saturating_sub(hi_stake); + } else { + return pivot; + } + + lower.clear(); + upper.clear(); + + // Safety limit: We should never need more than iteration_limit iterations. + iteration_counter = iteration_counter.saturating_add(1); + if iteration_counter > iteration_limit { + break; + } } - pivot + zero } /// Column-wise weighted median, e.g. 
stake-weighted median scores per server (column) over all validators (rows). -#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median_col( stake: &[I32F32], score: &[Vec], majority: I32F32, ) -> Vec { - let rows = stake.len(); - let columns = score[0].len(); - let zero: I32F32 = I32F32::saturating_from_num(0); - let mut median: Vec = vec![zero; columns]; - - #[allow(clippy::needless_range_loop)] - for c in 0..columns { - let mut use_stake: Vec = vec![]; - let mut use_score: Vec = vec![]; - for r in 0..rows { - assert_eq!(columns, score[r].len()); - if stake[r] > zero { - use_stake.push(stake[r]); - use_score.push(score[r][c]); + let zero = I32F32::saturating_from_num(0.0); + + // Determine number of columns from the first row (no indexing). + let columns = score.first().map(|r| r.len()).unwrap_or(0); + let mut median = vec![zero; columns]; + + // Iterate columns without indexing into `median`. + let mut c = 0usize; + for med_cell in median.iter_mut() { + let mut use_stake: Vec = Vec::new(); + let mut use_score: Vec = Vec::new(); + + // Iterate rows aligned with `stake` length; avoid indexing into `stake`/`score`. + let mut r = 0usize; + while r < stake.len() { + let st = get_safe::(stake, r); + if st > zero { + // Fetch row safely; if it's missing or has wrong width, push zeros to both. + if let Some(row) = score.get(r) { + if row.len() == columns { + let val = row.get(c).copied().unwrap_or(zero); + use_stake.push(st); + use_score.push(val); + } else { + use_stake.push(zero); + use_score.push(zero); + } + } else { + // Missing row: insert zeroes. 
+ use_stake.push(zero); + use_score.push(zero); + } } + r = r.saturating_add(1); } + if !use_stake.is_empty() { inplace_normalize(&mut use_stake); let stake_sum: I32F32 = use_stake.iter().sum(); let minority: I32F32 = stake_sum.saturating_sub(majority); - median[c] = weighted_median( + + let idxs: Vec = (0..use_stake.len()).collect(); + *med_cell = weighted_median( &use_stake, &use_score, - (0..use_stake.len()).collect::>().as_slice(), + idxs.as_slice(), minority, zero, stake_sum, ); } + + c = c.saturating_add(1); } median } /// Column-wise weighted median, e.g. stake-weighted median scores per server (column) over all validators (rows). -#[allow(dead_code, clippy::indexing_slicing)] pub fn weighted_median_col_sparse( stake: &[I32F32], score: &[Vec<(u16, I32F32)>], columns: u16, majority: I32F32, ) -> Vec { - let rows = stake.len(); - let zero: I32F32 = I32F32::saturating_from_num(0); + let zero = I32F32::saturating_from_num(0.0); + + // Keep only positive-stake rows; normalize them. let mut use_stake: Vec = stake.iter().copied().filter(|&s| s > zero).collect(); inplace_normalize(&mut use_stake); + let stake_sum: I32F32 = use_stake.iter().sum(); - let stake_idx: Vec = (0..use_stake.len()).collect(); let minority: I32F32 = stake_sum.saturating_sub(majority); - let mut use_score: Vec> = vec![vec![zero; use_stake.len()]; columns as usize]; - let mut median: Vec = vec![zero; columns as usize]; + let stake_idx: Vec = (0..use_stake.len()).collect(); + + // use_score: columns x use_stake.len(), prefilled with zeros. + let mut use_score: Vec> = (0..columns as usize) + .map(|_| vec![zero; use_stake.len()]) + .collect(); + + // Fill use_score by walking stake and score together, counting positives with k. 
let mut k: usize = 0; - for r in 0..rows { - if stake[r] <= zero { - continue; - } - for (c, val) in score[r].iter() { - use_score[*c as usize][k] = *val; + let mut stake_it = stake.iter(); + let mut score_it = score.iter(); + + while let (Some(&s), Some(sparse_row)) = (stake_it.next(), score_it.next()) { + if s > zero { + for &(c, val) in sparse_row.iter() { + if let Some(col_vec) = use_score.get_mut(c as usize) { + if let Some(cell) = col_vec.get_mut(k) { + *cell = val; + } + } + } + k = k.saturating_add(1); } - k.saturating_inc(); } - for c in 0..columns as usize { - median[c] = weighted_median( + + // Compute weighted median per column without indexing. + let mut median: Vec = Vec::with_capacity(columns as usize); + for col_vec in use_score.iter() { + median.push(weighted_median( &use_stake, - &use_score[c], - &stake_idx, + col_vec, + stake_idx.as_slice(), minority, zero, stake_sum, - ); + )); } + median } @@ -1140,34 +1118,51 @@ pub fn weighted_median_col_sparse( // ratio has intended range [0, 1] // ratio=0: Result = A // ratio=1: Result = B -#[allow(dead_code)] pub fn interpolate(mat1: &[Vec], mat2: &[Vec], ratio: I32F32) -> Vec> { - if ratio == I32F32::saturating_from_num(0) { + if ratio == I32F32::saturating_from_num(0.0) { return mat1.to_owned(); } - if ratio == I32F32::saturating_from_num(1) { + if ratio == I32F32::saturating_from_num(1.0) { return mat2.to_owned(); } - assert!(mat1.len() == mat2.len()); - if mat1.is_empty() { - return vec![vec![]; 1]; - } - if mat1.first().unwrap_or(&vec![]).is_empty() { - return vec![vec![]; 1]; + if mat1.is_empty() || mat1.first().map(|r| r.is_empty()).unwrap_or(true) { + return vec![vec![]]; } - let mut result: Vec> = - vec![ - vec![I32F32::saturating_from_num(0); mat1.first().unwrap_or(&vec![]).len()]; - mat1.len() - ]; - for (i, (row1, row2)) in mat1.iter().zip(mat2.iter()).enumerate() { - assert!(row1.len() == row2.len()); - for (j, (&v1, &v2)) in row1.iter().zip(row2.iter()).enumerate() { - if let Some(res) = 
result.get_mut(i).unwrap_or(&mut vec![]).get_mut(j) { - *res = v1.saturating_add(ratio.saturating_mul(v2.saturating_sub(v1))); - } + + let zero = I32F32::saturating_from_num(0.0); + let cols = mat1.first().map(|r| r.len()).unwrap_or(0); + + // Pre-size result to mat1's shape (row count = mat1.len(), col count = first row of mat1). + let mut result: Vec> = { + let mut out = Vec::with_capacity(mat1.len()); + for _ in mat1.iter() { + out.push(vec![zero; cols]); + } + out + }; + + // Walk rows of mat1, mat2, and result in lockstep; stop when any iterator ends. + let mut m2_it = mat2.iter(); + let mut out_it = result.iter_mut(); + + for row1 in mat1.iter() { + let (Some(row2), Some(out_row)) = (m2_it.next(), out_it.next()) else { + break; + }; + + // Walk elements of row1, row2, and out_row in lockstep; stop at the shortest. + let mut r1_it = row1.iter(); + let mut r2_it = row2.iter(); + let mut out_cell_it = out_row.iter_mut(); + + while let (Some(v1), Some(v2), Some(out_cell)) = + (r1_it.next(), r2_it.next(), out_cell_it.next()) + { + *out_cell = (*v1).saturating_add(ratio.saturating_mul((*v2).saturating_sub(*v1))); } + // Any remaining cells in `out_row` (beyond min row length) stay as zero (pre-filled). } + result } @@ -1175,7 +1170,6 @@ pub fn interpolate(mat1: &[Vec], mat2: &[Vec], ratio: I32F32) -> // ratio has intended range [0, 1] // ratio=0: Result = A // ratio=1: Result = B -#[allow(dead_code)] pub fn interpolate_sparse( mat1: &[Vec<(u16, I32F32)>], mat2: &[Vec<(u16, I32F32)>], @@ -1188,7 +1182,10 @@ pub fn interpolate_sparse( if ratio == I32F32::saturating_from_num(1) { return mat2.to_owned(); } - assert!(mat1.len() == mat2.len()); + if mat1.len() != mat2.len() { + // In case if sizes mismatch, return clipped weights + return mat2.to_owned(); + } let rows = mat1.len(); let zero: I32F32 = I32F32::saturating_from_num(0); let mut result: Vec> = vec![vec![]; rows]; @@ -1224,12 +1221,16 @@ pub fn interpolate_sparse( } // Element-wise product of two vectors. 
-#[allow(dead_code)] pub fn vec_mul(a: &[I32F32], b: &[I32F32]) -> Vec { - a.iter() - .zip(b.iter()) - .map(|(x, y)| x.checked_mul(*y).unwrap_or_default()) - .collect() + let mut out = Vec::with_capacity(core::cmp::min(a.len(), b.len())); + let mut ai = a.iter(); + let mut bi = b.iter(); + + while let (Some(x), Some(y)) = (ai.next(), bi.next()) { + out.push(x.checked_mul(*y).unwrap_or_default()); + } + + out } // Element-wise product of matrix and vector @@ -1240,11 +1241,15 @@ pub fn mat_vec_mul(matrix: &[Vec], vector: &[I32F32]) -> Vec if first_row.is_empty() { return vec![vec![]]; } - matrix.iter().map(|row| vec_mul(row, vector)).collect() + + let mut out = Vec::with_capacity(matrix.len()); + for row in matrix.iter() { + out.push(vec_mul(row, vector)); + } + out } // Element-wise product of matrix and vector -#[allow(dead_code)] pub fn mat_vec_mul_sparse( matrix: &[Vec<(u16, I32F32)>], vector: &[I32F32], @@ -1265,58 +1270,6 @@ pub fn mat_vec_mul_sparse( result } -// Element-wise product of two matrices. -#[allow(dead_code)] -pub fn hadamard(mat1: &[Vec], mat2: &[Vec]) -> Vec> { - assert!(mat1.len() == mat2.len()); - let Some(first_row) = mat1.first() else { - return vec![vec![]]; - }; - if first_row.is_empty() { - return vec![vec![]]; - } - mat1.iter() - .zip(mat2) - .map(|(row1, row2)| { - assert!(row1.len() == row2.len()); - row1.iter() - .zip(row2) - .map(|(elem1, elem2)| elem1.saturating_mul(*elem2)) - .collect() - }) - .collect() -} - -// Element-wise product of two sparse matrices. 
-#[allow(dead_code, clippy::indexing_slicing)] -pub fn hadamard_sparse( - mat1: &[Vec<(u16, I32F32)>], - mat2: &[Vec<(u16, I32F32)>], - columns: u16, -) -> Vec> { - assert!(mat1.len() == mat2.len()); - let rows = mat1.len(); - let zero: I32F32 = I32F32::saturating_from_num(0); - let mut result: Vec> = vec![vec![]; rows]; - for i in 0..rows { - let mut row1: Vec = vec![zero; columns as usize]; - for (j, value) in mat1[i].iter() { - row1[*j as usize] = row1[*j as usize].saturating_add(*value); - } - let mut row2: Vec = vec![zero; columns as usize]; - for (j, value) in mat2[i].iter() { - row2[*j as usize] = row2[*j as usize].saturating_add(*value); - } - for j in 0..columns as usize { - let prod: I32F32 = row1[j].saturating_mul(row2[j]); - if zero < prod { - result[i].push((j as u16, prod)) - } - } - } - result -} - /// Clamp the input value between high and low. /// Note: assumes high > low pub fn clamp_value(value: I32F32, low: I32F32, high: I32F32) -> I32F32 { @@ -1334,7 +1287,6 @@ pub fn clamp_value(value: I32F32, low: I32F32, high: I32F32) -> I32F32 { // Return matrix exponential moving average: `alpha * a_ij + one_minus_alpha * b_ij`. // `alpha` is the EMA coefficient, how much to add of the new observation, typically small, // higher alpha discounts older observations faster. 
-#[allow(dead_code)] pub fn mat_ema(new: &[Vec], old: &[Vec], alpha: I32F32) -> Vec> { let Some(first_row) = new.first() else { return vec![vec![]]; @@ -1342,214 +1294,199 @@ pub fn mat_ema(new: &[Vec], old: &[Vec], alpha: I32F32) -> Vec], old: &[Vec<(u16, I32F32)>], alpha: I32F32, ) -> Vec> { - assert!(new.len() == old.len()); - let n = new.len(); // assume square matrix, rows=cols - let zero: I32F32 = I32F32::saturating_from_num(0.0); - let one_minus_alpha: I32F32 = I32F32::saturating_from_num(1.0).saturating_sub(alpha); - let mut result: Vec> = vec![vec![]; n]; - for i in 0..new.len() { - let mut row: Vec = vec![zero; n]; - for (j, value) in new[i].iter() { - row[*j as usize] = row[*j as usize].saturating_add(alpha.saturating_mul(*value)); + let zero = I32F32::saturating_from_num(0.0); + let one_minus_alpha = I32F32::saturating_from_num(1.0).saturating_sub(alpha); + + let n = new.len(); // assume square (rows = cols) + if n == 0 { + return Vec::new(); + } + + let mut result: Vec> = Vec::with_capacity(n); + let mut old_it = old.iter(); + + for new_row in new.iter() { + let mut acc_row = vec![zero; n]; + + // Add alpha * new + for &(j, v) in new_row.iter() { + if let Some(cell) = acc_row.get_mut(j as usize) { + *cell = cell.saturating_add(alpha.saturating_mul(v)); + } } - for (j, value) in old[i].iter() { - row[*j as usize] = - row[*j as usize].saturating_add(one_minus_alpha.saturating_mul(*value)); + + // Add (1 - alpha) * old + if let Some(orow) = old_it.next() { + for &(j, v) in orow.iter() { + if let Some(cell) = acc_row.get_mut(j as usize) { + *cell = cell.saturating_add(one_minus_alpha.saturating_mul(v)); + } + } } - for (j, value) in row.iter().enumerate() { - if *value > zero { - result[i].push((j as u16, *value)) + + // Densified row -> sparse (keep positives) + let mut out_row: Vec<(u16, I32F32)> = Vec::new(); + for (j, &val) in acc_row.iter().enumerate() { + if val > zero { + out_row.push((j as u16, val)); } } + + result.push(out_row); } + result } /// 
Calculates the exponential moving average (EMA) for a sparse matrix using dynamic alpha values. -#[allow(dead_code)] pub fn mat_ema_alpha_sparse( new: &[Vec<(u16, I32F32)>], old: &[Vec<(u16, I32F32)>], alpha: &[Vec], ) -> Vec> { - // Ensure dimensions match. - assert!(new.len() == old.len()); - assert!(new.len() == alpha.len()); + // If shapes don't match, just return `new` + if new.len() != old.len() || new.len() != alpha.len() { + return new.to_owned(); + } - // The output vector of rows. - let mut result: Vec> = Vec::with_capacity(new.len()); - let zero: I32F32 = I32F32::saturating_from_num(0.0); + let zero = I32F32::saturating_from_num(0.0); let one = I32F32::saturating_from_num(1.0); - // Iterate over each row of the matrices. - for ((new_row, old_row), alpha_row) in new.iter().zip(old).zip(alpha) { - // Initialize a row of zeros for the result matrix. - let mut decayed_values: Vec = vec![zero; alpha_row.len()]; + let mut result: Vec> = Vec::with_capacity(new.len()); + let mut old_it = old.iter(); + let mut alf_it = alpha.iter(); - let mut result_row: Vec<(u16, I32F32)> = Vec::new(); + for new_row in new.iter() { + let Some(old_row) = old_it.next() else { break }; + let Some(alpha_row) = alf_it.next() else { + break; + }; - // Process the old matrix values. - for (j, old_val) in old_row.iter() { - if let (Some(alpha_val), Some(decayed_val)) = ( - alpha_row.get(*j as usize), - decayed_values.get_mut(*j as usize), + // Densified accumulator sized to alpha_row length (columns outside are ignored). + let mut decayed_values = vec![zero; alpha_row.len()]; + + // Apply (1 - alpha_j) * old_ij into accumulator. 
+ for &(j, old_val) in old_row.iter() { + if let (Some(&a), Some(cell)) = ( + alpha_row.get(j as usize), + decayed_values.get_mut(j as usize), ) { - // Calculate the complement of the alpha value - let one_minus_alpha = one.saturating_sub(*alpha_val); - // Bonds_decayed = Bonds * (1 - alpha) - *decayed_val = one_minus_alpha.saturating_mul(*old_val); + *cell = one.saturating_sub(a).saturating_mul(old_val); } } - // Process the new matrix values. - for (j, new_val) in new_row.iter() { - if let (Some(alpha_val), Some(decayed_val)) = - (alpha_row.get(*j as usize), decayed_values.get(*j as usize)) + // Add alpha_j * new_ij, clamp to [0, 1], and emit sparse entries > 0. + let mut out_row: Vec<(u16, I32F32)> = Vec::new(); + for &(j, new_val) in new_row.iter() { + if let (Some(&a), Some(&decayed)) = + (alpha_row.get(j as usize), decayed_values.get(j as usize)) { - // Each validator can increase bonds by at most clamped_alpha per epoch towards the cap - // Validators allocate their purchase across miners based on weights - let purchase_increment = alpha_val.saturating_mul(*new_val).max(zero); - let result_val = decayed_val.saturating_add(purchase_increment).min(one); - - if result_val > zero { - result_row.push((*j, result_val)); + let inc = a.saturating_mul(new_val).max(zero); + let val = decayed.saturating_add(inc).min(one); + if val > zero { + out_row.push((j, val)); } } } - result.push(result_row); + + result.push(out_row); } - // Return the computed EMA sparse matrix. result } /// Calculates the exponential moving average (EMA) for a dense matrix using dynamic alpha values. -#[allow(dead_code)] pub fn mat_ema_alpha( new: &[Vec], // Weights old: &[Vec], // Bonds alpha: &[Vec], ) -> Vec> { - // Check if the new matrix is empty or its first row is empty. 
- if new.is_empty() || new.first().is_none_or(|row| row.is_empty()) { - return vec![vec![]; 1]; + // Empty or degenerate input + if new.is_empty() || new.first().map(|r| r.is_empty()).unwrap_or(true) { + return vec![vec![]]; } - // Ensure the dimensions of the new, old and alpha matrices match. - assert!(new.len() == old.len()); - assert!(new.len() == alpha.len()); - - // Initialize the result matrix with zeros, having the same dimensions as the new matrix. - let zero: I32F32 = I32F32::saturating_from_num(0.0); - let one = I32F32::saturating_from_num(1.0); - - let mut result: Vec> = Vec::with_capacity(new.len()); - - // Iterate over each row of the matrices. - for ((new_row, old_row), alpha_row) in new.iter().zip(old).zip(alpha) { - assert!(new_row.len() == old_row.len()); - assert!(new_row.len() == alpha_row.len()); - let mut result_row: Vec = Vec::new(); - - // Iterate over each column of the current row. - for j in 0..new_row.len() { - // Compute the EMA for the current element using saturating operations. - if let (Some(new_val), Some(old_val), Some(alpha_val)) = - (new_row.get(j), old_row.get(j), alpha_row.get(j)) - { - // Calculate the complement of the alpha value - let one_minus_alpha = one.saturating_sub(*alpha_val); - - // Bonds_decayed = Bonds * (1 - alpha) - let decayed_val = one_minus_alpha.saturating_mul(*old_val); + // If outer dimensions don't match, return bonds unchanged + if new.len() != old.len() || new.len() != alpha.len() { + return old.to_owned(); + } - // Each validator can increase bonds by at most clamped_alpha per epoch towards the cap - // Validators allocate their purchase across miners based on weights - let purchase_increment = alpha_val.saturating_mul(*new_val).max(zero); - let result_val = decayed_val.saturating_add(purchase_increment).min(one); - result_row.push(result_val); - } + // Ensure each corresponding row has matching length; otherwise return `new` unchanged. 
+ let mut old_it = old.iter(); + let mut alp_it = alpha.iter(); + for nrow in new.iter() { + let (Some(orow), Some(arow)) = (old_it.next(), alp_it.next()) else { + return new.to_owned(); + }; + if nrow.len() != orow.len() || nrow.len() != arow.len() { + return new.to_owned(); } - result.push(result_row); } - // Return the computed EMA matrix. - result -} -/// Return the quantile of a vector of I32F32 values. -pub fn quantile(data: &[I32F32], quantile: f64) -> I32F32 { - // Clone the input data to avoid modifying the original vector. - let mut sorted_data = data.to_owned(); - - // Sort the cloned data in ascending order, handling potential NaN values. - sorted_data.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal)); - - // Get the length of the sorted data. - let len = sorted_data.len(); + let zero = I32F32::saturating_from_num(0.0); + let one = I32F32::saturating_from_num(1.0); - // If the data is empty, return 0 as the quantile value. - if len == 0 { - return I32F32::saturating_from_num(0); - } + // Compute EMA: result = (1 - α) * old + α * new, clamped to [0, 1]. + let mut out: Vec> = Vec::with_capacity(new.len()); + let mut old_it = old.iter(); + let mut alp_it = alpha.iter(); - // Calculate the position in the sorted array corresponding to the quantile. - let pos = quantile * (len.saturating_sub(1)) as f64; + for nrow in new.iter() { + let (Some(orow), Some(arow)) = (old_it.next(), alp_it.next()) else { + break; + }; - // Determine the lower index by flooring the position. - let low = pos.floor() as usize; + let mut r: Vec = Vec::with_capacity(nrow.len()); + let mut n_it = nrow.iter(); + let mut o_it = orow.iter(); + let mut a_it = arow.iter(); - // Determine the higher index by ceiling the position. 
- let high = pos.ceil() as usize; + while let (Some(&n), Some(&o), Some(&a)) = (n_it.next(), o_it.next(), a_it.next()) { + let one_minus_a = one.saturating_sub(a); + let decayed = one_minus_a.saturating_mul(o); + let inc = a.saturating_mul(n).max(zero); + r.push(decayed.saturating_add(inc).min(one)); + } - // If the low and high indices are the same, return the value at that index. - if low == high { - sorted_data - .get(low) - .copied() - .unwrap_or_else(|| I32F32::saturating_from_num(0)) - } else { - // Otherwise, perform linear interpolation between the low and high values. - let low_value = sorted_data - .get(low) - .copied() - .unwrap_or_else(|| I32F32::saturating_from_num(0)); - let high_value = sorted_data - .get(high) - .copied() - .unwrap_or_else(|| I32F32::saturating_from_num(0)); - - // Calculate the weight for interpolation. - let weight = I32F32::saturating_from_num(pos - low as f64); - - // Return the interpolated value using saturating operations. - low_value.saturating_add((high_value.saturating_sub(low_value)).saturating_mul(weight)) + out.push(r); } + + out } /// Safe ln function, returns 0 if value is 0. 
diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 7ab4446c3e..f4e94099ed 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -87,7 +87,7 @@ impl Pallet { Self::epoch_dense_mechanism(netuid, MechId::MAIN, rao_emission) } - /// Persists per-subsubnet epoch output in state + /// Persists per-mechanism epoch output in state pub fn persist_mechanism_epoch_terms( netuid: NetUid, mecid: MechId, diff --git a/pallets/subtensor/src/subnets/mechanism.rs b/pallets/subtensor/src/subnets/mechanism.rs index f8fa76ad51..6598c308f2 100644 --- a/pallets/subtensor/src/subnets/mechanism.rs +++ b/pallets/subtensor/src/subnets/mechanism.rs @@ -39,6 +39,15 @@ impl Pallet { .into() } + pub fn get_netuid(netuid_index: NetUidStorageIndex) -> NetUid { + if let Some(netuid) = u16::from(netuid_index).checked_rem(GLOBAL_MAX_SUBNET_COUNT) { + NetUid::from(netuid) + } else { + // Because GLOBAL_MAX_SUBNET_COUNT is not zero, this never happens + NetUid::ROOT + } + } + pub fn get_netuid_and_subid( netuid_index: NetUidStorageIndex, ) -> Result<(NetUid, MechId), Error> { diff --git a/pallets/subtensor/src/tests/math.rs b/pallets/subtensor/src/tests/math.rs index a200fa8b25..6c6636ca68 100644 --- a/pallets/subtensor/src/tests/math.rs +++ b/pallets/subtensor/src/tests/math.rs @@ -1341,39 +1341,6 @@ fn test_math_row_sum_sparse() { assert_vec_compare(&result, &target, I32F32::from_num(0)); } -#[test] -fn test_math_col_sum() { - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_mat_fixed(&matrix, 4, false); - let result = col_sum(&matrix); - let target: Vec = vec_to_fixed(&[22., 26., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); -} - -#[test] -fn test_math_col_sum_sparse() { - let matrix: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = 
col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[22., 26., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 2., 3., 4., 0., 6., 7., 8., 0., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[21., 21., 21.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![1., 0., 3., 4., 0., 6., 7., 0., 9., 10., 0., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[22., 0., 30.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); - let matrix: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let result = col_sum_sparse(&matrix, 3); - let target: Vec = vec_to_fixed(&[0., 0., 0.]); - assert_vec_compare(&result, &target, I32F32::from_num(0)); -} - #[test] fn test_math_matmul() { let vector: Vec = vec_to_fixed(&[1., 2., 3., 4.]); @@ -1468,51 +1435,6 @@ fn test_math_col_clip_sparse() { assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); } -#[test] -fn test_math_clip_sparse() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_sparse_mat_fixed(&matrix, 4, false); - let target: Vec = vec![0., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = clip_sparse( - &matrix, - I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); -} - -#[test] -fn test_math_clip() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let matrix = vec_to_mat_fixed(&matrix, 4, false); - let target: Vec = vec![1., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = 
vec_to_mat_fixed(&target, 4, false); - let result = clip( - &matrix, - I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_mat_compare(&result, &target, I32F32::from_num(0)); -} - -#[test] -fn test_math_inplace_clip() { - let matrix: Vec = vec![0., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mut matrix = vec_to_mat_fixed(&matrix, 4, false); - let target: Vec = vec![1., 1., 1., 1., 1., 1., 1., 100., 100., 100., 100., 100.]; - let target = vec_to_mat_fixed(&target, 4, false); - inplace_clip( - &mut matrix, - I32F32::from_num(8), - I32F32::from_num(100), - I32F32::from_num(1), - ); - assert_mat_compare(&matrix, &target, I32F32::from_num(0)); -} - #[test] fn test_math_weighted_median() { let mut rng = thread_rng(); @@ -2083,70 +2005,6 @@ fn test_math_interpolate_sparse() { assert_sparse_mat_compare(&result, &target, I32F32::from_num(0)); } -#[test] -fn test_math_hadamard() { - let mat2: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mat1: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let target: Vec = vec![ - 10., 40., 90., 160., 250., 360., 490., 640., 810., 1000., 1210., 1440., - ]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![1., 0., 0., 0., 2., 0., 0., 0., 3., 0., 0., 0.]; 
- let mat1: Vec = vec![0., 0., 4., 0., 5., 0., 6., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 10., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_mat_fixed(&mat1, 4, false); - let target = vec_to_mat_fixed(&target, 4, false); - let result = hadamard(&mat1, &mat2); - assert_mat_compare(&result, &target, I32F32::from_num(0.000001)); -} - -#[test] -fn test_math_hadamard_sparse() { - let mat2: Vec = vec![1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let mat1: Vec = vec![ - 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., - ]; - let target: Vec = vec![ - 10., 40., 90., 160., 250., 360., 490., 640., 810., 1000., 1210., 1440., - ]; - let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = hadamard_sparse(&mat1, &mat2, 3); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = hadamard_sparse(&mat1, &mat2, 3); - assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); - let mat2: Vec = vec![1., 0., 0., 0., 2., 0., 0., 0., 3., 0., 0., 0.]; - let mat1: Vec = vec![0., 0., 4., 0., 5., 0., 6., 0., 0., 0., 0., 0.]; - let target: Vec = vec![0., 0., 0., 0., 10., 0., 0., 0., 0., 0., 0., 0.]; - let mat2 = vec_to_sparse_mat_fixed(&mat2, 4, false); - let mat1 = vec_to_sparse_mat_fixed(&mat1, 4, false); - let target = vec_to_sparse_mat_fixed(&target, 4, false); - let result = hadamard_sparse(&mat1, &mat2, 3); - 
assert_sparse_mat_compare(&result, &target, I32F32::from_num(0.000001)); -} - #[test] fn test_math_mat_ema_alpha() { let old: Vec = vec![ @@ -2726,9 +2584,7 @@ fn test_mat_ema_alpha_single_element() { assert_eq!(result, expected); } -// TODO: (@sd): Should these be non panicking? #[test] -#[should_panic(expected = "assertion failed")] fn test_mat_ema_alpha_mismatched_dimensions() { let new = mat_to_fixed(&[vec![1.0, 2.0], vec![3.0, 4.0]]); let old = mat_to_fixed(&[vec![1.0, 2.0, 3.0], vec![4.0, 5.0, 6.0]]); @@ -2740,41 +2596,6 @@ fn test_mat_ema_alpha_mismatched_dimensions() { ]; 2 ]; - let _result = mat_ema_alpha(&new, &old, &alpha); -} - -#[test] -fn test_quantile() { - // Test with a non-empty vector and valid quantile values - let data = vec![ - I32F32::from_num(1.0), - I32F32::from_num(2.0), - I32F32::from_num(3.0), - I32F32::from_num(4.0), - I32F32::from_num(5.0), - ]; - - // Test 0th quantile (minimum) - let result = quantile(&data, 0.0); - assert_eq!(result, I32F32::from_num(1.0)); - - // Test 25th quantile - let result = quantile(&data, 0.25); - assert_eq!(result, I32F32::from_num(2.0)); - - // Test 50th quantile (median) - let result = quantile(&data, 0.5); - assert_eq!(result, I32F32::from_num(3.0)); - - // Test 66th quantile - let result = quantile(&data, 0.66); - assert_eq!(result, I32F32::from_num(3.64)); - - // Test 75th quantile - let result = quantile(&data, 0.75); - assert_eq!(result, I32F32::from_num(4.0)); - - // Test 100th quantile (maximum) - let result = quantile(&data, 1.0); - assert_eq!(result, I32F32::from_num(5.0)); + let result = mat_ema_alpha(&new, &old, &alpha); + assert_eq!(result[0][0], old[0][0]) } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 9ca7e361cc..88444a1b83 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -206,8 +206,16 @@ impl Pallet { pub fn get_dividends(netuid: NetUid) -> Vec { Dividends::::get(netuid) } - pub fn 
get_last_update(netuid: NetUidStorageIndex) -> Vec { - LastUpdate::::get(netuid) + /// Fetch LastUpdate for `netuid` and ensure its length is at least `get_subnetwork_n(netuid)`, + /// padding with zeros if needed. Returns the (possibly padded) vector. + pub fn get_last_update(netuid_index: NetUidStorageIndex) -> Vec { + let netuid = Self::get_netuid(netuid_index); + let target_len = Self::get_subnetwork_n(netuid) as usize; + let mut v = LastUpdate::::get(netuid_index); + if v.len() < target_len { + v.resize(target_len, 0); + } + v } pub fn get_pruning_score(netuid: NetUid) -> Vec { PruningScores::::get(netuid) From 29414e54a19aaeba8e26c0507894f8d1e0bb09a2 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 17:02:57 -0400 Subject: [PATCH 315/379] Cleanup code comments --- pallets/subtensor/src/epoch/math.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index 6288ac14ae..ccc23a1bed 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -231,10 +231,10 @@ pub fn is_topk_nonzero(vector: &[I32F32], k: usize) -> Vec { return result; } let mut idxs: Vec = (0..n).collect(); - idxs.sort_by_key(|&idx| get_safe(vector, idx)); // ascending stable sort (no indexing) + idxs.sort_by_key(|&idx| get_safe(vector, idx)); // ascending stable sort for &idx in idxs.iter().take(n.saturating_sub(k)) { if let Some(cell) = result.get_mut(idx) { - *cell = false; // no indexing + *cell = false; } } result @@ -1003,17 +1003,17 @@ pub fn weighted_median_col( ) -> Vec { let zero = I32F32::saturating_from_num(0.0); - // Determine number of columns from the first row (no indexing). + // Determine number of columns from the first row. let columns = score.first().map(|r| r.len()).unwrap_or(0); let mut median = vec![zero; columns]; - // Iterate columns without indexing into `median`. + // Iterate columns into `median`. 
let mut c = 0usize; for med_cell in median.iter_mut() { let mut use_stake: Vec = Vec::new(); let mut use_score: Vec = Vec::new(); - // Iterate rows aligned with `stake` length; avoid indexing into `stake`/`score`. + // Iterate rows aligned with `stake` length. let mut r = 0usize; while r < stake.len() { let st = get_safe::(stake, r); @@ -1098,7 +1098,7 @@ pub fn weighted_median_col_sparse( } } - // Compute weighted median per column without indexing. + // Compute weighted median per column. let mut median: Vec = Vec::with_capacity(columns as usize); for col_vec in use_score.iter() { median.push(weighted_median( From 93cb2da050977ddf8abc6e308fbcaa786d336809 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 17:11:17 -0400 Subject: [PATCH 316/379] Add a test for mismatching sizes of LastUpdate vector and Weights matrix (issue that killed TestNet) --- pallets/subtensor/src/tests/epoch.rs | 56 ++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index fec978a51d..7c23dc2b2c 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -3827,3 +3827,59 @@ fn test_epoch_does_not_mask_outside_window_but_masks_inside() { ); }); } + +// Test an epoch doesn't panic when LastUpdate size doesn't match to Weights size. 
+// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::epoch::test_last_update_size_mismatch --exact --show-output --nocapture +#[test] +fn test_last_update_size_mismatch() { + new_test_ext(1).execute_with(|| { + log::info!("test_1_graph:"); + let netuid = NetUid::from(1); + let coldkey = U256::from(0); + let hotkey = U256::from(0); + let uid: u16 = 0; + let stake_amount: u64 = 1_000_000_000; + add_network_disable_commit_reveal(netuid, u16::MAX - 1, 0); + SubtensorModule::set_max_allowed_uids(netuid, 1); + SubtensorModule::add_balance_to_coldkey_account( + &coldkey, + stake_amount + ExistentialDeposit::get(), + ); + register_ok_neuron(netuid, hotkey, coldkey, 1); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + + assert_ok!(SubtensorModule::add_stake( + RuntimeOrigin::signed(coldkey), + hotkey, + netuid, + stake_amount.into() + )); + + assert_eq!(SubtensorModule::get_subnetwork_n(netuid), 1); + run_to_block(1); // run to next block to ensure weights are set on nodes after their registration block + assert_ok!(SubtensorModule::set_weights( + RuntimeOrigin::signed(U256::from(uid)), + netuid, + vec![uid], + vec![u16::MAX], + 0 + )); + + // Set mismatching LastUpdate vector + LastUpdate::::insert(NetUidStorageIndex::from(netuid), vec![1, 1, 1]); + + SubtensorModule::epoch(netuid, 1_000_000_000.into()); + assert_eq!( + SubtensorModule::get_total_stake_for_hotkey(&hotkey), + stake_amount.into() + ); + assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); + assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); + assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 0 + ); + assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); + }); +} From d7f062676a46a16de679e39e1a75a6e139bc453e Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 14:13:01 -0700 Subject: [PATCH 317/379] debug in docker --- 
Dockerfile-localnet | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 983c3bcd05..a151d76093 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -16,6 +16,9 @@ LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ COPY . /build WORKDIR /build +# Debug: print full file tree under /build +RUN echo "📂 Contents of /build:" && find . -type f | sort + # Set up env var ARG BUILT_IN_CI ENV BUILT_IN_CI=${BUILT_IN_CI} From 2af0fb26baa08c81ba5086adf8d10a71a8f16a50 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 14:31:41 -0700 Subject: [PATCH 318/379] bc of .dockerignore --- .github/workflows/docker-localnet.yml | 18 +++++++++--------- Dockerfile-localnet | 5 ++--- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 584d447f22..6528ef0d0d 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -87,15 +87,15 @@ jobs: - name: Prepare artifacts for upload run: | - mkdir -p build/scripts/ - cp -v snapshot.json build/ || true - cp -v scripts/localnet.sh build/scripts/localnet.sh || true +# mkdir -p build/scripts/ +# cp -v snapshot.json build/ || true +# cp -v scripts/localnet.sh build/scripts/localnet.sh || true - mkdir -p build/target/${{ matrix.runtime }}/release/ || true - cp -v target/${{ matrix.runtime }}/release/node-subtensor build/target/${{ matrix.runtime }}/release/node-subtensor || true + mkdir -p build/ci_target/${{ matrix.runtime }}/release/ || true + cp -v target/${{ matrix.runtime }}/release/node-subtensor build/ci_target/${{ matrix.runtime }}/release/node-subtensor || true - mkdir -p build/target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/ || true - cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm build/target/${{ matrix.runtime 
}}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm || true + mkdir -p build/ci_target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/ || true + cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm build/ci_target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm || true - name: Upload artifacts uses: actions/upload-artifact@v4 @@ -169,5 +169,5 @@ jobs: tags: | ghcr.io/${{ github.repository }}-localnet:${{ needs.setup.outputs.tag }} ${{ needs.setup.outputs.latest_tag == 'true' && format('ghcr.io/{0}-localnet:latest', github.repository) || '' }} -# cache-from: type=gha -# cache-to: type=gha,mode=max + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/Dockerfile-localnet b/Dockerfile-localnet index a151d76093..b05a563925 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -16,9 +16,6 @@ LABEL ai.opentensor.image.authors="operations@opentensor.ai" \ COPY . /build WORKDIR /build -# Debug: print full file tree under /build -RUN echo "📂 Contents of /build:" && find . 
-type f | sort - # Set up env var ARG BUILT_IN_CI ENV BUILT_IN_CI=${BUILT_IN_CI} @@ -39,6 +36,8 @@ RUN if [ -z "$BUILT_IN_CI" ]; then \ ./scripts/localnet.sh False --build-only ; \ else \ echo "[*] BUILT_IN_CI is set → skipping install + build."; \ + echo "[*] Renaming /build/ci_target → /build/target bc of .dockerignore" && \ + mv /build/ci_target /build/target ; \ fi # Verify the binaries was produced From 34de39f55aacb61f332c9c77d1588f2f5a2be1c2 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 14:33:46 -0700 Subject: [PATCH 319/379] small correct --- Dockerfile-localnet | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Dockerfile-localnet b/Dockerfile-localnet index b05a563925..d365174bd2 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -34,10 +34,12 @@ RUN if [ -z "$BUILT_IN_CI" ]; then \ ./scripts/install_build_env.sh && \ ./scripts/localnet.sh --build-only && \ ./scripts/localnet.sh False --build-only ; \ - else \ - echo "[*] BUILT_IN_CI is set → skipping install + build."; \ - echo "[*] Renaming /build/ci_target → /build/target bc of .dockerignore" && \ + elif [ -d "/build/ci_target" ]; then \ + echo "[*] BUILT_IN_CI is set → skipping install + build." && \ + echo "[*] Renaming /build/ci_target → /build/target" && \ mv /build/ci_target /build/target ; \ + else \ + echo "[!] BUILT_IN_CI is set but /build/ci_target not found. Exiting." 
&& exit 1 ; \ fi # Verify the binaries was produced From 57c37d9a3b11cebebaddfb8c78f98d553ab8b80b Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 14:36:05 -0700 Subject: [PATCH 320/379] small correct --- .github/workflows/docker-localnet.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 6528ef0d0d..821d55f6e9 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -87,16 +87,16 @@ jobs: - name: Prepare artifacts for upload run: | -# mkdir -p build/scripts/ -# cp -v snapshot.json build/ || true -# cp -v scripts/localnet.sh build/scripts/localnet.sh || true - mkdir -p build/ci_target/${{ matrix.runtime }}/release/ || true cp -v target/${{ matrix.runtime }}/release/node-subtensor build/ci_target/${{ matrix.runtime }}/release/node-subtensor || true mkdir -p build/ci_target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/ || true cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm build/ci_target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm || true +# mkdir -p build/scripts/ +# cp -v snapshot.json build/ || true +# cp -v scripts/localnet.sh build/scripts/localnet.sh || true + - name: Upload artifacts uses: actions/upload-artifact@v4 with: From e6847636e0c332637cc8cb6fcee7ea698c014489 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 17:38:02 -0400 Subject: [PATCH 321/379] Remove remaining asserts in the codebase --- pallets/subtensor/src/epoch/run_epoch.rs | 17 ++++++++++++----- pallets/subtensor/src/utils/misc.rs | 13 +++++++------ 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index f4e94099ed..7820a67f40 100644 --- 
a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1387,14 +1387,17 @@ impl Pallet { bonds: &[Vec], // previous epoch bonds consensus: &[I32F32], // previous epoch consensus weights ) -> Vec> { - assert!(weights.len() == bonds.len()); + let mut alphas = Vec::new(); + + if weights.len() != bonds.len() { + log::error!("compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", weights.len(), bonds.len()); + return alphas; + } // Get the high and low alpha values for the network. let alpha_sigmoid_steepness: I32F32 = Self::get_alpha_sigmoid_steepness(netuid); let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - let mut alphas = Vec::new(); - for (w_row, b_row) in weights.iter().zip(bonds.iter()) { let mut row_alphas = Vec::new(); @@ -1433,12 +1436,16 @@ impl Pallet { bonds: &[Vec<(u16, I32F32)>], // previous epoch bonds consensus: &[I32F32], // previous epoch consensus weights ) -> Vec> { - assert!(weights.len() == bonds.len()); + let mut alphas = Vec::with_capacity(consensus.len()); + + if weights.len() != bonds.len() { + log::error!("compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", weights.len(), bonds.len()); + return alphas; + } let alpha_sigmoid_steepness: I32F32 = Self::get_alpha_sigmoid_steepness(netuid); let (alpha_low, alpha_high): (I32F32, I32F32) = Self::get_alpha_values_32(netuid); - let mut alphas = Vec::with_capacity(consensus.len()); let zero = I32F32::from_num(0.0); // iterate over rows diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 88444a1b83..2a8e55cb63 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -253,12 +253,13 @@ impl Pallet { SubnetworkN::::get(netuid) ); log::debug!("uid = {uid:?}"); - assert!(uid < SubnetworkN::::get(netuid)); - PruningScores::::mutate(netuid, |v| { - if let Some(s) = v.get_mut(uid as usize) { - *s 
= pruning_score; - } - }); + if uid < SubnetworkN::::get(netuid)) { + PruningScores::::mutate(netuid, |v| { + if let Some(s) = v.get_mut(uid as usize) { + *s = pruning_score; + } + }); + } } pub fn set_validator_permit_for_uid(netuid: NetUid, uid: u16, validator_permit: bool) { let mut updated_validator_permits = Self::get_validator_permit(netuid); From 0f75c1b4627d693fac968e8fa728d627bd415870 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 18:21:35 -0400 Subject: [PATCH 322/379] Fix LastUpdate update in add_neuron --- pallets/subtensor/src/epoch/run_epoch.rs | 12 +++++- pallets/subtensor/src/subnets/uids.rs | 2 +- pallets/subtensor/src/tests/registration.rs | 41 ++++++++++++++++++++- pallets/subtensor/src/utils/misc.rs | 2 +- 4 files changed, 52 insertions(+), 5 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 7820a67f40..660690ae9f 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1390,7 +1390,11 @@ impl Pallet { let mut alphas = Vec::new(); if weights.len() != bonds.len() { - log::error!("compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", weights.len(), bonds.len()); + log::error!( + "compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", + weights.len(), + bonds.len() + ); return alphas; } @@ -1439,7 +1443,11 @@ impl Pallet { let mut alphas = Vec::with_capacity(consensus.len()); if weights.len() != bonds.len() { - log::error!("compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", weights.len(), bonds.len()); + log::error!( + "compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", + weights.len(), + bonds.len() + ); return alphas; } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index cf639f9fbf..2fcf981780 100644 --- 
a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -117,7 +117,7 @@ impl Pallet { for mecid in 0..MechanismCountCurrent::::get(netuid).into() { let netuid_index = Self::get_mechanism_storage_index(netuid, mecid.into()); Incentive::::mutate(netuid_index, |v| v.push(0)); - LastUpdate::::mutate(netuid_index, |v| v.push(block_number)); + Self::set_last_update_for_uid(netuid_index, next_uid, block_number); } Dividends::::mutate(netuid, |v| v.push(0)); PruningScores::::mutate(netuid, |v| v.push(0)); diff --git a/pallets/subtensor/src/tests/registration.rs b/pallets/subtensor/src/tests/registration.rs index 23013d9b70..48e887d606 100644 --- a/pallets/subtensor/src/tests/registration.rs +++ b/pallets/subtensor/src/tests/registration.rs @@ -9,7 +9,7 @@ use frame_support::{assert_err, assert_noop, assert_ok}; use frame_system::{Config, RawOrigin}; use sp_core::U256; use sp_runtime::traits::{DispatchInfoOf, TransactionExtension, TxBaseImplication}; -use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT, NetUid}; +use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT, NetUid, NetUidStorageIndex}; use super::mock; use super::mock::*; @@ -2149,6 +2149,45 @@ fn test_registration_disabled() { }); } +#[test] +fn test_last_update_correctness() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + let tempo: u16 = 13; + let hotkey_account_id = U256::from(1); + let burn_cost = 1000; + let coldkey_account_id = U256::from(667); // Neighbour of the beast, har har + //add network + SubtensorModule::set_burn(netuid, burn_cost.into()); + add_network(netuid, tempo, 0); + + let reserve = 1_000_000_000_000; + mock::setup_reserves(netuid, reserve.into(), reserve.into()); + + // Simulate existing neurons + let existing_neurons = 3; + SubnetworkN::::insert(netuid, existing_neurons); + + // Simulate no LastUpdate so far (can happen on mechanisms) + LastUpdate::::remove(NetUidStorageIndex::from(netuid)); + + // Give 
some $$$ to coldkey + SubtensorModule::add_balance_to_coldkey_account(&coldkey_account_id, 10000); + // Subscribe and check extrinsic output + assert_ok!(SubtensorModule::burned_register( + <::RuntimeOrigin>::signed(coldkey_account_id), + netuid, + hotkey_account_id + )); + + // Check that LastUpdate has existing_neurons + 1 elements now + assert_eq!( + LastUpdate::::get(NetUidStorageIndex::from(netuid)).len(), + (existing_neurons + 1) as usize + ); + }); +} + // #[ignore] // #[test] // fn test_hotkey_swap_ok() { diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 2a8e55cb63..8febdfe208 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -253,7 +253,7 @@ impl Pallet { SubnetworkN::::get(netuid) ); log::debug!("uid = {uid:?}"); - if uid < SubnetworkN::::get(netuid)) { + if uid < SubnetworkN::::get(netuid) { PruningScores::::mutate(netuid, |v| { if let Some(s) = v.get_mut(uid as usize) { *s = pruning_score; From 17b7adf34e3235a266a083479ecd82ee0b389575 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 15:45:17 -0700 Subject: [PATCH 323/379] prepare cross-platform artifacts --- .github/workflows/docker-localnet.yml | 36 ++++++++++++++++++--------- scripts/install_build_env.sh | 16 +++++++++--- 2 files changed, 36 insertions(+), 16 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 821d55f6e9..b5625d9a3e 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -58,7 +58,13 @@ jobs: strategy: matrix: runtime: ["fast-runtime", "non-fast-runtime"] - runs-on: [self-hosted, cax41] + platform: ["linux/amd64", "linux/arm64"] + + # runs-on: [self-hosted, cax41] + runs-on: > + ${{ matrix.platform == 'linux/arm64' + && fromJson('["self-hosted", "type-cax41", "image-arm-app-docker-ce"]') + || fromJson('["self-hosted", "type-cax41"]') }} steps: - name: Checkout code @@ -66,7 +72,7 @@ jobs: 
with: ref: ${{ needs.setup.outputs.ref }} - - name: Install rust + dependencies + - name: Install Rust + dependencies run: | chmod +x ./scripts/install_build_env.sh ./scripts/install_build_env.sh @@ -79,23 +85,29 @@ jobs: - name: Build binaries run: | export PATH="$HOME/.cargo/bin:$PATH" + + ARCH="${{ matrix.platform == 'linux/arm64' && 'aarch64' || 'x86_64' }}" + TARGET="${ARCH}-unknown-linux-gnu" + + rustup target add "$TARGET" + export CARGO_BUILD_TARGET="$TARGET" + if [ "${{ matrix.runtime }}" = "fast-runtime" ]; then ./scripts/localnet.sh --build-only else ./scripts/localnet.sh False --build-only fi - - name: Prepare artifacts for upload + - name: Prepare artifacts run: | - mkdir -p build/ci_target/${{ matrix.runtime }}/release/ || true - cp -v target/${{ matrix.runtime }}/release/node-subtensor build/ci_target/${{ matrix.runtime }}/release/node-subtensor || true - - mkdir -p build/ci_target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/ || true - cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm build/ci_target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm || true - -# mkdir -p build/scripts/ -# cp -v snapshot.json build/ || true -# cp -v scripts/localnet.sh build/scripts/localnet.sh || true + ARCH=$(echo "${{ matrix.platform }}" | cut -d'/' -f2) + RUNTIME="${{ matrix.runtime }}" + + mkdir -p build/ci_target/${ARCH}/${{ matrix.runtime }}/release/ + cp -v target/${CARGO_BUILD_TARGET}/${{ matrix.runtime }}/release/node-subtensor build/ci_target/${ARCH}/${{ matrix.runtime }}/release/ + + mkdir -p build/ci_target/${ARCH}/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/ + cp -v target/${CARGO_BUILD_TARGET}/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm build/ci_target/${ARCH}/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/ - name: Upload 
artifacts uses: actions/upload-artifact@v4 diff --git a/scripts/install_build_env.sh b/scripts/install_build_env.sh index ab18da5134..10d9629f53 100644 --- a/scripts/install_build_env.sh +++ b/scripts/install_build_env.sh @@ -35,7 +35,7 @@ else fi fi -# Linux system dependencies +# System Dependencies if [ "$OS" = "Linux" ]; then echo "[+] Installing dependencies on Linux..." @@ -44,15 +44,15 @@ if [ "$OS" = "Linux" ]; then else $SUDO sed -i 's|http://archive.ubuntu.com/ubuntu|http://mirrors.edge.kernel.org/ubuntu|g' /etc/apt/sources.list || true $SUDO apt-get update + $SUDO apt-get install -y ca-certificates $SUDO apt-get install -y --no-install-recommends \ - curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev + curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev \ + gcc-aarch64-linux-gnu gcc-x86-64-linux-gnu fi -# macOS system dependencies elif [ "$OS" = "Mac" ]; then echo "[+] Installing dependencies on macOS..." - # Check if brew is installed if ! command -v brew &> /dev/null; then echo "[!] Homebrew not found. Installing..." /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" @@ -72,6 +72,8 @@ else exit 1 fi +# Rust Toolchain + echo "[+] Installing Rust toolchain..." curl https://sh.rustup.rs -sSf | sh -s -- -y @@ -80,6 +82,12 @@ source "$HOME/.cargo/env" || export PATH="$HOME/.cargo/bin:$PATH" rustup toolchain install 1.88.0 --profile minimal rustup default 1.88.0 + +# Add Rust Targets + +echo "Adding Rust targets for wasm + cross-arch binaries..." rustup target add wasm32v1-none +rustup target add aarch64-unknown-linux-gnu +rustup target add x86_64-unknown-linux-gnu echo "[✓] Environment setup complete." 
\ No newline at end of file From fe103e6a2c053e831b3bf7646b9fb15f472487ae Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 18:48:52 -0400 Subject: [PATCH 324/379] Revert bad rename for SubnetNotExists error --- pallets/admin-utils/src/tests/mod.rs | 2 +- pallets/subtensor/src/coinbase/root.rs | 2 +- pallets/subtensor/src/lib.rs | 4 ++-- pallets/subtensor/src/staking/recycle_alpha.rs | 10 ++-------- pallets/subtensor/src/staking/remove_stake.rs | 5 +---- pallets/subtensor/src/staking/set_children.rs | 5 +---- pallets/subtensor/src/subnets/registration.rs | 10 ++-------- pallets/subtensor/src/subnets/subnet.rs | 7 ++----- pallets/subtensor/src/subnets/uids.rs | 5 +---- pallets/subtensor/src/tests/children.rs | 10 +++++----- pallets/subtensor/src/tests/networks.rs | 2 +- pallets/subtensor/src/tests/recycle_alpha.rs | 4 ++-- pallets/subtensor/src/tests/subnet.rs | 2 +- pallets/subtensor/src/transaction_extension.rs | 2 +- 14 files changed, 23 insertions(+), 47 deletions(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 0e0232859b..b6cafb71b7 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2638,7 +2638,7 @@ fn test_trim_to_max_allowed_uids() { NetUid::from(42), new_max_n ), - pallet_subtensor::Error::::MechanismDoesNotExist + pallet_subtensor::Error::::SubnetNotExists ); // New max n less than lower bound diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index ac99d03838..4cb9f177e1 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -369,7 +369,7 @@ impl Pallet { // 1. --- The network exists? ensure!( Self::if_subnet_exist(netuid) && netuid != NetUid::ROOT, - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); // 2. --- Perform the cleanup before removing the network. 
diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index e452e858bc..f53ee4f58a 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1985,7 +1985,7 @@ pub enum CustomTransactionError { ColdkeyInSwapSchedule, StakeAmountTooLow, BalanceTooLow, - SubnetDoesntExist, + SubnetNotExists, HotkeyAccountDoesntExist, NotEnoughStakeToWithdraw, RateLimitExceeded, @@ -2010,7 +2010,7 @@ impl From for u8 { CustomTransactionError::ColdkeyInSwapSchedule => 0, CustomTransactionError::StakeAmountTooLow => 1, CustomTransactionError::BalanceTooLow => 2, - CustomTransactionError::SubnetDoesntExist => 3, + CustomTransactionError::SubnetNotExists => 3, CustomTransactionError::HotkeyAccountDoesntExist => 4, CustomTransactionError::NotEnoughStakeToWithdraw => 5, CustomTransactionError::RateLimitExceeded => 6, diff --git a/pallets/subtensor/src/staking/recycle_alpha.rs b/pallets/subtensor/src/staking/recycle_alpha.rs index 371c5895e8..7334c8126a 100644 --- a/pallets/subtensor/src/staking/recycle_alpha.rs +++ b/pallets/subtensor/src/staking/recycle_alpha.rs @@ -23,10 +23,7 @@ impl Pallet { ) -> DispatchResult { let coldkey: T::AccountId = ensure_signed(origin)?; - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); ensure!( !netuid.is_root(), @@ -91,10 +88,7 @@ impl Pallet { ) -> DispatchResult { let coldkey = ensure_signed(origin)?; - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); ensure!( !netuid.is_root(), diff --git a/pallets/subtensor/src/staking/remove_stake.rs b/pallets/subtensor/src/staking/remove_stake.rs index bb136c1196..9d610ea88f 100644 --- a/pallets/subtensor/src/staking/remove_stake.rs +++ b/pallets/subtensor/src/staking/remove_stake.rs @@ -443,10 +443,7 @@ impl Pallet { pub fn destroy_alpha_in_out_stakes(netuid: NetUid) -> 
DispatchResult { // 1) Ensure the subnet exists. - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); // 2) Owner / lock cost. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); diff --git a/pallets/subtensor/src/staking/set_children.rs b/pallets/subtensor/src/staking/set_children.rs index b4629ec54d..cf7103b7ab 100644 --- a/pallets/subtensor/src/staking/set_children.rs +++ b/pallets/subtensor/src/staking/set_children.rs @@ -62,10 +62,7 @@ impl Pallet { ); // Check that the network we are trying to create the child on exists. - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); // Check that the coldkey owns the hotkey. ensure!( diff --git a/pallets/subtensor/src/subnets/registration.rs b/pallets/subtensor/src/subnets/registration.rs index 90ba2ea1aa..bd7bdeed57 100644 --- a/pallets/subtensor/src/subnets/registration.rs +++ b/pallets/subtensor/src/subnets/registration.rs @@ -78,10 +78,7 @@ impl Pallet { !netuid.is_root(), Error::::RegistrationNotPermittedOnRootSubnet ); - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); // --- 3. Ensure the passed network allows registrations. ensure!( @@ -236,10 +233,7 @@ impl Pallet { !netuid.is_root(), Error::::RegistrationNotPermittedOnRootSubnet ); - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); // --- 3. Ensure the passed network allows registrations. 
ensure!( diff --git a/pallets/subtensor/src/subnets/subnet.rs b/pallets/subtensor/src/subnets/subnet.rs index f44859bbe9..8439297e14 100644 --- a/pallets/subtensor/src/subnets/subnet.rs +++ b/pallets/subtensor/src/subnets/subnet.rs @@ -343,7 +343,7 @@ impl Pallet { /// /// # Raises /// - /// * `Error::::MechanismDoesNotExist`: If the subnet does not exist. + /// * `Error::::SubnetNotExists`: If the subnet does not exist. /// * `DispatchError::BadOrigin`: If the caller is not the subnet owner. /// * `Error::::FirstEmissionBlockNumberAlreadySet`: If the last emission block number has already been set. /// @@ -351,10 +351,7 @@ impl Pallet { /// /// * `DispatchResult`: A result indicating the success or failure of the operation. pub fn do_start_call(origin: T::RuntimeOrigin, netuid: NetUid) -> DispatchResult { - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); Self::ensure_subnet_owner(origin, netuid)?; ensure!( FirstEmissionBlockNumber::::get(netuid).is_none(), diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 2fcf981780..b68fabfbd5 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -133,10 +133,7 @@ impl Pallet { pub fn trim_to_max_allowed_uids(netuid: NetUid, max_n: u16) -> DispatchResult { // Reasonable limits - ensure!( - Self::if_subnet_exist(netuid), - Error::::MechanismDoesNotExist - ); + ensure!(Self::if_subnet_exist(netuid), Error::::SubnetNotExists); ensure!( max_n >= MinAllowedUids::::get(netuid), Error::::InvalidValue diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index cf11cf6190..0fee0af2ca 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -62,7 +62,7 @@ fn test_do_set_child_singular_network_does_not_exist() { netuid, vec![(proportion, child)] ), - 
Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } @@ -328,7 +328,7 @@ fn test_add_singular_child() { netuid, vec![(u64::MAX, child)] ), - Err(Error::::MechanismDoesNotExist.into()) + Err(Error::::SubnetNotExists.into()) ); add_network(netuid, 1, 0); step_rate_limit(&TransactionType::SetChildren, netuid); @@ -472,7 +472,7 @@ fn test_do_set_empty_children_network_does_not_exist() { netuid, vec![] ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } @@ -601,7 +601,7 @@ fn test_do_schedule_children_multiple_network_does_not_exist() { netuid, vec![(proportion, child1)] ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } @@ -1200,7 +1200,7 @@ fn test_do_revoke_children_multiple_network_does_not_exist() { netuid, vec![(u64::MAX / 2, child1), (u64::MAX / 2, child2)] ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } diff --git a/pallets/subtensor/src/tests/networks.rs b/pallets/subtensor/src/tests/networks.rs index 779879a129..42de84f54f 100644 --- a/pallets/subtensor/src/tests/networks.rs +++ b/pallets/subtensor/src/tests/networks.rs @@ -270,7 +270,7 @@ fn dissolve_nonexistent_subnet_fails() { new_test_ext(0).execute_with(|| { assert_err!( SubtensorModule::do_dissolve_network(9_999.into()), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } diff --git a/pallets/subtensor/src/tests/recycle_alpha.rs b/pallets/subtensor/src/tests/recycle_alpha.rs index d230af6f30..173a03aea1 100644 --- a/pallets/subtensor/src/tests/recycle_alpha.rs +++ b/pallets/subtensor/src/tests/recycle_alpha.rs @@ -430,7 +430,7 @@ fn test_recycle_errors() { 100_000.into(), 99.into() // non-existent subnet ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); assert_noop!( @@ -502,7 +502,7 @@ fn test_burn_errors() { 100_000.into(), 99.into() // non-existent subnet ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); assert_noop!( diff --git a/pallets/subtensor/src/tests/subnet.rs 
b/pallets/subtensor/src/tests/subnet.rs index a1331a1707..a11eae759e 100644 --- a/pallets/subtensor/src/tests/subnet.rs +++ b/pallets/subtensor/src/tests/subnet.rs @@ -52,7 +52,7 @@ fn test_do_start_call_fail_with_not_existed_subnet() { <::RuntimeOrigin>::signed(coldkey_account_id), netuid ), - Error::::MechanismDoesNotExist + Error::::SubnetNotExists ); }); } diff --git a/pallets/subtensor/src/transaction_extension.rs b/pallets/subtensor/src/transaction_extension.rs index b56dff0ea0..42e45b5fd4 100644 --- a/pallets/subtensor/src/transaction_extension.rs +++ b/pallets/subtensor/src/transaction_extension.rs @@ -53,7 +53,7 @@ where if let Err(err) = result { Err(match err { Error::::AmountTooLow => CustomTransactionError::StakeAmountTooLow.into(), - Error::::SubnetNotExists => CustomTransactionError::SubnetDoesntExist.into(), + Error::::SubnetNotExists => CustomTransactionError::SubnetNotExists.into(), Error::::NotEnoughBalanceToStake => CustomTransactionError::BalanceTooLow.into(), Error::::HotKeyAccountNotExists => { CustomTransactionError::HotkeyAccountDoesntExist.into() From 8cf762e54609521c4b901e276975268f02686389 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 18 Sep 2025 18:56:39 -0400 Subject: [PATCH 325/379] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index b6857cf7ae..254bec73a3 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 317, + spec_version: 318, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From a75994c9e2a227b3c77a2e41cd3b019967418ab0 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 16:17:44 -0700 Subject: [PATCH 326/379] try step 1 --- .github/workflows/docker-localnet.yml | 49 ++++++++++++++++++++------- scripts/install_build_env.sh | 16 ++++++--- scripts/localnet.sh | 20 +++++++++-- 3 files changed, 67 insertions(+), 18 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 821d55f6e9..cf2b39de0f 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -57,8 +57,25 @@ jobs: needs: setup strategy: matrix: + platform: + - runner: macos-13 + arch: x86_64 + triple: x86_64-apple-darwin + + - runner: macos-14 + arch: aarch64 + triple: aarch64-apple-darwin + + - runner: ubuntu-latest + arch: x86_64 + triple: x86_64-unknown-linux-gnu + + - runner: ubuntu-24.04-arm + arch: aarch64 + triple: aarch64-unknown-linux-gnu + runtime: ["fast-runtime", "non-fast-runtime"] - runs-on: [self-hosted, cax41] + runs-on: ${{ matrix.platform.runner }} steps: - name: Checkout code @@ -66,11 +83,15 @@ jobs: with: ref: ${{ needs.setup.outputs.ref }} - - name: Install rust + dependencies + - name: Install Rust + dependencies run: | chmod +x ./scripts/install_build_env.sh ./scripts/install_build_env.sh + - name: Add Rust target triple + run: | + rustup target add ${{ matrix.platform.triple }} + - name: Patch limits for local run run: | chmod +x ./scripts/localnet_patch.sh @@ -79,6 +100,8 @@ jobs: - name: Build binaries run: | export PATH="$HOME/.cargo/bin:$PATH" + export CARGO_BUILD_TARGET="${{ matrix.platform.triple }}" + if [ "${{ matrix.runtime }}" = "fast-runtime" ]; then ./scripts/localnet.sh --build-only else @@ -87,20 +110,22 @@ jobs: - name: Prepare artifacts for upload run: | - mkdir -p build/ci_target/${{ matrix.runtime }}/release/ || true - cp -v 
target/${{ matrix.runtime }}/release/node-subtensor build/ci_target/${{ matrix.runtime }}/release/node-subtensor || true - - mkdir -p build/ci_target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/ || true - cp -v target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm build/ci_target/${{ matrix.runtime }}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm || true + ARCH="${{ matrix.platform.arch }}" + RUNTIME="${{ matrix.runtime }}" + TRIPLE="${{ matrix.platform.triple }}" + + mkdir -p build/ci_target/${ARCH}/${RUNTIME}/release/ + cp -v target/${TRIPLE}/${RUNTIME}/release/node-subtensor \ + build/ci_target/${ARCH}/${RUNTIME}/release/ -# mkdir -p build/scripts/ -# cp -v snapshot.json build/ || true -# cp -v scripts/localnet.sh build/scripts/localnet.sh || true + mkdir -p build/ci_target/${ARCH}/${RUNTIME}/release/wbuild/node-subtensor-runtime/ + cp -v target/${TRIPLE}/${RUNTIME}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ + build/ci_target/${ARCH}/${RUNTIME}/release/wbuild/node-subtensor-runtime/ - - name: Upload artifacts + - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: artifacts-${{ matrix.runtime }} + name: binaries-${{ matrix.platform.triple }}-${{ matrix.runtime }} path: build/ if-no-files-found: error diff --git a/scripts/install_build_env.sh b/scripts/install_build_env.sh index ab18da5134..10d9629f53 100644 --- a/scripts/install_build_env.sh +++ b/scripts/install_build_env.sh @@ -35,7 +35,7 @@ else fi fi -# Linux system dependencies +# System Dependencies if [ "$OS" = "Linux" ]; then echo "[+] Installing dependencies on Linux..." 
@@ -44,15 +44,15 @@ if [ "$OS" = "Linux" ]; then else $SUDO sed -i 's|http://archive.ubuntu.com/ubuntu|http://mirrors.edge.kernel.org/ubuntu|g' /etc/apt/sources.list || true $SUDO apt-get update + $SUDO apt-get install -y ca-certificates $SUDO apt-get install -y --no-install-recommends \ - curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev + curl build-essential protobuf-compiler clang git pkg-config libssl-dev llvm libudev-dev \ + gcc-aarch64-linux-gnu gcc-x86-64-linux-gnu fi -# macOS system dependencies elif [ "$OS" = "Mac" ]; then echo "[+] Installing dependencies on macOS..." - # Check if brew is installed if ! command -v brew &> /dev/null; then echo "[!] Homebrew not found. Installing..." /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" @@ -72,6 +72,8 @@ else exit 1 fi +# Rust Toolchain + echo "[+] Installing Rust toolchain..." curl https://sh.rustup.rs -sSf | sh -s -- -y @@ -80,6 +82,12 @@ source "$HOME/.cargo/env" || export PATH="$HOME/.cargo/bin:$PATH" rustup toolchain install 1.88.0 --profile minimal rustup default 1.88.0 + +# Add Rust Targets + +echo "Adding Rust targets for wasm + cross-arch binaries..." rustup target add wasm32v1-none +rustup target add aarch64-unknown-linux-gnu +rustup target add x86_64-unknown-linux-gnu echo "[✓] Environment setup complete." \ No newline at end of file diff --git a/scripts/localnet.sh b/scripts/localnet.sh index 703dd8b885..a15a9469fa 100755 --- a/scripts/localnet.sh +++ b/scripts/localnet.sh @@ -54,9 +54,25 @@ if [ ! -d "$SPEC_PATH" ]; then mkdir -p "$SPEC_PATH" fi -if [[ $BUILD_BINARY == "1" ]]; then +if [[ "$BUILD_BINARY" == "1" ]]; then echo "*** Building substrate binary..." 
- CARGO_TARGET_DIR="$BUILD_DIR" cargo build --workspace --profile=release --features "$FEATURES" --manifest-path "$BASE_DIR/Cargo.toml" + + BUILD_CMD=( + cargo build + --workspace + --profile=release + --features "$FEATURES" + --manifest-path "$BASE_DIR/Cargo.toml" + ) + + if [[ -n "$CARGO_BUILD_TARGET" ]]; then + echo "[+] Cross-compiling for target: $CARGO_BUILD_TARGET" + BUILD_CMD+=(--target "$CARGO_BUILD_TARGET") + else + echo "[+] Building for host architecture" + fi + + CARGO_TARGET_DIR="$BUILD_DIR" "${BUILD_CMD[@]}" echo "*** Binary compiled" fi From 788e04b8caa009a62d4ad53048609f92f1234f71 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 16:36:41 -0700 Subject: [PATCH 327/379] step 2 --- .github/workflows/docker-localnet.yml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index cf2b39de0f..0e19bab9ec 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -54,18 +54,11 @@ jobs: # build artifacts for fast-runtime and non-fast-runtime build: + name: Build • ${{ matrix.platform.triple }} • ${{ matrix.runtime }} needs: setup strategy: matrix: platform: - - runner: macos-13 - arch: x86_64 - triple: x86_64-apple-darwin - - - runner: macos-14 - arch: aarch64 - triple: aarch64-apple-darwin - - runner: ubuntu-latest arch: x86_64 triple: x86_64-unknown-linux-gnu @@ -75,6 +68,7 @@ jobs: triple: aarch64-unknown-linux-gnu runtime: ["fast-runtime", "non-fast-runtime"] + runs-on: ${{ matrix.platform.runner }} steps: From c71d3ec46ce7699a579f512f4f7dccdaad747f41 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 17:18:58 -0700 Subject: [PATCH 328/379] step 3 --- .github/workflows/docker-localnet.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 0e19bab9ec..15ba971001 100644 --- 
a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -109,11 +109,11 @@ jobs: TRIPLE="${{ matrix.platform.triple }}" mkdir -p build/ci_target/${ARCH}/${RUNTIME}/release/ - cp -v target/${TRIPLE}/${RUNTIME}/release/node-subtensor \ + cp -v target/${TRIPLE}/release/node-subtensor \ build/ci_target/${ARCH}/${RUNTIME}/release/ mkdir -p build/ci_target/${ARCH}/${RUNTIME}/release/wbuild/node-subtensor-runtime/ - cp -v target/${TRIPLE}/${RUNTIME}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ + cp -v target/${TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ build/ci_target/${ARCH}/${RUNTIME}/release/wbuild/node-subtensor-runtime/ - name: Upload artifact From 5332da94932c9dfd664515f7ddcb665104f554e9 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 18:04:18 -0700 Subject: [PATCH 329/379] step 4 - TRIPLE instead of arch --- .github/workflows/docker-localnet.yml | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 15ba971001..7840a537e5 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -60,11 +60,9 @@ jobs: matrix: platform: - runner: ubuntu-latest - arch: x86_64 triple: x86_64-unknown-linux-gnu - runner: ubuntu-24.04-arm - arch: aarch64 triple: aarch64-unknown-linux-gnu runtime: ["fast-runtime", "non-fast-runtime"] @@ -104,17 +102,16 @@ jobs: - name: Prepare artifacts for upload run: | - ARCH="${{ matrix.platform.arch }}" RUNTIME="${{ matrix.runtime }}" TRIPLE="${{ matrix.platform.triple }}" - mkdir -p build/ci_target/${ARCH}/${RUNTIME}/release/ - cp -v target/${TRIPLE}/release/node-subtensor \ - build/ci_target/${ARCH}/${RUNTIME}/release/ + mkdir -p build/ci_target/${RUNTIME}${TRIPLE}/release/ + cp -v target/${RUNTIME}${TRIPLE}/release/node-subtensor \ + 
build/ci_target/${RUNTIME}${TRIPLE}/release/ - mkdir -p build/ci_target/${ARCH}/${RUNTIME}/release/wbuild/node-subtensor-runtime/ - cp -v target/${TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ - build/ci_target/${ARCH}/${RUNTIME}/release/wbuild/node-subtensor-runtime/ + mkdir -p build/ci_target/${RUNTIME}${TRIPLE}/release/wbuild/node-subtensor-runtime/ + cp -v target/${RUNTIME}${TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ + build/ci_target/${RUNTIME}${TRIPLE}/release/wbuild/node-subtensor-runtime/ - name: Upload artifact uses: actions/upload-artifact@v4 From 59da9cc62c466a1b7863a23f9ee79b0ca2151033 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 18:26:12 -0700 Subject: [PATCH 330/379] ops - missed / --- .github/workflows/docker-localnet.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 7840a537e5..146f9da3c4 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -105,13 +105,13 @@ jobs: RUNTIME="${{ matrix.runtime }}" TRIPLE="${{ matrix.platform.triple }}" - mkdir -p build/ci_target/${RUNTIME}${TRIPLE}/release/ - cp -v target/${RUNTIME}${TRIPLE}/release/node-subtensor \ - build/ci_target/${RUNTIME}${TRIPLE}/release/ + mkdir -p build/ci_target/${RUNTIME}/${TRIPLE}/release/ + cp -v target/${RUNTIME}/${TRIPLE}/release/node-subtensor \ + build/ci_target/${RUNTIME}/${TRIPLE}/release/ - mkdir -p build/ci_target/${RUNTIME}${TRIPLE}/release/wbuild/node-subtensor-runtime/ - cp -v target/${RUNTIME}${TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ - build/ci_target/${RUNTIME}${TRIPLE}/release/wbuild/node-subtensor-runtime/ + mkdir -p build/ci_target/${RUNTIME}/${TRIPLE}/release/wbuild/node-subtensor-runtime/ + cp -v 
target/${RUNTIME}/${TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ + build/ci_target/${RUNTIME}/${TRIPLE}/release/wbuild/node-subtensor-runtime/ - name: Upload artifact uses: actions/upload-artifact@v4 From b43c96c43f476a9f77179ded15b4531e40494d18 Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 18 Sep 2025 19:32:45 -0700 Subject: [PATCH 331/379] check TARGETARCH and TARGETOS --- .github/workflows/docker-localnet.yml | 16 ++++++---------- Dockerfile-localnet | 4 ++++ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 146f9da3c4..32efed099c 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -134,17 +134,12 @@ jobs: with: ref: ${{ needs.setup.outputs.ref }} - - name: Download fast-runtime artifacts - uses: actions/download-artifact@v4 + - name: Download all binary artifacts + uses: actions/download-artifact@v5 with: - name: artifacts-fast-runtime - path: ${{ github.workspace }} - - - name: Download non-fast-runtime artifacts - uses: actions/download-artifact@v4 - with: - name: artifacts-non-fast-runtime - path: ${{ github.workspace }} + pattern: binaries-* + path: build/ci_target + merge-multiple: true # to be make sure - name: Print full workspace directory tree @@ -180,6 +175,7 @@ jobs: file: Dockerfile-localnet build-args: | BUILT_IN_CI="Boom shakalaka" + push: true platforms: linux/amd64,linux/arm64 tags: | diff --git a/Dockerfile-localnet b/Dockerfile-localnet index d365174bd2..6fa8671c6e 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -1,5 +1,9 @@ ARG BASE_IMAGE=ubuntu:latest +ARG TARGETARCH +ARG TARGETOS +RUN echo "Building for: $TARGETOS / $TARGETARCH" + FROM $BASE_IMAGE AS builder SHELL ["/bin/bash", "-c"] From a230878eab67db619510ff637aac4f7cfd6d23ba Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 06:54:42 -0700 Subject: [PATCH 332/379] check 
TARGETARCH in debug --- Dockerfile-localnet | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 6fa8671c6e..33b2fdff0b 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -1,10 +1,10 @@ ARG BASE_IMAGE=ubuntu:latest +FROM $BASE_IMAGE AS builder + ARG TARGETARCH -ARG TARGETOS -RUN echo "Building for: $TARGETOS / $TARGETARCH" +RUN echo ">>> Building for $TARGETARCH" -FROM $BASE_IMAGE AS builder SHELL ["/bin/bash", "-c"] # Set noninteractive mode for apt-get From 8b3d120d7fa8800c72b124fc9639b41da3e051b2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 19 Sep 2025 14:07:05 +0000 Subject: [PATCH 333/379] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index d6a199b0f1..20f377a925 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -233,7 +233,7 @@ mod dispatches { /// #[pallet::call_index(96)] #[pallet::weight((Weight::from_parts(67_770_000, 0) - .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_weights( origin: T::RuntimeOrigin, @@ -302,7 +302,7 @@ mod dispatches { /// #[pallet::call_index(100)] #[pallet::weight((Weight::from_parts(100_500_000, 0) - .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn batch_commit_weights( origin: OriginFor, @@ -1314,8 +1314,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) - .saturating_add(T::DbWeight::get().reads(37_u64)) - 
.saturating_add(T::DbWeight::get().writes(51_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(39_u64)) + .saturating_add(T::DbWeight::get().writes(57_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) } @@ -1601,8 +1601,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) - .saturating_add(T::DbWeight::get().reads(36_u64)) - .saturating_add(T::DbWeight::get().writes(50_u64)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(38_u64)) + .saturating_add(T::DbWeight::get().writes(56_u64)), DispatchClass::Normal, Pays::Yes))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, @@ -2265,7 +2265,7 @@ mod dispatches { /// - The client (bittensor-drand) version #[pallet::call_index(113)] #[pallet::weight((Weight::from_parts(80_690_000, 0) - .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( origin: T::RuntimeOrigin, From 3b3dcccdc4d81ad692429f3e28ce5b9c6116726b Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 07:41:57 -0700 Subject: [PATCH 334/379] map required binaries for the specific architecture --- Dockerfile-localnet | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 33b2fdff0b..7809d3a983 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -30,8 +30,11 @@ ENV PATH="/root/.cargo/bin:${PATH}" RUN apt-get update && apt-get install -y ca-certificates # Echo the value -RUN echo "BUILT_IN_CI=$BUILT_IN_CI" +RUN echo "[*] BUILT_IN_CI=$BUILT_IN_CI" # Install deps if $BUILT_IN_CI wasn't passed +# 
BUILD_TRIPLE are located on `.github/workflows/docker-localnet.yml` in a job `build:matrix:platform:triple` +# If these are updated in the workflow, then we need to update here in `elif [ -d "/build/ci_target" ]` section. +# We substitute the related binaries for the required Docker image layer architecture. RUN if [ -z "$BUILT_IN_CI" ]; then \ echo "[*] Installing env and building binaries..." && \ chmod +x ./scripts/install_build_env.sh && \ @@ -39,9 +42,25 @@ RUN if [ -z "$BUILT_IN_CI" ]; then \ ./scripts/localnet.sh --build-only && \ ./scripts/localnet.sh False --build-only ; \ elif [ -d "/build/ci_target" ]; then \ - echo "[*] BUILT_IN_CI is set → skipping install + build." && \ - echo "[*] Renaming /build/ci_target → /build/target" && \ - mv /build/ci_target /build/target ; \ + echo "[*] BUILT_IN_CI is set → using prebuilt binaries." && \ + \ + echo "[*] Mapping TARGETARCH=$TARGETARCH to Rust triple..." && \ + if [ "$TARGETARCH" = "amd64" ]; then \ + export BUILD_TRIPLE="x86_64-unknown-linux-gnu"; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + export BUILD_TRIPLE="aarch64-unknown-linux-gnu"; \ + else \ + echo "[!] Unknown TARGETARCH: $TARGETARCH" && exit 1; \ + fi && \ + echo "[*] Using BUILD_TRIPLE=$BUILD_TRIPLE" && \ + \ + echo "[*] Copying binaries to expected /build/target layout..." && \ + for RUNTIME in fast-runtime non-fast-runtime; do \ + mkdir -p /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime && \ + cp -v /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/node-subtensor /build/target/${RUNTIME}/release/node-subtensor && \ + cp -v /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ + /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm ; \ + done ; \ else \ echo "[!] BUILT_IN_CI is set but /build/ci_target not found. Exiting." 
&& exit 1 ; \ fi From d5d4ca53714f2698d7678449de6a4d49e11e4f3a Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 08:22:44 -0700 Subject: [PATCH 335/379] making logic clear --- Dockerfile-localnet | 31 +++++---------------- scripts/install_prebuilt_binaries.sh | 41 ++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 24 deletions(-) create mode 100644 scripts/install_prebuilt_binaries.sh diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 7809d3a983..153d74996e 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -30,11 +30,11 @@ ENV PATH="/root/.cargo/bin:${PATH}" RUN apt-get update && apt-get install -y ca-certificates # Echo the value -RUN echo "[*] BUILT_IN_CI=$BUILT_IN_CI" -# Install deps if $BUILT_IN_CI wasn't passed -# BUILD_TRIPLE are located on `.github/workflows/docker-localnet.yml` in a job `build:matrix:platform:triple` -# If these are updated in the workflow, then we need to update here in `elif [ -d "/build/ci_target" ]` section. -# We substitute the related binaries for the required Docker image layer architecture. +RUN echo "BUILT_IN_CI=$BUILT_IN_CI" + +# If the BUILT_IN_CI local environment variable isn't passed, node building occurs within Docker (used for local Docker +# image building). Otherwise, building occurs in advance for each architecture (in CI) and is substituted for the +# required image layer during the build. RUN if [ -z "$BUILT_IN_CI" ]; then \ echo "[*] Installing env and building binaries..." && \ chmod +x ./scripts/install_build_env.sh && \ @@ -42,25 +42,8 @@ RUN if [ -z "$BUILT_IN_CI" ]; then \ ./scripts/localnet.sh --build-only && \ ./scripts/localnet.sh False --build-only ; \ elif [ -d "/build/ci_target" ]; then \ - echo "[*] BUILT_IN_CI is set → using prebuilt binaries." && \ - \ - echo "[*] Mapping TARGETARCH=$TARGETARCH to Rust triple..." 
&& \ - if [ "$TARGETARCH" = "amd64" ]; then \ - export BUILD_TRIPLE="x86_64-unknown-linux-gnu"; \ - elif [ "$TARGETARCH" = "arm64" ]; then \ - export BUILD_TRIPLE="aarch64-unknown-linux-gnu"; \ - else \ - echo "[!] Unknown TARGETARCH: $TARGETARCH" && exit 1; \ - fi && \ - echo "[*] Using BUILD_TRIPLE=$BUILD_TRIPLE" && \ - \ - echo "[*] Copying binaries to expected /build/target layout..." && \ - for RUNTIME in fast-runtime non-fast-runtime; do \ - mkdir -p /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime && \ - cp -v /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/node-subtensor /build/target/${RUNTIME}/release/node-subtensor && \ - cp -v /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ - /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm ; \ - done ; \ + chmod +x ./scripts/install_prebuilt_binaries.sh && \ + ./scripts/install_prebuilt_binaries.sh ; \ else \ echo "[!] BUILT_IN_CI is set but /build/ci_target not found. Exiting." && exit 1 ; \ fi diff --git a/scripts/install_prebuilt_binaries.sh b/scripts/install_prebuilt_binaries.sh new file mode 100644 index 0000000000..cbf3907177 --- /dev/null +++ b/scripts/install_prebuilt_binaries.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +echo "" +echo "######################################################################" +echo "### WARNING: DO NOT MODIFY THIS SCRIPT UNLESS YOU KNOW WHY! ###" +echo "### ###" +echo "### This script is used by: ###" +echo "### • .github/workflows/docker-localnet.yml ###" +echo "### • Dockerfile-localnet ###" +echo "### ###" +echo "### Any changes may break CI builds or local Docker environments. ###" +echo "######################################################################" +echo "" + +set -e + +echo "[*] BUILT_IN_CI is set → using prebuilt binaries." +echo "[*] Mapping TARGETARCH=${TARGETARCH} to Rust triple..." 
+ +# BUILD_TRIPLE are located on `.github/workflows/docker-localnet.yml` in a job `build:matrix:platform:triple` +# If these are updated in the workflow, then we need to update here in `elif [ -d "/build/ci_target" ]` section. +# We substitute the related binaries for the required Docker image layer architecture. +if [ "$TARGETARCH" = "amd64" ]; then + BUILD_TRIPLE="x86_64-unknown-linux-gnu" +elif [ "$TARGETARCH" = "arm64" ]; then + BUILD_TRIPLE="aarch64-unknown-linux-gnu" +else + echo "[!] Unknown TARGETARCH: ${TARGETARCH}" >&2 + exit 1 +fi + +echo "[*] Using BUILD_TRIPLE=$BUILD_TRIPLE" +echo "[*] Copying binaries to expected /build/target layout..." + +for RUNTIME in fast-runtime non-fast-runtime; do + mkdir -p /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime + cp -v /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/node-subtensor \ + /build/target/${RUNTIME}/release/node-subtensor + cp -v /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ + /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm +done \ No newline at end of file From 9b050ebcde2247e751a5ecd23f3f37da60c48fa7 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 09:03:50 -0700 Subject: [PATCH 336/379] wrong path in Download all binary artifacts --- .github/workflows/docker-localnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 32efed099c..fb564ccfa6 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -138,7 +138,7 @@ jobs: uses: actions/download-artifact@v5 with: pattern: binaries-* - path: build/ci_target + path: build/ merge-multiple: true # to be make sure From 7c62b8f4e9617dc746cbe1712db45254ce47af52 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Fri, 19 Sep 2025 12:13:41 -0400 Subject: 
[PATCH 337/379] Add error logging where runtime asserts were removed --- pallets/subtensor/src/epoch/math.rs | 112 +++++++++++++++++++++++++++- pallets/subtensor/src/utils/misc.rs | 6 ++ 2 files changed, 116 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index ccc23a1bed..4b613fa961 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -292,6 +292,14 @@ pub fn inplace_row_normalize_64(x: &mut [Vec]) { /// Returns x / y for input vectors x and y, if y == 0 return 0. pub fn vecdiv(x: &[I32F32], y: &[I32F32]) -> Vec { + if x.len() != y.len() { + log::error!( + "vecdiv input lengths are not equal: {:?} != {:?}", + x.len(), + y.len() + ); + } + let zero = I32F32::saturating_from_num(0); let mut out = Vec::with_capacity(x.len()); @@ -477,6 +485,14 @@ pub fn inplace_col_max_upscale(x: &mut [Vec]) { // Apply mask to vector, mask=true will mask out, i.e. set to 0. pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) { + if mask.len() != vector.len() { + log::error!( + "inplace_mask_vector input lengths are not equal: {:?} != {:?}", + mask.len(), + vector.len() + ); + } + if mask.is_empty() { return; } @@ -490,6 +506,13 @@ pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) { // Apply mask to matrix, mask=true will mask out, i.e. set to 0. pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut [Vec]) { + if mask.len() != matrix.len() { + log::error!( + "inplace_mask_matrix input sizes are not equal: {:?} != {:?}", + mask.len(), + matrix.len() + ); + } let Some(first_row) = mask.first() else { return; }; @@ -513,6 +536,13 @@ pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut [Vec]) { // Apply row mask to matrix, mask=true will mask out, i.e. set to 0. 
pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { + if mask.len() != matrix.len() { + log::error!( + "inplace_mask_rows input sizes are not equal: {:?} != {:?}", + mask.len(), + matrix.len() + ); + } let Some(first_row) = matrix.first() else { return; }; @@ -528,6 +558,13 @@ pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { // Apply column mask to matrix, mask=true will mask out, i.e. set to 0. // Assumes each column has the same length. pub fn inplace_mask_cols(mask: &[bool], matrix: &mut [Vec]) { + if mask.len() != matrix.len() { + log::error!( + "inplace_mask_cols input sizes are not equal: {:?} != {:?}", + mask.len(), + matrix.len() + ); + } if matrix.is_empty() { return; }; @@ -603,10 +640,13 @@ pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index: return; } if matrix.len() != first_row.len() { - log::error!("inplace_mask_diag_except_index: input matrix is not square"); + log::error!( + "inplace_mask_diag input matrix is not square: {:?} != {:?}", + matrix.len(), + first_row.len() + ); return; } - let diag_at_index = matrix .get(except_index as usize) .and_then(|row| row.get(except_index as usize)) @@ -752,6 +792,13 @@ pub fn matmul(matrix: &[Vec], vector: &[I32F32]) -> Vec { if cols == 0 { return vec![]; } + if matrix.len() != vector.len() { + log::error!( + "matmul input sizes are not equal: {:?} != {:?}", + matrix.len(), + vector.len() + ); + } let zero = I32F32::saturating_from_num(0.0); let mut acc = vec![zero; cols]; @@ -783,6 +830,13 @@ pub fn matmul_transpose(matrix: &[Vec], vector: &[I32F32]) -> Vec], mat2: &[Vec], ratio: I32F32) -> if mat1.is_empty() || mat1.first().map(|r| r.is_empty()).unwrap_or(true) { return vec![vec![]]; } + if mat1.len() != mat2.len() { + log::error!( + "interpolate mat1.len() != mat2.len(): {:?} != {:?}", + mat1.len(), + mat2.len() + ); + } let zero = I32F32::saturating_from_num(0.0); let cols = mat1.first().map(|r| r.len()).unwrap_or(0); @@ -1147,8 +1213,16 @@ pub fn interpolate(mat1: 
&[Vec], mat2: &[Vec], ratio: I32F32) -> for row1 in mat1.iter() { let (Some(row2), Some(out_row)) = (m2_it.next(), out_it.next()) else { + log::error!("interpolate: No more rows in mat2"); break; }; + if row1.len() != row2.len() { + log::error!( + "interpolate row1.len() != row2.len(): {:?} != {:?}", + row1.len(), + row2.len() + ); + } // Walk elements of row1, row2, and out_row in lockstep; stop at the shortest. let mut r1_it = row1.iter(); @@ -1184,6 +1258,11 @@ pub fn interpolate_sparse( } if mat1.len() != mat2.len() { // In case if sizes mismatch, return clipped weights + log::error!( + "interpolate_sparse: mat1.len() != mat2.len(): {:?} != {:?}", + mat1.len(), + mat2.len() + ); return mat2.to_owned(); } let rows = mat1.len(); @@ -1329,6 +1408,14 @@ pub fn mat_ema_sparse( old: &[Vec<(u16, I32F32)>], alpha: I32F32, ) -> Vec> { + if new.len() != old.len() { + log::error!( + "mat_ema_sparse: new.len() != old.len(): {:?} != {:?}", + new.len(), + old.len() + ); + } + let zero = I32F32::saturating_from_num(0.0); let one_minus_alpha = I32F32::saturating_from_num(1.0).saturating_sub(alpha); @@ -1381,6 +1468,12 @@ pub fn mat_ema_alpha_sparse( ) -> Vec> { // If shapes don't match, just return `new` if new.len() != old.len() || new.len() != alpha.len() { + log::error!( + "mat_ema_alpha_sparse shapes don't match: {:?} vs. {:?} vs. {:?}", + old.len(), + new.len(), + alpha.len() + ); return new.to_owned(); } @@ -1397,6 +1490,15 @@ pub fn mat_ema_alpha_sparse( break; }; + if new_row.len() != old_row.len() || new_row.len() != alpha_row.len() { + log::error!( + "mat_ema_alpha_sparse row shapes don't match: {:?} vs. {:?} vs. {:?}", + old_row.len(), + new_row.len(), + alpha_row.len() + ); + } + // Densified accumulator sized to alpha_row length (columns outside are ignored). 
let mut decayed_values = vec![zero; alpha_row.len()]; @@ -1443,6 +1545,12 @@ pub fn mat_ema_alpha( // If outer dimensions don't match, return bonds unchanged if new.len() != old.len() || new.len() != alpha.len() { + log::error!( + "mat_ema_alpha shapes don't match: {:?} vs. {:?} vs. {:?}", + old.len(), + new.len(), + alpha.len() + ); return old.to_owned(); } diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 8febdfe208..a4d4755e5d 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -259,6 +259,12 @@ impl Pallet { *s = pruning_score; } }); + } else { + log::error!( + "set_pruning_score_for_uid: uid >= SubnetworkN::::get(netuid): {:?} >= {:?}", + uid, + SubnetworkN::::get(netuid) + ); } } pub fn set_validator_permit_for_uid(netuid: NetUid, uid: u16, validator_permit: bool) { From f7be5c64e6e5127c13ef68272172be8dd3356334 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 11:39:07 -0700 Subject: [PATCH 338/379] dockerfile cleanup --- Dockerfile-localnet | 39 +++++++++++++--------------- scripts/install_prebuilt_binaries.sh | 6 +++++ scripts/localnet.sh | 6 +++++ 3 files changed, 30 insertions(+), 21 deletions(-) mode change 100644 => 100755 scripts/install_prebuilt_binaries.sh diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 153d74996e..749b83776a 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -2,9 +2,6 @@ ARG BASE_IMAGE=ubuntu:latest FROM $BASE_IMAGE AS builder -ARG TARGETARCH -RUN echo ">>> Building for $TARGETARCH" - SHELL ["/bin/bash", "-c"] # Set noninteractive mode for apt-get @@ -22,6 +19,8 @@ WORKDIR /build # Set up env var ARG BUILT_IN_CI +ARG TARGETARCH + ENV BUILT_IN_CI=${BUILT_IN_CI} ENV RUST_BACKTRACE=1 ENV PATH="/root/.cargo/bin:${PATH}" @@ -29,24 +28,22 @@ ENV PATH="/root/.cargo/bin:${PATH}" ## Ubdate certificates RUN apt-get update && apt-get install -y ca-certificates -# Echo the value -RUN echo "BUILT_IN_CI=$BUILT_IN_CI" - -# If the 
BUILT_IN_CI local environment variable isn't passed, node building occurs within Docker (used for local Docker -# image building). Otherwise, building occurs in advance for each architecture (in CI) and is substituted for the -# required image layer during the build. -RUN if [ -z "$BUILT_IN_CI" ]; then \ - echo "[*] Installing env and building binaries..." && \ - chmod +x ./scripts/install_build_env.sh && \ - ./scripts/install_build_env.sh && \ - ./scripts/localnet.sh --build-only && \ - ./scripts/localnet.sh False --build-only ; \ - elif [ -d "/build/ci_target" ]; then \ - chmod +x ./scripts/install_prebuilt_binaries.sh && \ - ./scripts/install_prebuilt_binaries.sh ; \ - else \ - echo "[!] BUILT_IN_CI is set but /build/ci_target not found. Exiting." && exit 1 ; \ - fi +# Debug +RUN echo ">>> Building for $TARGETARCH" +RUN echo ">>> BUILT_IN_CI=$BUILT_IN_CI" + +# Install requirements +RUN chmod +x ./scripts/install_build_env.sh +RUN ./scripts/install_build_env.sh + +## Build fast-runtime node +RUN ./scripts/localnet.sh --build-only +# Build non-fast-runtime +RUN ./scripts/localnet.sh False --build-only + +# We will prepare the necessary binaries if they are created in CI +RUN chmod +x ./scripts/install_prebuilt_binaries.sh +RUN ./scripts/install_prebuilt_binaries.sh # Verify the binaries was produced RUN test -e /build/target/fast-runtime/release/node-subtensor diff --git a/scripts/install_prebuilt_binaries.sh b/scripts/install_prebuilt_binaries.sh old mode 100644 new mode 100755 index cbf3907177..ecec6b284c --- a/scripts/install_prebuilt_binaries.sh +++ b/scripts/install_prebuilt_binaries.sh @@ -1,5 +1,11 @@ #!/bin/bash +# We move the prebuild binaries required by the architecture if they were created in CI, otherwise exit with no error +if [ -z "$BUILT_IN_CI" ]; then + echo "[*] BUILT_IN_CI is not set. Skipping script..." 
+ exit 0 +fi + echo "" echo "######################################################################" echo "### WARNING: DO NOT MODIFY THIS SCRIPT UNLESS YOU KNOW WHY! ###" diff --git a/scripts/localnet.sh b/scripts/localnet.sh index a15a9469fa..1b96baa19b 100755 --- a/scripts/localnet.sh +++ b/scripts/localnet.sh @@ -1,5 +1,11 @@ #!/bin/bash +# If binaries are compiled in CI then skip this script +if [ -n "$BUILT_IN_CI" ]; then + echo "[*] BUILT_IN_CI is set to '$BUILT_IN_CI'. Skipping script..." + exit 0 +fi + # Check if `--no-purge` passed as a parameter NO_PURGE=0 From 202fbf542d6d536880a1b3b5550be16250ebff07 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 13:58:16 -0700 Subject: [PATCH 339/379] more debug --- scripts/install_prebuilt_binaries.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/scripts/install_prebuilt_binaries.sh b/scripts/install_prebuilt_binaries.sh index ecec6b284c..e53e7c412c 100755 --- a/scripts/install_prebuilt_binaries.sh +++ b/scripts/install_prebuilt_binaries.sh @@ -18,7 +18,7 @@ echo "### Any changes may break CI builds or local Docker environments. ###" echo "######################################################################" echo "" -set -e +set -x echo "[*] BUILT_IN_CI is set → using prebuilt binaries." echo "[*] Mapping TARGETARCH=${TARGETARCH} to Rust triple..." @@ -39,6 +39,11 @@ echo "[*] Using BUILD_TRIPLE=$BUILD_TRIPLE" echo "[*] Copying binaries to expected /build/target layout..." 
for RUNTIME in fast-runtime non-fast-runtime; do + echo "[*] Listing files in /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/" + ls -al /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/ || true + echo "[*] Listing wasm in wbuild/" + ls -al /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/wbuild/node-subtensor-runtime/ || true + mkdir -p /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime cp -v /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/node-subtensor \ /build/target/${RUNTIME}/release/node-subtensor From 0a92c54a7fe47336094067b6a90464deaf03df59 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 14:54:09 -0700 Subject: [PATCH 340/379] change Listing --- scripts/install_prebuilt_binaries.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/scripts/install_prebuilt_binaries.sh b/scripts/install_prebuilt_binaries.sh index e53e7c412c..d9ffd3f591 100755 --- a/scripts/install_prebuilt_binaries.sh +++ b/scripts/install_prebuilt_binaries.sh @@ -39,10 +39,8 @@ echo "[*] Using BUILD_TRIPLE=$BUILD_TRIPLE" echo "[*] Copying binaries to expected /build/target layout..." 
for RUNTIME in fast-runtime non-fast-runtime; do - echo "[*] Listing files in /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/" - ls -al /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/ || true - echo "[*] Listing wasm in wbuild/" - ls -al /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/wbuild/node-subtensor-runtime/ || true + echo "[*] Listing files in /build/" + ls -al /build/ || true mkdir -p /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime cp -v /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/node-subtensor \ From b304de1b7cf38d4edd819cce084ddfaca372de47 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 15:34:06 -0700 Subject: [PATCH 341/379] more info --- scripts/install_prebuilt_binaries.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/install_prebuilt_binaries.sh b/scripts/install_prebuilt_binaries.sh index d9ffd3f591..c65d6e3d90 100755 --- a/scripts/install_prebuilt_binaries.sh +++ b/scripts/install_prebuilt_binaries.sh @@ -39,8 +39,9 @@ echo "[*] Using BUILD_TRIPLE=$BUILD_TRIPLE" echo "[*] Copying binaries to expected /build/target layout..." 
for RUNTIME in fast-runtime non-fast-runtime; do - echo "[*] Listing files in /build/" - ls -al /build/ || true + echo "::group::GITHUB_WORKSPACE = $GITHUB_WORKSPACE" + find "$GITHUB_WORKSPACE" -type f | sort + echo "::endgroup::" mkdir -p /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime cp -v /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/node-subtensor \ From df94f2529bf89c36be2cf3cd34d7c1d13c4659f9 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 16:09:07 -0700 Subject: [PATCH 342/379] wrong name --- scripts/install_prebuilt_binaries.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/install_prebuilt_binaries.sh b/scripts/install_prebuilt_binaries.sh index c65d6e3d90..c70e340867 100755 --- a/scripts/install_prebuilt_binaries.sh +++ b/scripts/install_prebuilt_binaries.sh @@ -39,8 +39,8 @@ echo "[*] Using BUILD_TRIPLE=$BUILD_TRIPLE" echo "[*] Copying binaries to expected /build/target layout..." for RUNTIME in fast-runtime non-fast-runtime; do - echo "::group::GITHUB_WORKSPACE = $GITHUB_WORKSPACE" - find "$GITHUB_WORKSPACE" -type f | sort + echo "::group::/build directory tree" + find /build -type f | sort echo "::endgroup::" mkdir -p /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime From 90fae2aa957e61dc37d2ce83bb428f13277f1684 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 16:12:08 -0700 Subject: [PATCH 343/379] fix path --- scripts/install_prebuilt_binaries.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/install_prebuilt_binaries.sh b/scripts/install_prebuilt_binaries.sh index c70e340867..8e9efc00fa 100755 --- a/scripts/install_prebuilt_binaries.sh +++ b/scripts/install_prebuilt_binaries.sh @@ -44,8 +44,8 @@ for RUNTIME in fast-runtime non-fast-runtime; do echo "::endgroup::" mkdir -p /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime - cp -v /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/node-subtensor \ + cp -v 
/build/build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/node-subtensor \ /build/target/${RUNTIME}/release/node-subtensor - cp -v /build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ + cp -v /build/build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm \ /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime/node_subtensor_runtime.compact.compressed.wasm done \ No newline at end of file From 1b978b2f0d397360497b2259610fd8b3eacab7af Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 16:38:27 -0700 Subject: [PATCH 344/379] cleanup --- .github/workflows/docker-localnet.yml | 9 +-------- Dockerfile-localnet | 4 ---- scripts/install_prebuilt_binaries.sh | 9 +++++---- 3 files changed, 6 insertions(+), 16 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index fb564ccfa6..6ad5c97f73 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -61,7 +61,6 @@ jobs: platform: - runner: ubuntu-latest triple: x86_64-unknown-linux-gnu - - runner: ubuntu-24.04-arm triple: aarch64-unknown-linux-gnu @@ -100,6 +99,7 @@ jobs: ./scripts/localnet.sh False --build-only fi + # use `ci_target` name bc .dockerignore excludes `target` - name: Prepare artifacts for upload run: | RUNTIME="${{ matrix.runtime }}" @@ -141,13 +141,6 @@ jobs: path: build/ merge-multiple: true - # to be make sure - - name: Print full workspace directory tree - run: | - echo "::group::GITHUB_WORKSPACE = $GITHUB_WORKSPACE" - find "$GITHUB_WORKSPACE" -type f | sort - echo "::endgroup::" - - name: Show current Git branch run: | echo "===============================" diff --git a/Dockerfile-localnet b/Dockerfile-localnet index 749b83776a..b4be1f9291 100644 --- a/Dockerfile-localnet +++ b/Dockerfile-localnet @@ -28,10 +28,6 @@ ENV PATH="/root/.cargo/bin:${PATH}" ## 
Ubdate certificates RUN apt-get update && apt-get install -y ca-certificates -# Debug -RUN echo ">>> Building for $TARGETARCH" -RUN echo ">>> BUILT_IN_CI=$BUILT_IN_CI" - # Install requirements RUN chmod +x ./scripts/install_build_env.sh RUN ./scripts/install_build_env.sh diff --git a/scripts/install_prebuilt_binaries.sh b/scripts/install_prebuilt_binaries.sh index 8e9efc00fa..7f111981ef 100755 --- a/scripts/install_prebuilt_binaries.sh +++ b/scripts/install_prebuilt_binaries.sh @@ -18,7 +18,7 @@ echo "### Any changes may break CI builds or local Docker environments. ###" echo "######################################################################" echo "" -set -x +set -e echo "[*] BUILT_IN_CI is set → using prebuilt binaries." echo "[*] Mapping TARGETARCH=${TARGETARCH} to Rust triple..." @@ -39,9 +39,10 @@ echo "[*] Using BUILD_TRIPLE=$BUILD_TRIPLE" echo "[*] Copying binaries to expected /build/target layout..." for RUNTIME in fast-runtime non-fast-runtime; do - echo "::group::/build directory tree" - find /build -type f | sort - echo "::endgroup::" +# keep it for future debug +# echo "::group::/build directory tree" +# find /build -type f | sort +# echo "::endgroup::" mkdir -p /build/target/${RUNTIME}/release/wbuild/node-subtensor-runtime cp -v /build/build/ci_target/${RUNTIME}/${BUILD_TRIPLE}/release/node-subtensor \ From e0d43b078ad45abe8141a089f5825772ae4c6a02 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 16:40:27 -0700 Subject: [PATCH 345/379] use powerful runners --- .github/workflows/docker-localnet.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 6ad5c97f73..eb87909fa7 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -28,7 +28,7 @@ permissions: jobs: setup: - runs-on: ubuntu-latest + runs-on: [self-hosted, type-cax41] outputs: tag: ${{ steps.vars.outputs.tag }} ref: ${{ 
steps.vars.outputs.ref }} @@ -53,15 +53,15 @@ jobs: fi # build artifacts for fast-runtime and non-fast-runtime - build: + build_bins: name: Build • ${{ matrix.platform.triple }} • ${{ matrix.runtime }} needs: setup strategy: matrix: platform: - - runner: ubuntu-latest + - runner: [self-hosted, type-cax41] triple: x86_64-unknown-linux-gnu - - runner: ubuntu-24.04-arm + - runner: [self-hosted, type-cax41, image-arm-app-docker-ce] triple: aarch64-unknown-linux-gnu runtime: ["fast-runtime", "non-fast-runtime"] @@ -121,8 +121,8 @@ jobs: if-no-files-found: error # Collect all artifacts and publish them to docker repo - publish: - needs: [setup, build] + build_and_publish: + needs: [setup, build_bins] runs-on: ubuntu-latest defaults: run: From f6707e569bfeda656af7848f384398d24cb94a79 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 16:41:56 -0700 Subject: [PATCH 346/379] overkill for setup --- .github/workflows/docker-localnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index eb87909fa7..4f775936b4 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -28,7 +28,7 @@ permissions: jobs: setup: - runs-on: [self-hosted, type-cax41] + runs-on: ubuntu-latest outputs: tag: ${{ steps.vars.outputs.tag }} ref: ${{ steps.vars.outputs.ref }} From aa3956a6cdd550c61be9b186c610517670f8b094 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 16:50:28 -0700 Subject: [PATCH 347/379] arm dead --- .github/workflows/docker-localnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 4f775936b4..6c5009d9ef 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -61,7 +61,7 @@ jobs: platform: - runner: [self-hosted, type-cax41] triple: x86_64-unknown-linux-gnu - - runner: [self-hosted, 
type-cax41, image-arm-app-docker-ce] + - runner: [ubuntu-24.04-arm] triple: aarch64-unknown-linux-gnu runtime: ["fast-runtime", "non-fast-runtime"] From 4988df328c883a4ae5e7b4226a6f89324db4b311 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 16:52:14 -0700 Subject: [PATCH 348/379] build_and_publish --- .github/workflows/docker-localnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 6c5009d9ef..3803e0c472 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -123,7 +123,7 @@ jobs: # Collect all artifacts and publish them to docker repo build_and_publish: needs: [setup, build_bins] - runs-on: ubuntu-latest + runs-on: [self-hosted, type-cax41] defaults: run: working-directory: ${{ github.workspace }} From ba51d4a2f9837148c5ac46910eb55e1aa2b3270a Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 17:00:36 -0700 Subject: [PATCH 349/379] add source "$HOME/.cargo/env" --- .github/workflows/docker-localnet.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 3803e0c472..81bd10fbd8 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -81,6 +81,7 @@ jobs: - name: Add Rust target triple run: | + source "$HOME/.cargo/env" rustup target add ${{ matrix.platform.triple }} - name: Patch limits for local run From 9cbd950ddde22517c9db9a0b87e0116d8ec2f1d1 Mon Sep 17 00:00:00 2001 From: Roman Date: Fri, 19 Sep 2025 20:19:43 -0700 Subject: [PATCH 350/379] update runners --- .github/workflows/docker-localnet.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 81bd10fbd8..0c165d8561 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -59,9 
+59,10 @@ jobs: strategy: matrix: platform: - - runner: [self-hosted, type-cax41] + - runner: [self-hosted, cax41] triple: x86_64-unknown-linux-gnu - - runner: [ubuntu-24.04-arm] + # - runner: [ubuntu-24.04-arm] + - runner: [self-hosted, cax41, image-arm-app-docker-ce] triple: aarch64-unknown-linux-gnu runtime: ["fast-runtime", "non-fast-runtime"] From 8d32d044911838f698b5c6854c38d375a8ad747f Mon Sep 17 00:00:00 2001 From: Roman Date: Sat, 20 Sep 2025 01:49:01 -0700 Subject: [PATCH 351/379] SubtensorCI for arm build (test) --- .github/workflows/docker-localnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 0c165d8561..5f8ed94088 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -62,7 +62,7 @@ jobs: - runner: [self-hosted, cax41] triple: x86_64-unknown-linux-gnu # - runner: [ubuntu-24.04-arm] - - runner: [self-hosted, cax41, image-arm-app-docker-ce] + - runner: [SubtensorCI] triple: aarch64-unknown-linux-gnu runtime: ["fast-runtime", "non-fast-runtime"] From 9358fb0a95eb3a14b715bbb0a8ab46f657b5a24b Mon Sep 17 00:00:00 2001 From: Roman Date: Sat, 20 Sep 2025 01:55:44 -0700 Subject: [PATCH 352/379] ubuntu-24.04-arm for arm build --- .github/workflows/docker-localnet.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 5f8ed94088..166c8d83e6 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -54,16 +54,17 @@ jobs: # build artifacts for fast-runtime and non-fast-runtime build_bins: - name: Build • ${{ matrix.platform.triple }} • ${{ matrix.runtime }} + name: Build • ${{ matrix.runtime }} • ${{ matrix.platform.arch }} needs: setup strategy: matrix: platform: - runner: [self-hosted, cax41] triple: x86_64-unknown-linux-gnu - # - runner: [ubuntu-24.04-arm] - - runner: 
[SubtensorCI] + arch: amd64 + - runner: [ubuntu-24.04-arm] triple: aarch64-unknown-linux-gnu + arch: arm64 runtime: ["fast-runtime", "non-fast-runtime"] From 7cd0807702e4efaa37e8365247484858ad46a80b Mon Sep 17 00:00:00 2001 From: Roman Date: Sat, 20 Sep 2025 02:39:00 -0700 Subject: [PATCH 353/379] cax41 --- .github/workflows/docker-localnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 166c8d83e6..23966554cc 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -126,7 +126,7 @@ jobs: # Collect all artifacts and publish them to docker repo build_and_publish: needs: [setup, build_bins] - runs-on: [self-hosted, type-cax41] + runs-on: [self-hosted, cax41] defaults: run: working-directory: ${{ github.workspace }} From 9479d89e0c10fad25c90fbaee2bceb10c1918817 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sat, 20 Sep 2025 17:21:10 -0700 Subject: [PATCH 354/379] convet a to t --- pallets/swap/src/pallet/impls.rs | 129 ++++++++++++++++++++++++++++--- 1 file changed, 118 insertions(+), 11 deletions(-) diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index c0a109bfb5..a600b392ce 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1232,23 +1232,125 @@ impl Pallet { to_close .sort_by(|a, b| (a.owner == protocol_account).cmp(&(b.owner == protocol_account))); + let mut user_refunded_tao = TaoCurrency::ZERO; + let mut burned_tao = TaoCurrency::ZERO; + let mut burned_alpha = AlphaCurrency::ZERO; + + // Helper: build a very lax sqrt price limit. + // Mirrors the wrapper’s transformation: price_limit / 1e9, then sqrt(). 
+ let compute_limit = || { + SqrtPrice::saturating_from_num(u64::MAX) + .safe_div(SqrtPrice::saturating_from_num(1_000_000_000u64)) + .checked_sqrt(SqrtPrice::saturating_from_num(0.0000000001f64)) + }; + for CloseItem { owner, pos_id } in to_close.into_iter() { match Self::do_remove_liquidity(netuid, &owner, pos_id) { Ok(rm) => { - if rm.tao > TaoCurrency::ZERO { - T::BalanceOps::increase_balance(&owner, rm.tao); - } - if owner != protocol_account { - T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao); - let alpha_burn = rm.alpha.saturating_add(rm.fee_alpha); - if alpha_burn > AlphaCurrency::ZERO { - T::BalanceOps::decrease_provided_alpha_reserve(netuid, alpha_burn); + // α withdrawn from the pool = principal + accrued fees + let alpha_total_from_pool: AlphaCurrency = + rm.alpha.saturating_add(rm.fee_alpha); + + if owner == protocol_account { + // ---------------- PROTOCOL: burn everything ---------------- + if rm.tao > TaoCurrency::ZERO { + burned_tao = burned_tao.saturating_add(rm.tao); + } + if alpha_total_from_pool > AlphaCurrency::ZERO { + burned_alpha = burned_alpha.saturating_add(alpha_total_from_pool); + } + + log::debug!( + "dissolve_all_lp: burned protocol position: netuid={:?}, pos_id={:?}, τ={:?}, α_principal={:?}, α_fees={:?}", + netuid, + pos_id, + rm.tao, + rm.alpha, + rm.fee_alpha + ); + } else { + // ---------------- USER: refund τ and convert α → τ ---------------- + + // 1) Refund τ principal directly. + if rm.tao > TaoCurrency::ZERO { + T::BalanceOps::increase_balance(&owner, rm.tao); + user_refunded_tao = user_refunded_tao.saturating_add(rm.tao); + T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao); + } + + // 2) Convert ALL α withdrawn (principal + fees) to τ and refund τ to user. + if alpha_total_from_pool > AlphaCurrency::ZERO { + // α → τ via AMM swap (sell α). Drop trading fees on forced dissolve. 
+ let sell_amount: u64 = alpha_total_from_pool.into(); + + if let Some(limit_sqrt_price) = compute_limit() { + match Self::do_swap( + netuid, + OrderType::Sell, + sell_amount, + limit_sqrt_price, + true, + false, + ) { + Ok(sres) => { + // Credit τ output to the user. + let tao_out: TaoCurrency = sres.amount_paid_out.into(); + if tao_out > TaoCurrency::ZERO { + T::BalanceOps::increase_balance(&owner, tao_out); + user_refunded_tao = + user_refunded_tao.saturating_add(tao_out); + } + } + Err(e) => { + // Could not convert α -> τ; log and continue dissolving others. + // (No α is credited to the user; α already removed from pool is effectively burned.) + log::debug!( + "dissolve_all_lp: α→τ swap failed on dissolve: netuid={:?}, owner={:?}, pos_id={:?}, α={:?}, err={:?}", + netuid, + owner, + pos_id, + alpha_total_from_pool, + e + ); + } + } + } else { + log::debug!( + "dissolve_all_lp: invalid price limit during α→τ on dissolve: netuid={:?}, owner={:?}, pos_id={:?}, α={:?}", + netuid, + owner, + pos_id, + alpha_total_from_pool + ); + } + + // Provided‑α reserve (user‑provided liquidity) decreased by what left the pool. + T::BalanceOps::decrease_provided_alpha_reserve( + netuid, + alpha_total_from_pool, + ); } + + log::debug!( + "dissolve_all_lp: user dissolved: netuid={:?}, owner={:?}, pos_id={:?}, τ_refunded={:?}, α_total_converted={:?} (α_principal={:?}, α_fees={:?})", + netuid, + owner, + pos_id, + rm.tao, + alpha_total_from_pool, + rm.alpha, + rm.fee_alpha + ); } } Err(e) => { + // Keep dissolving other positions even if this one fails. 
log::debug!( - "dissolve_all_lp: force-closing failed position: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, err={e:?}" + "dissolve_all_lp: force-close failed: netuid={:?}, owner={:?}, pos_id={:?}, err={:?}", + netuid, + owner, + pos_id, + e ); continue; } @@ -1277,7 +1379,11 @@ impl Pallet { EnabledUserLiquidity::::remove(netuid); log::debug!( - "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V3, positions closed; τ principal refunded; α burned; state cleared" + "dissolve_all_liquidity_providers: netuid={:?}, users_refunded_total_τ={:?}; protocol_burned: τ={:?}, α={:?}; state cleared", + netuid, + user_refunded_tao, + burned_tao, + burned_alpha ); return Ok(()); @@ -1305,7 +1411,8 @@ impl Pallet { EnabledUserLiquidity::::remove(netuid); log::debug!( - "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V2-or-nonV3, state_cleared" + "dissolve_all_liquidity_providers: netuid={:?}, mode=V2-or-nonV3, state_cleared", + netuid ); Ok(()) From 2b0a5f6f8ab07885250372535e5966736df61309 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Sat, 20 Sep 2025 17:26:16 -0700 Subject: [PATCH 355/379] fix comments --- pallets/swap/src/pallet/impls.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index a600b392ce..c0f0f8f12e 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1280,7 +1280,6 @@ impl Pallet { // 2) Convert ALL α withdrawn (principal + fees) to τ and refund τ to user. if alpha_total_from_pool > AlphaCurrency::ZERO { - // α → τ via AMM swap (sell α). Drop trading fees on forced dissolve. let sell_amount: u64 = alpha_total_from_pool.into(); if let Some(limit_sqrt_price) = compute_limit() { @@ -1293,7 +1292,6 @@ impl Pallet { false, ) { Ok(sres) => { - // Credit τ output to the user. 
let tao_out: TaoCurrency = sres.amount_paid_out.into(); if tao_out > TaoCurrency::ZERO { T::BalanceOps::increase_balance(&owner, tao_out); @@ -1302,8 +1300,6 @@ impl Pallet { } } Err(e) => { - // Could not convert α -> τ; log and continue dissolving others. - // (No α is credited to the user; α already removed from pool is effectively burned.) log::debug!( "dissolve_all_lp: α→τ swap failed on dissolve: netuid={:?}, owner={:?}, pos_id={:?}, α={:?}, err={:?}", netuid, @@ -1324,7 +1320,6 @@ impl Pallet { ); } - // Provided‑α reserve (user‑provided liquidity) decreased by what left the pool. T::BalanceOps::decrease_provided_alpha_reserve( netuid, alpha_total_from_pool, @@ -1344,7 +1339,6 @@ impl Pallet { } } Err(e) => { - // Keep dissolving other positions even if this one fails. log::debug!( "dissolve_all_lp: force-close failed: netuid={:?}, owner={:?}, pos_id={:?}, err={:?}", netuid, From 1cff4250b91871cd966fa611443829ac2ac2f922 Mon Sep 17 00:00:00 2001 From: Roman Date: Sun, 21 Sep 2025 00:18:23 -0700 Subject: [PATCH 356/379] ubuntu-24.04-arm --- .github/workflows/docker-localnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 23966554cc..3e4e7dbb61 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -126,7 +126,7 @@ jobs: # Collect all artifacts and publish them to docker repo build_and_publish: needs: [setup, build_bins] - runs-on: [self-hosted, cax41] + runs-on: [ubuntu-24.04-arm] defaults: run: working-directory: ${{ github.workspace }} From 4432db387ec822dfecf7d2774c65b282ffcda3df Mon Sep 17 00:00:00 2001 From: Roman Date: Sun, 21 Sep 2025 05:30:24 -0700 Subject: [PATCH 357/379] ubuntu-latest --- .github/workflows/docker-localnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 3e4e7dbb61..88ee0f4c2a 100644 
--- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -59,7 +59,7 @@ jobs: strategy: matrix: platform: - - runner: [self-hosted, cax41] + - runner: [ubuntu-latest] triple: x86_64-unknown-linux-gnu arch: amd64 - runner: [ubuntu-24.04-arm] From 68d3ad7117ab4b945561c98dbd9b881ae15f254a Mon Sep 17 00:00:00 2001 From: Roman Date: Sun, 21 Sep 2025 06:07:49 -0700 Subject: [PATCH 358/379] Let's try skipping dependency installation and check the image. --- scripts/install_build_env.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/scripts/install_build_env.sh b/scripts/install_build_env.sh index 10d9629f53..ae1982a2ac 100644 --- a/scripts/install_build_env.sh +++ b/scripts/install_build_env.sh @@ -1,5 +1,11 @@ #!/bin/bash +# If binaries are compiled in CI then skip this script +if [ -n "$BUILT_IN_CI" ]; then + echo "[*] BUILT_IN_CI is set to '$BUILT_IN_CI'. Skipping script..." + exit 0 +fi + echo "" echo "######################################################################" echo "### WARNING: DO NOT MODIFY THIS SCRIPT UNLESS YOU KNOW WHY! ###" From 8e5a11cb406413d0aded3ff3a6b78cff4a5a4209 Mon Sep 17 00:00:00 2001 From: Roman Date: Sun, 21 Sep 2025 06:10:24 -0700 Subject: [PATCH 359/379] Let's try skipping dependency installation and check the image. --- scripts/install_build_env.sh | 2 ++ scripts/install_prebuilt_binaries.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/scripts/install_build_env.sh b/scripts/install_build_env.sh index ae1982a2ac..c9fd8bebca 100644 --- a/scripts/install_build_env.sh +++ b/scripts/install_build_env.sh @@ -8,6 +8,8 @@ fi echo "" echo "######################################################################" +echo "### Install build environment dependencies ###" +echo "######################################################################" echo "### WARNING: DO NOT MODIFY THIS SCRIPT UNLESS YOU KNOW WHY! 
###" echo "### ###" echo "### This script is used by: ###" diff --git a/scripts/install_prebuilt_binaries.sh b/scripts/install_prebuilt_binaries.sh index 7f111981ef..e5b11ebf6d 100755 --- a/scripts/install_prebuilt_binaries.sh +++ b/scripts/install_prebuilt_binaries.sh @@ -8,6 +8,8 @@ fi echo "" echo "######################################################################" +echo "### Pre-built binary substitution ###" +echo "######################################################################" echo "### WARNING: DO NOT MODIFY THIS SCRIPT UNLESS YOU KNOW WHY! ###" echo "### ###" echo "### This script is used by: ###" From d0db135ffd40e44eb987f10ba885bd574bcd71c7 Mon Sep 17 00:00:00 2001 From: Roman Date: Sun, 21 Sep 2025 07:01:25 -0700 Subject: [PATCH 360/379] [self-hosted, type-ccx43] --- .github/workflows/docker-localnet.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 88ee0f4c2a..36392e9064 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -59,7 +59,7 @@ jobs: strategy: matrix: platform: - - runner: [ubuntu-latest] + - runner: [self-hosted, type-ccx43] triple: x86_64-unknown-linux-gnu arch: amd64 - runner: [ubuntu-24.04-arm] @@ -126,7 +126,7 @@ jobs: # Collect all artifacts and publish them to docker repo build_and_publish: needs: [setup, build_bins] - runs-on: [ubuntu-24.04-arm] + runs-on: [self-hosted, type-ccx43] defaults: run: working-directory: ${{ github.workspace }} From b27d92686c64b2f6cbe0e8fa7192785b590f1278 Mon Sep 17 00:00:00 2001 From: Roman Date: Sun, 21 Sep 2025 07:04:09 -0700 Subject: [PATCH 361/379] we have more ccx33 tagged runners --- .github/workflows/docker-localnet.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 36392e9064..5800a4e018 100644 --- 
a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -59,7 +59,7 @@ jobs: strategy: matrix: platform: - - runner: [self-hosted, type-ccx43] + - runner: [self-hosted, type-ccx33] triple: x86_64-unknown-linux-gnu arch: amd64 - runner: [ubuntu-24.04-arm] @@ -126,7 +126,7 @@ jobs: # Collect all artifacts and publish them to docker repo build_and_publish: needs: [setup, build_bins] - runs-on: [self-hosted, type-ccx43] + runs-on: [self-hosted, type-ccx33] defaults: run: working-directory: ${{ github.workspace }} From 6b847c418fedc4b98552a56942f814b9152c5748 Mon Sep 17 00:00:00 2001 From: Roman Date: Sun, 21 Sep 2025 07:33:07 -0700 Subject: [PATCH 362/379] remove cache from Build and push Docker image --- .github/workflows/docker-localnet.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index 5800a4e018..fd0443f286 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -177,5 +177,3 @@ jobs: tags: | ghcr.io/${{ github.repository }}-localnet:${{ needs.setup.outputs.tag }} ${{ needs.setup.outputs.latest_tag == 'true' && format('ghcr.io/{0}-localnet:latest', github.repository) || '' }} - cache-from: type=gha - cache-to: type=gha,mode=max From 68af1e860bfe4ddd5cd648bd0f8292bd3f0e50b0 Mon Sep 17 00:00:00 2001 From: Roman Date: Sun, 21 Sep 2025 08:16:39 -0700 Subject: [PATCH 363/379] cleanup and comments --- .github/workflows/docker-localnet.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docker-localnet.yml b/.github/workflows/docker-localnet.yml index fd0443f286..c79be55103 100644 --- a/.github/workflows/docker-localnet.yml +++ b/.github/workflows/docker-localnet.yml @@ -53,12 +53,13 @@ jobs: fi # build artifacts for fast-runtime and non-fast-runtime - build_bins: - name: Build • ${{ matrix.runtime }} • ${{ matrix.platform.arch }} + artifacts: + name: Node • ${{ 
matrix.runtime }} • ${{ matrix.platform.arch }} needs: setup strategy: matrix: platform: + # triple names used `in scripts/install_prebuilt_binaries.sh` file - runner: [self-hosted, type-ccx33] triple: x86_64-unknown-linux-gnu arch: amd64 @@ -124,8 +125,8 @@ jobs: if-no-files-found: error # Collect all artifacts and publish them to docker repo - build_and_publish: - needs: [setup, build_bins] + docker: + needs: [setup, artifacts] runs-on: [self-hosted, type-ccx33] defaults: run: From a4341fc641ba4156114dd6dd6e49d3df6e41d0c0 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 22 Sep 2025 09:56:01 -0700 Subject: [PATCH 364/379] netuid => netuid_index --- pallets/subtensor/src/rpc_info/metagraph.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index d0d7b1b94e..d4ddd161a6 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -1453,7 +1453,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { netuid: netuid.into(), incentives: Some( - Incentive::::get(NetUidStorageIndex::from(netuid)) + Incentive::::get(NetUidStorageIndex::from(netuid_index)) .into_iter() .map(Compact::from) .collect(), @@ -1464,7 +1464,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::LastUpdate) => SelectiveMetagraph { netuid: netuid.into(), last_update: Some( - LastUpdate::::get(NetUidStorageIndex::from(netuid)) + LastUpdate::::get(NetUidStorageIndex::from(netuid_index)) .into_iter() .map(Compact::from) .collect(), From d6b35292690a23d9c32eef5a5a5332ee053f54a8 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 22 Sep 2025 10:13:06 -0700 Subject: [PATCH 365/379] bump spec --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 
254bec73a3..20f4bac2b3 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 318, + spec_version: 319, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 773f591a21860e6193698673fa75360075960881 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Mon, 22 Sep 2025 10:45:39 -0700 Subject: [PATCH 366/379] remove unnecessary type conversion --- pallets/subtensor/src/rpc_info/metagraph.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index d4ddd161a6..57c2e15c7f 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -1453,7 +1453,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { netuid: netuid.into(), incentives: Some( - Incentive::::get(NetUidStorageIndex::from(netuid_index)) + Incentive::::get(netuid_index) .into_iter() .map(Compact::from) .collect(), @@ -1464,7 +1464,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::LastUpdate) => SelectiveMetagraph { netuid: netuid.into(), last_update: Some( - LastUpdate::::get(NetUidStorageIndex::from(netuid_index)) + LastUpdate::::get(netuid_index) .into_iter() .map(Compact::from) .collect(), From 731be2cdcdad8f6cf1b1b78d3138744272efa582 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 23 Sep 2025 12:11:19 -0400 Subject: [PATCH 367/379] Remove unnecessary error logging, improve log searching patterns for math errors --- pallets/subtensor/src/epoch/math.rs | 51 ++++++++++-------------- pallets/subtensor/src/epoch/run_epoch.rs | 6 +-- 2 files changed, 24 insertions(+), 33 
deletions(-) diff --git a/pallets/subtensor/src/epoch/math.rs b/pallets/subtensor/src/epoch/math.rs index 4b613fa961..2cbbfae77d 100644 --- a/pallets/subtensor/src/epoch/math.rs +++ b/pallets/subtensor/src/epoch/math.rs @@ -294,7 +294,7 @@ pub fn inplace_row_normalize_64(x: &mut [Vec]) { pub fn vecdiv(x: &[I32F32], y: &[I32F32]) -> Vec { if x.len() != y.len() { log::error!( - "vecdiv input lengths are not equal: {:?} != {:?}", + "math error: vecdiv input lengths are not equal: {:?} != {:?}", x.len(), y.len() ); @@ -487,7 +487,7 @@ pub fn inplace_col_max_upscale(x: &mut [Vec]) { pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) { if mask.len() != vector.len() { log::error!( - "inplace_mask_vector input lengths are not equal: {:?} != {:?}", + "math error: inplace_mask_vector input lengths are not equal: {:?} != {:?}", mask.len(), vector.len() ); @@ -508,7 +508,7 @@ pub fn inplace_mask_vector(mask: &[bool], vector: &mut [I32F32]) { pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut [Vec]) { if mask.len() != matrix.len() { log::error!( - "inplace_mask_matrix input sizes are not equal: {:?} != {:?}", + "math error: inplace_mask_matrix input sizes are not equal: {:?} != {:?}", mask.len(), matrix.len() ); @@ -538,7 +538,7 @@ pub fn inplace_mask_matrix(mask: &[Vec], matrix: &mut [Vec]) { pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { if mask.len() != matrix.len() { log::error!( - "inplace_mask_rows input sizes are not equal: {:?} != {:?}", + "math error: inplace_mask_rows input sizes are not equal: {:?} != {:?}", mask.len(), matrix.len() ); @@ -560,7 +560,7 @@ pub fn inplace_mask_rows(mask: &[bool], matrix: &mut [Vec]) { pub fn inplace_mask_cols(mask: &[bool], matrix: &mut [Vec]) { if mask.len() != matrix.len() { log::error!( - "inplace_mask_cols input sizes are not equal: {:?} != {:?}", + "math error: inplace_mask_cols input sizes are not equal: {:?} != {:?}", mask.len(), matrix.len() ); @@ -591,7 +591,7 @@ pub fn inplace_mask_diag(matrix: 
&mut [Vec]) { // with no action. Log error if this happens. if matrix.len() != first_row.len() { log::error!( - "inplace_mask_diag: matrix.len {:?} != first_row.len {:?}", + "math error: inplace_mask_diag: matrix.len {:?} != first_row.len {:?}", matrix.len(), first_row.len() ); @@ -641,7 +641,7 @@ pub fn inplace_mask_diag_except_index(matrix: &mut [Vec], except_index: } if matrix.len() != first_row.len() { log::error!( - "inplace_mask_diag input matrix is now square: {:?} != {:?}", + "math error: inplace_mask_diag input matrix is now square: {:?} != {:?}", matrix.len(), first_row.len() ); @@ -794,7 +794,7 @@ pub fn matmul(matrix: &[Vec], vector: &[I32F32]) -> Vec { } if matrix.len() != vector.len() { log::error!( - "matmul input sizes are not equal: {:?} != {:?}", + "math error: matmul input sizes are not equal: {:?} != {:?}", matrix.len(), vector.len() ); @@ -830,11 +830,11 @@ pub fn matmul_transpose(matrix: &[Vec], vector: &[I32F32]) -> Vec], mat2: &[Vec], ratio: I32F32) -> } if mat1.len() != mat2.len() { log::error!( - "interpolate mat1.len() != mat2.len(): {:?} != {:?}", + "math error: interpolate mat1.len() != mat2.len(): {:?} != {:?}", mat1.len(), mat2.len() ); @@ -1213,12 +1213,12 @@ pub fn interpolate(mat1: &[Vec], mat2: &[Vec], ratio: I32F32) -> for row1 in mat1.iter() { let (Some(row2), Some(out_row)) = (m2_it.next(), out_it.next()) else { - log::error!("interpolate: No more rows in mat2"); + log::error!("math error: interpolate: No more rows in mat2"); break; }; if row1.len() != row2.len() { log::error!( - "interpolate row1.len() != row2.len(): {:?} != {:?}", + "math error: interpolate row1.len() != row2.len(): {:?} != {:?}", row1.len(), row2.len() ); @@ -1259,7 +1259,7 @@ pub fn interpolate_sparse( if mat1.len() != mat2.len() { // In case if sizes mismatch, return clipped weights log::error!( - "interpolate_sparse: mat1.len() != mat2.len(): {:?} != {:?}", + "math error: interpolate_sparse: mat1.len() != mat2.len(): {:?} != {:?}", mat1.len(), mat2.len() 
); @@ -1410,7 +1410,7 @@ pub fn mat_ema_sparse( ) -> Vec> { if new.len() != old.len() { log::error!( - "mat_ema_sparse: new.len() == old.len(): {:?} != {:?}", + "math error: mat_ema_sparse: new.len() == old.len(): {:?} != {:?}", new.len(), old.len() ); @@ -1469,7 +1469,7 @@ pub fn mat_ema_alpha_sparse( // If shapes don't match, just return `new` if new.len() != old.len() || new.len() != alpha.len() { log::error!( - "mat_ema_alpha_sparse shapes don't match: {:?} vs. {:?} vs. {:?}", + "math error: mat_ema_alpha_sparse shapes don't match: {:?} vs. {:?} vs. {:?}", old.len(), new.len(), alpha.len() @@ -1490,15 +1490,6 @@ pub fn mat_ema_alpha_sparse( break; }; - if new_row.len() != old_row.len() || new_row.len() != alpha_row.len() { - log::error!( - "mat_ema_alpha_sparse row shapes don't match: {:?} vs. {:?} vs. {:?}", - old_row.len(), - new_row.len(), - alpha_row.len() - ); - } - // Densified accumulator sized to alpha_row length (columns outside are ignored). let mut decayed_values = vec![zero; alpha_row.len()]; @@ -1546,7 +1537,7 @@ pub fn mat_ema_alpha( // If outer dimensions don't match, return bonds unchanged if new.len() != old.len() || new.len() != alpha.len() { log::error!( - "mat_ema_alpha shapes don't match: {:?} vs. {:?} vs. {:?}", + "math error: mat_ema_alpha shapes don't match: {:?} vs. {:?} vs. 
{:?}", old.len(), new.len(), alpha.len() diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 660690ae9f..cf7d4dc050 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1128,7 +1128,7 @@ impl Pallet { if let Some(row) = weights.get_mut(uid_i as usize) { row.push((*uid_j, I32F32::saturating_from_num(*weight_ij))); } else { - log::error!("uid_i {uid_i:?} is filtered to be less than n"); + log::error!("math error: uid_i {uid_i:?} is filtered to be less than n"); } } } @@ -1391,7 +1391,7 @@ impl Pallet { if weights.len() != bonds.len() { log::error!( - "compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", + "math error: compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", weights.len(), bonds.len() ); @@ -1444,7 +1444,7 @@ impl Pallet { if weights.len() != bonds.len() { log::error!( - "compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", + "math error: compute_liquid_alpha_values: weights and bonds have different lengths: {:?} != {:?}", weights.len(), bonds.len() ); From a490a0ca41f29ab6bf7493bf5357dd699826d0ab Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 23 Sep 2025 12:12:01 -0400 Subject: [PATCH 368/379] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 254bec73a3..20f4bac2b3 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 318, + spec_version: 319, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 98ef9f617d915bae7d3802ad9f1327135b4f5fdd Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 23 Sep 2025 09:48:45 -0700 Subject: [PATCH 369/379] add stake instead of swap --- common/src/lib.rs | 5 +- pallets/subtensor/src/lib.rs | 12 ++++ pallets/swap/src/mock.rs | 20 ++++++ pallets/swap/src/pallet/impls.rs | 114 +++++++++++++++++-------------- 4 files changed, 97 insertions(+), 54 deletions(-) diff --git a/common/src/lib.rs b/common/src/lib.rs index 6122ef99fa..a5d09ad974 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -9,7 +9,7 @@ use runtime_common::prod_or_fast; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_runtime::{ - MultiSignature, + MultiSignature, Vec, traits::{IdentifyAccount, Verify}, }; use subtensor_macros::freeze_struct; @@ -175,6 +175,9 @@ pub trait SubnetInfo { fn mechanism(netuid: NetUid) -> u16; fn is_owner(account_id: &AccountId, netuid: NetUid) -> bool; fn is_subtoken_enabled(netuid: NetUid) -> bool; + fn get_validator_trust(netuid: NetUid) -> Vec; + fn get_validator_permit(netuid: NetUid) -> Vec; + fn hotkey_of_uid(netuid: NetUid, uid: u16) -> Option; } pub trait BalanceOps { diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index f53ee4f58a..7de32221a0 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -2146,6 +2146,18 @@ impl> fn is_subtoken_enabled(netuid: NetUid) -> bool { SubtokenEnabled::::get(netuid) } + + fn get_validator_trust(netuid: NetUid) -> Vec { + ValidatorTrust::::get(netuid) + } + + fn get_validator_permit(netuid: NetUid) -> Vec { + ValidatorPermit::::get(netuid) + } + + fn hotkey_of_uid(netuid: NetUid, uid: u16) -> Option { + Keys::::try_get(netuid, uid).ok() + } } impl> diff --git a/pallets/swap/src/mock.rs b/pallets/swap/src/mock.rs index 40aac6d796..c79cb95d32 100644 --- 
a/pallets/swap/src/mock.rs +++ b/pallets/swap/src/mock.rs @@ -120,6 +120,26 @@ impl SubnetInfo for MockLiquidityProvider { fn is_subtoken_enabled(netuid: NetUid) -> bool { netuid.inner() != SUBTOKEN_DISABLED_NETUID } + + fn get_validator_trust(netuid: NetUid) -> Vec { + match netuid.into() { + 123u16 => vec![4000, 3000, 2000, 1000], + WRAPPING_FEES_NETUID => vec![8000, 7000, 6000, 5000], + _ => vec![1000, 800, 600, 400], + } + } + + fn get_validator_permit(netuid: NetUid) -> Vec { + match netuid.into() { + 123u16 => vec![true, true, false, true], + WRAPPING_FEES_NETUID => vec![true, true, true, true], + _ => vec![true, true, true, true], + } + } + + fn hotkey_of_uid(_netuid: NetUid, uid: u16) -> Option { + Some(uid as AccountId) + } } pub struct MockBalanceOps; diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index c0f0f8f12e..4b4a7076b8 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -5,7 +5,7 @@ use frame_support::storage::{TransactionOutcome, transactional}; use frame_support::{ensure, pallet_prelude::DispatchError, traits::Get}; use safe_math::*; use sp_arithmetic::helpers_128bit; -use sp_runtime::{DispatchResult, traits::AccountIdConversion}; +use sp_runtime::{DispatchResult, Vec, traits::AccountIdConversion}; use substrate_fixed::types::{I64F64, U64F64, U96F32}; use subtensor_runtime_common::{ AlphaCurrency, BalanceOps, Currency, NetUid, SubnetInfo, TaoCurrency, @@ -1233,15 +1233,36 @@ impl Pallet { .sort_by(|a, b| (a.owner == protocol_account).cmp(&(b.owner == protocol_account))); let mut user_refunded_tao = TaoCurrency::ZERO; + let mut user_staked_alpha = AlphaCurrency::ZERO; let mut burned_tao = TaoCurrency::ZERO; let mut burned_alpha = AlphaCurrency::ZERO; - // Helper: build a very lax sqrt price limit. - // Mirrors the wrapper’s transformation: price_limit / 1e9, then sqrt(). 
- let compute_limit = || { - SqrtPrice::saturating_from_num(u64::MAX) - .safe_div(SqrtPrice::saturating_from_num(1_000_000_000u64)) - .checked_sqrt(SqrtPrice::saturating_from_num(0.0000000001f64)) + let trust: Vec = T::SubnetInfo::get_validator_trust(netuid.into()); + let permit: Vec = T::SubnetInfo::get_validator_permit(netuid.into()); + + if trust.len() != permit.len() { + log::debug!( + "dissolve_all_lp: ValidatorTrust/Permit length mismatch: netuid={:?}, trust_len={}, permit_len={}", + netuid, + trust.len(), + permit.len() + ); + return Err(sp_runtime::DispatchError::Other( + "validator_meta_len_mismatch", + )); + } + + // Helper: pick target validator uid, only among permitted validators, by highest trust. + let pick_target_uid = |trust: &Vec, permit: &Vec| -> Option { + let mut best_uid: Option = None; + let mut best_trust: u16 = 0; + for (i, (&t, &p)) in trust.iter().zip(permit.iter()).enumerate() { + if p && (best_uid.is_none() || t > best_trust) { + best_uid = Some(i); + best_trust = t; + } + } + best_uid.map(|i| i as u16) }; for CloseItem { owner, pos_id } in to_close.into_iter() { @@ -1259,14 +1280,12 @@ impl Pallet { if alpha_total_from_pool > AlphaCurrency::ZERO { burned_alpha = burned_alpha.saturating_add(alpha_total_from_pool); } - log::debug!( - "dissolve_all_lp: burned protocol position: netuid={:?}, pos_id={:?}, τ={:?}, α_principal={:?}, α_fees={:?}", + "dissolve_all_lp: burned protocol pos: netuid={:?}, pos_id={:?}, τ={:?}, α_total={:?}", netuid, pos_id, rm.tao, - rm.alpha, - rm.fee_alpha + alpha_total_from_pool ); } else { // ---------------- USER: refund τ and convert α → τ ---------------- @@ -1278,41 +1297,40 @@ impl Pallet { T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao); } - // 2) Convert ALL α withdrawn (principal + fees) to τ and refund τ to user. + // 2) Stake ALL withdrawn α (principal + fees) to the best permitted validator. 
if alpha_total_from_pool > AlphaCurrency::ZERO { - let sell_amount: u64 = alpha_total_from_pool.into(); + if let Some(target_uid) = pick_target_uid(&trust, &permit) { + let validator_hotkey: T::AccountId = + T::SubnetInfo::hotkey_of_uid(netuid.into(), target_uid) + .ok_or(sp_runtime::DispatchError::Other( + "validator_hotkey_missing", + ))?; + + // Stake α from LP owner (coldkey) to chosen validator (hotkey). + T::BalanceOps::increase_stake( + &owner, + &validator_hotkey, + netuid, + alpha_total_from_pool, + )?; + + user_staked_alpha = + user_staked_alpha.saturating_add(alpha_total_from_pool); - if let Some(limit_sqrt_price) = compute_limit() { - match Self::do_swap( + log::debug!( + "dissolve_all_lp: user dissolved & staked α: netuid={:?}, owner={:?}, pos_id={:?}, α_staked={:?}, target_uid={}", netuid, - OrderType::Sell, - sell_amount, - limit_sqrt_price, - true, - false, - ) { - Ok(sres) => { - let tao_out: TaoCurrency = sres.amount_paid_out.into(); - if tao_out > TaoCurrency::ZERO { - T::BalanceOps::increase_balance(&owner, tao_out); - user_refunded_tao = - user_refunded_tao.saturating_add(tao_out); - } - } - Err(e) => { - log::debug!( - "dissolve_all_lp: α→τ swap failed on dissolve: netuid={:?}, owner={:?}, pos_id={:?}, α={:?}, err={:?}", - netuid, - owner, - pos_id, - alpha_total_from_pool, - e - ); - } - } + owner, + pos_id, + alpha_total_from_pool, + target_uid + ); } else { + // No permitted validators; burn to avoid balance drift. 
+ burned_alpha = + burned_alpha.saturating_add(alpha_total_from_pool); log::debug!( - "dissolve_all_lp: invalid price limit during α→τ on dissolve: netuid={:?}, owner={:?}, pos_id={:?}, α={:?}", + "dissolve_all_lp: no permitted validators; α burned: netuid={:?}, owner={:?}, pos_id={:?}, α_total={:?}", netuid, owner, pos_id, @@ -1325,17 +1343,6 @@ impl Pallet { alpha_total_from_pool, ); } - - log::debug!( - "dissolve_all_lp: user dissolved: netuid={:?}, owner={:?}, pos_id={:?}, τ_refunded={:?}, α_total_converted={:?} (α_principal={:?}, α_fees={:?})", - netuid, - owner, - pos_id, - rm.tao, - alpha_total_from_pool, - rm.alpha, - rm.fee_alpha - ); } } Err(e) => { @@ -1373,9 +1380,10 @@ impl Pallet { EnabledUserLiquidity::::remove(netuid); log::debug!( - "dissolve_all_liquidity_providers: netuid={:?}, users_refunded_total_τ={:?}; protocol_burned: τ={:?}, α={:?}; state cleared", + "dissolve_all_liquidity_providers: netuid={:?}, users_refunded_total_τ={:?}, users_staked_total_α={:?}; protocol_burned: τ={:?}, α={:?}; state cleared", netuid, user_refunded_tao, + user_staked_alpha, burned_tao, burned_alpha ); From b2041c52de5984779a89b99ee81246c60d9e868e Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 23 Sep 2025 09:50:08 -0700 Subject: [PATCH 370/379] clippy --- pallets/swap/src/pallet/impls.rs | 43 ++++++++------------------------ 1 file changed, 10 insertions(+), 33 deletions(-) diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index 4b4a7076b8..a077182666 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1241,11 +1241,10 @@ impl Pallet { let permit: Vec = T::SubnetInfo::get_validator_permit(netuid.into()); if trust.len() != permit.len() { + let trust_len = trust.len(); + let permit_len = permit.len(); log::debug!( - "dissolve_all_lp: ValidatorTrust/Permit length mismatch: netuid={:?}, trust_len={}, permit_len={}", - netuid, - trust.len(), - permit.len() 
+ "dissolve_all_lp: ValidatorTrust/Permit length mismatch: netuid={netuid:?}, trust_len={trust_len}, permit_len={permit_len}" ); return Err(sp_runtime::DispatchError::Other( "validator_meta_len_mismatch", @@ -1280,12 +1279,9 @@ impl Pallet { if alpha_total_from_pool > AlphaCurrency::ZERO { burned_alpha = burned_alpha.saturating_add(alpha_total_from_pool); } + let tao = rm.tao; log::debug!( - "dissolve_all_lp: burned protocol pos: netuid={:?}, pos_id={:?}, τ={:?}, α_total={:?}", - netuid, - pos_id, - rm.tao, - alpha_total_from_pool + "dissolve_all_lp: burned protocol pos: netuid={netuid:?}, pos_id={pos_id:?}, τ={tao:?}, α_total={alpha_total_from_pool:?}" ); } else { // ---------------- USER: refund τ and convert α → τ ---------------- @@ -1318,23 +1314,14 @@ impl Pallet { user_staked_alpha.saturating_add(alpha_total_from_pool); log::debug!( - "dissolve_all_lp: user dissolved & staked α: netuid={:?}, owner={:?}, pos_id={:?}, α_staked={:?}, target_uid={}", - netuid, - owner, - pos_id, - alpha_total_from_pool, - target_uid + "dissolve_all_lp: user dissolved & staked α: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, α_staked={alpha_total_from_pool:?}, target_uid={target_uid}" ); } else { // No permitted validators; burn to avoid balance drift. 
burned_alpha = burned_alpha.saturating_add(alpha_total_from_pool); log::debug!( - "dissolve_all_lp: no permitted validators; α burned: netuid={:?}, owner={:?}, pos_id={:?}, α_total={:?}", - netuid, - owner, - pos_id, - alpha_total_from_pool + "dissolve_all_lp: no permitted validators; α burned: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, α_total={alpha_total_from_pool:?}" ); } @@ -1347,11 +1334,7 @@ impl Pallet { } Err(e) => { log::debug!( - "dissolve_all_lp: force-close failed: netuid={:?}, owner={:?}, pos_id={:?}, err={:?}", - netuid, - owner, - pos_id, - e + "dissolve_all_lp: force-close failed: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, err={e:?}" ); continue; } @@ -1380,12 +1363,7 @@ impl Pallet { EnabledUserLiquidity::::remove(netuid); log::debug!( - "dissolve_all_liquidity_providers: netuid={:?}, users_refunded_total_τ={:?}, users_staked_total_α={:?}; protocol_burned: τ={:?}, α={:?}; state cleared", - netuid, - user_refunded_tao, - user_staked_alpha, - burned_tao, - burned_alpha + "dissolve_all_liquidity_providers: netuid={netuid:?}, users_refunded_total_τ={user_refunded_tao:?}, users_staked_total_α={user_staked_alpha:?}; protocol_burned: τ={burned_tao:?}, α={burned_alpha:?}; state cleared" ); return Ok(()); @@ -1413,8 +1391,7 @@ impl Pallet { EnabledUserLiquidity::::remove(netuid); log::debug!( - "dissolve_all_liquidity_providers: netuid={:?}, mode=V2-or-nonV3, state_cleared", - netuid + "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V2-or-nonV3, state_cleared" ); Ok(()) From 47120ba97ad6fbeedf0071fe57e2f9abdcbd74b8 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 23 Sep 2025 09:59:15 -0700 Subject: [PATCH 371/379] bump spec --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 254bec73a3..20f4bac2b3 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: 
RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 318, + spec_version: 319, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From e8cdc1c0e7eb0725da8cd44eb76db0f036ad981b Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 23 Sep 2025 10:25:47 -0700 Subject: [PATCH 372/379] blank commit From dda8b2421e15bba61687add4d4a895fbfaa0d99c Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 23 Sep 2025 11:29:56 -0700 Subject: [PATCH 373/379] test_dissolve_v3_green_path_refund_tao_stake_alpha --- pallets/swap/src/pallet/tests.rs | 174 +++++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) diff --git a/pallets/swap/src/pallet/tests.rs b/pallets/swap/src/pallet/tests.rs index dc7f08baa8..afd70e0b66 100644 --- a/pallets/swap/src/pallet/tests.rs +++ b/pallets/swap/src/pallet/tests.rs @@ -2536,3 +2536,177 @@ fn refund_alpha_same_cold_multiple_hotkeys_conserved_to_owner() { ); }); } + +#[test] +fn test_dissolve_v3_green_path_refund_tao_stake_alpha_and_clear_state() { + new_test_ext().execute_with(|| { + // --- Setup --- + let netuid = NetUid::from(42); + let cold = OK_COLDKEY_ACCOUNT_ID; + let hot = OK_HOTKEY_ACCOUNT_ID; + + assert_ok!(Swap::toggle_user_liquidity( + RuntimeOrigin::root(), + netuid.into(), + true + )); + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + assert!(SwapV3Initialized::::get(netuid)); + + // Tight in‑range band so BOTH τ and α are required. + let ct = CurrentTick::::get(netuid); + let tick_low = ct.saturating_sub(10); + let tick_high = ct.saturating_add(10); + let liquidity: u64 = 1_250_000; + + // Add liquidity and capture required τ/α. 
+ let (_pos_id, tao_needed, alpha_needed) = + Pallet::::do_add_liquidity(netuid, &cold, &hot, tick_low, tick_high, liquidity) + .expect("add in-range liquidity"); + assert!(tao_needed > 0, "in-range pos must require TAO"); + assert!(alpha_needed > 0, "in-range pos must require ALPHA"); + + // Determine the permitted validator with the highest trust (green path). + let trust = ::SubnetInfo::get_validator_trust(netuid.into()); + let permit = ::SubnetInfo::get_validator_permit(netuid.into()); + assert_eq!(trust.len(), permit.len(), "trust/permit must align"); + let target_uid: u16 = trust + .iter() + .zip(permit.iter()) + .enumerate() + .filter(|(_, (_t, p))| **p) + .max_by_key(|(_, (t, _))| *t) + .map(|(i, _)| i as u16) + .expect("at least one permitted validator"); + let validator_hotkey: ::AccountId = + ::SubnetInfo::hotkey_of_uid(netuid.into(), target_uid) + .expect("uid -> hotkey mapping must exist"); + + // --- Snapshot BEFORE we withdraw τ/α to fund the position --- + let tao_before = ::BalanceOps::tao_balance(&cold); + + let alpha_before_hot = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot); + let alpha_before_owner = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &cold); + let alpha_before_val = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &validator_hotkey); + + let alpha_before_total = if validator_hotkey == hot { + // Avoid double counting when validator == user's hotkey. 
+ alpha_before_hot + alpha_before_owner + } else { + alpha_before_hot + alpha_before_owner + alpha_before_val + }; + + // --- Mirror extrinsic bookkeeping: withdraw τ & α; bump provided reserves --- + let tao_taken = ::BalanceOps::decrease_balance(&cold, tao_needed.into()) + .expect("decrease TAO"); + let alpha_taken = ::BalanceOps::decrease_stake( + &cold, + &hot, + netuid.into(), + alpha_needed.into(), + ) + .expect("decrease ALPHA"); + + ::BalanceOps::increase_provided_tao_reserve(netuid.into(), tao_taken); + ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken); + + // --- Act: dissolve (GREEN PATH: permitted validators exist) --- + assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); + + // --- Assert: τ principal refunded to user --- + let tao_after = ::BalanceOps::tao_balance(&cold); + assert_eq!(tao_after, tao_before, "TAO principal must be refunded"); + + // --- α ledger assertions --- + let alpha_after_hot = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot); + let alpha_after_owner = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &cold); + let alpha_after_val = + ::BalanceOps::alpha_balance(netuid.into(), &cold, &validator_hotkey); + + // Owner ledger must be unchanged in the green path. + assert_eq!( + alpha_after_owner, alpha_before_owner, + "Owner α ledger must be unchanged (staked to validator, not refunded)" + ); + + if validator_hotkey == hot { + // Net effect: user's hot ledger returns to its original balance. + assert_eq!( + alpha_after_hot, alpha_before_hot, + "When validator == hotkey, user's hot ledger must net back to its original balance" + ); + + // Totals without double-counting the same ledger. 
+ let alpha_after_total = alpha_after_hot + alpha_after_owner; + assert_eq!( + alpha_after_total, alpha_before_total, + "Total α for the coldkey must be conserved (validator==hotkey)" + ); + } else { + assert!( + alpha_before_hot >= alpha_after_hot, + "hot ledger should not increase" + ); + assert!( + alpha_after_val >= alpha_before_val, + "validator ledger should not decrease" + ); + + let hot_loss = alpha_before_hot - alpha_after_hot; + let val_gain = alpha_after_val - alpha_before_val; + + assert_eq!( + val_gain, hot_loss, + "α that left the user's hot ledger must equal α credited to the validator ledger" + ); + + // Totals across distinct ledgers must be conserved. + let alpha_after_total = alpha_after_hot + alpha_after_owner + alpha_after_val; + assert_eq!( + alpha_after_total, alpha_before_total, + "Total α for the coldkey must be conserved" + ); + } + + // --- Assert: All positions (user + protocol) removed and V3 state cleared --- + let protocol_id = Pallet::::protocol_account_id(); + + assert_eq!(Pallet::::count_positions(netuid, &cold), 0); + let prot_positions_after = + Positions::::iter_prefix_values((netuid, protocol_id)).collect::>(); + assert!( + prot_positions_after.is_empty(), + "protocol positions must be removed" + ); + + // Ticks / liquidity / price / flags cleared + assert!(Ticks::::iter_prefix(netuid).next().is_none()); + assert!(Ticks::::get(netuid, TickIndex::MIN).is_none()); + assert!(Ticks::::get(netuid, TickIndex::MAX).is_none()); + assert!(!CurrentLiquidity::::contains_key(netuid)); + assert!(!CurrentTick::::contains_key(netuid)); + assert!(!AlphaSqrtPrice::::contains_key(netuid)); + assert!(!SwapV3Initialized::::contains_key(netuid)); + + // Fee globals cleared + assert!(!FeeGlobalTao::::contains_key(netuid)); + assert!(!FeeGlobalAlpha::::contains_key(netuid)); + + // Active tick bitmap cleared + assert!( + TickIndexBitmapWords::::iter_prefix((netuid,)) + .next() + .is_none(), + "active tick bitmap words must be cleared" + ); + + // 
Knobs removed + assert!(!FeeRate::::contains_key(netuid)); + assert!(!EnabledUserLiquidity::::contains_key(netuid)); + }); +} From 91e99596e9aea42f3ffcd7db5e274e24e47178c1 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 23 Sep 2025 13:42:59 -0700 Subject: [PATCH 374/379] add clear_protocol_liquidity --- pallets/subtensor/src/coinbase/root.rs | 1 + pallets/swap-interface/src/lib.rs | 1 + pallets/swap/src/pallet/impls.rs | 195 ++++++++++++++----------- 3 files changed, 111 insertions(+), 86 deletions(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 4cb9f177e1..6b09c9ed46 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -375,6 +375,7 @@ impl Pallet { // 2. --- Perform the cleanup before removing the network. T::SwapInterface::dissolve_all_liquidity_providers(netuid)?; Self::destroy_alpha_in_out_stakes(netuid)?; + T::SwapInterface::clear_protocol_liquidity(netuid)?; T::CommitmentsInterface::purge_netuid(netuid); // 3. --- Remove the network diff --git a/pallets/swap-interface/src/lib.rs b/pallets/swap-interface/src/lib.rs index d247b28d35..4998bbe379 100644 --- a/pallets/swap-interface/src/lib.rs +++ b/pallets/swap-interface/src/lib.rs @@ -36,6 +36,7 @@ pub trait SwapHandler { fn is_user_liquidity_enabled(netuid: NetUid) -> bool; fn dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult; fn toggle_user_liquidity(netuid: NetUid, enabled: bool); + fn clear_protocol_liquidity(netuid: NetUid) -> DispatchResult; } #[derive(Debug, PartialEq)] diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index a077182666..6def1d1a7f 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1216,26 +1216,29 @@ impl Pallet { /// Dissolve all LPs and clean state. 
pub fn do_dissolve_all_liquidity_providers(netuid: NetUid) -> DispatchResult { if SwapV3Initialized::::get(netuid) { - // 1) Snapshot (owner, position_id). + // 1) Snapshot only *non‑protocol* positions: (owner, position_id). struct CloseItem { owner: A, pos_id: PositionId, } + let protocol_account = Self::protocol_account_id(); + let mut to_close: sp_std::vec::Vec> = sp_std::vec::Vec::new(); for ((owner, pos_id), _pos) in Positions::::iter_prefix((netuid,)) { - to_close.push(CloseItem { owner, pos_id }); + if owner != protocol_account { + to_close.push(CloseItem { owner, pos_id }); + } } - let protocol_account = Self::protocol_account_id(); - - // Non‑protocol first - to_close - .sort_by(|a, b| (a.owner == protocol_account).cmp(&(b.owner == protocol_account))); + if to_close.is_empty() { + log::debug!( + "dissolve_all_lp: no user positions; netuid={netuid:?}, protocol liquidity untouched" + ); + return Ok(()); + } let mut user_refunded_tao = TaoCurrency::ZERO; let mut user_staked_alpha = AlphaCurrency::ZERO; - let mut burned_tao = TaoCurrency::ZERO; - let mut burned_alpha = AlphaCurrency::ZERO; let trust: Vec = T::SubnetInfo::get_validator_trust(netuid.into()); let permit: Vec = T::SubnetInfo::get_validator_permit(netuid.into()); @@ -1271,65 +1274,50 @@ impl Pallet { let alpha_total_from_pool: AlphaCurrency = rm.alpha.saturating_add(rm.fee_alpha); - if owner == protocol_account { - // ---------------- PROTOCOL: burn everything ---------------- - if rm.tao > TaoCurrency::ZERO { - burned_tao = burned_tao.saturating_add(rm.tao); - } - if alpha_total_from_pool > AlphaCurrency::ZERO { - burned_alpha = burned_alpha.saturating_add(alpha_total_from_pool); - } - let tao = rm.tao; - log::debug!( - "dissolve_all_lp: burned protocol pos: netuid={netuid:?}, pos_id={pos_id:?}, τ={tao:?}, α_total={alpha_total_from_pool:?}" - ); - } else { - // ---------------- USER: refund τ and convert α → τ ---------------- - - // 1) Refund τ principal directly. 
- if rm.tao > TaoCurrency::ZERO { - T::BalanceOps::increase_balance(&owner, rm.tao); - user_refunded_tao = user_refunded_tao.saturating_add(rm.tao); - T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao); - } + // ---------------- USER: refund τ and convert α → stake ---------------- - // 2) Stake ALL withdrawn α (principal + fees) to the best permitted validator. - if alpha_total_from_pool > AlphaCurrency::ZERO { - if let Some(target_uid) = pick_target_uid(&trust, &permit) { - let validator_hotkey: T::AccountId = - T::SubnetInfo::hotkey_of_uid(netuid.into(), target_uid) - .ok_or(sp_runtime::DispatchError::Other( - "validator_hotkey_missing", - ))?; - - // Stake α from LP owner (coldkey) to chosen validator (hotkey). - T::BalanceOps::increase_stake( - &owner, - &validator_hotkey, - netuid, - alpha_total_from_pool, + // 1) Refund τ principal directly. + if rm.tao > TaoCurrency::ZERO { + T::BalanceOps::increase_balance(&owner, rm.tao); + user_refunded_tao = user_refunded_tao.saturating_add(rm.tao); + T::BalanceOps::decrease_provided_tao_reserve(netuid, rm.tao); + } + + // 2) Stake ALL withdrawn α (principal + fees) to the best permitted validator. + if alpha_total_from_pool > AlphaCurrency::ZERO { + if let Some(target_uid) = pick_target_uid(&trust, &permit) { + let validator_hotkey: T::AccountId = + T::SubnetInfo::hotkey_of_uid(netuid.into(), target_uid).ok_or( + sp_runtime::DispatchError::Other( + "validator_hotkey_missing", + ), )?; - user_staked_alpha = - user_staked_alpha.saturating_add(alpha_total_from_pool); - - log::debug!( - "dissolve_all_lp: user dissolved & staked α: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, α_staked={alpha_total_from_pool:?}, target_uid={target_uid}" - ); - } else { - // No permitted validators; burn to avoid balance drift. 
- burned_alpha = - burned_alpha.saturating_add(alpha_total_from_pool); - log::debug!( - "dissolve_all_lp: no permitted validators; α burned: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, α_total={alpha_total_from_pool:?}" - ); - } - - T::BalanceOps::decrease_provided_alpha_reserve( + // Stake α from LP owner (coldkey) to chosen validator (hotkey). + T::BalanceOps::increase_stake( + &owner, + &validator_hotkey, netuid, alpha_total_from_pool, + )?; + + user_staked_alpha = + user_staked_alpha.saturating_add(alpha_total_from_pool); + + log::debug!( + "dissolve_all_lp: user dissolved & staked α: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, α_staked={alpha_total_from_pool:?}, target_uid={target_uid}" + ); + } else { + // No permitted validators; burn to avoid balance drift. + log::debug!( + "dissolve_all_lp: no permitted validators; α burned: netuid={netuid:?}, owner={owner:?}, pos_id={pos_id:?}, α_total={alpha_total_from_pool:?}" ); } + + T::BalanceOps::decrease_provided_alpha_reserve( + netuid, + alpha_total_from_pool, + ); } } Err(e) => { @@ -1341,41 +1329,74 @@ impl Pallet { } } - // 3) Clear active tick index entries, then all swap state. 
- let active_ticks: sp_std::vec::Vec = - Ticks::::iter_prefix(netuid).map(|(ti, _)| ti).collect(); - for ti in active_ticks { - ActiveTickIndexManager::::remove(netuid, ti); - } + log::debug!( + "dissolve_all_liquidity_providers (users-only): netuid={netuid:?}, users_refunded_total_τ={user_refunded_tao:?}, users_staked_total_α={user_staked_alpha:?}; protocol liquidity untouched" + ); - let _ = Positions::::clear_prefix((netuid,), u32::MAX, None); - let _ = Ticks::::clear_prefix(netuid, u32::MAX, None); + return Ok(()); + } - FeeGlobalTao::::remove(netuid); - FeeGlobalAlpha::::remove(netuid); - CurrentLiquidity::::remove(netuid); - CurrentTick::::remove(netuid); - AlphaSqrtPrice::::remove(netuid); - SwapV3Initialized::::remove(netuid); + log::debug!( + "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V2-or-nonV3, leaving all liquidity/state intact" + ); - let _ = TickIndexBitmapWords::::clear_prefix((netuid,), u32::MAX, None); - FeeRate::::remove(netuid); - EnabledUserLiquidity::::remove(netuid); + Ok(()) + } - log::debug!( - "dissolve_all_liquidity_providers: netuid={netuid:?}, users_refunded_total_τ={user_refunded_tao:?}, users_staked_total_α={user_staked_alpha:?}; protocol_burned: τ={burned_tao:?}, α={burned_alpha:?}; state cleared" - ); + /// Clear **protocol-owned** liquidity and wipe all swap state for `netuid`. + pub fn do_clear_protocol_liquidity(netuid: NetUid) -> DispatchResult { + let protocol_account = Self::protocol_account_id(); - return Ok(()); + // 1) Force-close only protocol positions, burning proceeds. + let mut burned_tao = TaoCurrency::ZERO; + let mut burned_alpha = AlphaCurrency::ZERO; + + // Collect protocol position IDs first to avoid mutating while iterating. 
+ let protocol_pos_ids: sp_std::vec::Vec = Positions::::iter_prefix((netuid,)) + .filter_map(|((owner, pos_id), _)| { + if owner == protocol_account { + Some(pos_id) + } else { + None + } + }) + .collect(); + + for pos_id in protocol_pos_ids { + match Self::do_remove_liquidity(netuid, &protocol_account, pos_id) { + Ok(rm) => { + let alpha_total_from_pool: AlphaCurrency = + rm.alpha.saturating_add(rm.fee_alpha); + let tao = rm.tao; + + if tao > TaoCurrency::ZERO { + burned_tao = burned_tao.saturating_add(tao); + } + if alpha_total_from_pool > AlphaCurrency::ZERO { + burned_alpha = burned_alpha.saturating_add(alpha_total_from_pool); + } + + log::debug!( + "clear_protocol_liquidity: burned protocol pos: netuid={netuid:?}, pos_id={pos_id:?}, τ={tao:?}, α_total={alpha_total_from_pool:?}" + ); + } + Err(e) => { + log::debug!( + "clear_protocol_liquidity: force-close failed: netuid={netuid:?}, pos_id={pos_id:?}, err={e:?}" + ); + continue; + } + } } - // V2 / non‑V3: ensure V3 residues are cleared (safe no‑ops). - let _ = Positions::::clear_prefix((netuid,), u32::MAX, None); + // 2) Clear active tick index entries, then all swap state (idempotent even if empty/non‑V3). 
let active_ticks: sp_std::vec::Vec = Ticks::::iter_prefix(netuid).map(|(ti, _)| ti).collect(); for ti in active_ticks { ActiveTickIndexManager::::remove(netuid, ti); } + + let _ = Positions::::clear_prefix((netuid,), u32::MAX, None); let _ = Ticks::::clear_prefix(netuid, u32::MAX, None); FeeGlobalTao::::remove(netuid); @@ -1386,12 +1407,11 @@ impl Pallet { SwapV3Initialized::::remove(netuid); let _ = TickIndexBitmapWords::::clear_prefix((netuid,), u32::MAX, None); - FeeRate::::remove(netuid); EnabledUserLiquidity::::remove(netuid); log::debug!( - "dissolve_all_liquidity_providers: netuid={netuid:?}, mode=V2-or-nonV3, state_cleared" + "clear_protocol_liquidity: netuid={netuid:?}, protocol_burned: τ={burned_tao:?}, α={burned_alpha:?}; state cleared" ); Ok(()) @@ -1494,6 +1514,9 @@ impl SwapHandler for Pallet { fn toggle_user_liquidity(netuid: NetUid, enabled: bool) { EnabledUserLiquidity::::insert(netuid, enabled) } + fn clear_protocol_liquidity(netuid: NetUid) -> DispatchResult { + Self::do_clear_protocol_liquidity(netuid) + } } #[derive(Debug, PartialEq)] From cd1ba1499a75adcaae8dc206032281d21fc5e424 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 23 Sep 2025 13:48:13 -0700 Subject: [PATCH 375/379] remove back check --- pallets/swap/src/pallet/impls.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index 6def1d1a7f..9a41283426 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1243,17 +1243,6 @@ impl Pallet { let trust: Vec = T::SubnetInfo::get_validator_trust(netuid.into()); let permit: Vec = T::SubnetInfo::get_validator_permit(netuid.into()); - if trust.len() != permit.len() { - let trust_len = trust.len(); - let permit_len = permit.len(); - log::debug!( - "dissolve_all_lp: ValidatorTrust/Permit length mismatch: netuid={netuid:?}, trust_len={trust_len}, permit_len={permit_len}" - ); - return 
Err(sp_runtime::DispatchError::Other( - "validator_meta_len_mismatch", - )); - } - // Helper: pick target validator uid, only among permitted validators, by highest trust. let pick_target_uid = |trust: &Vec, permit: &Vec| -> Option { let mut best_uid: Option = None; From 8621e16062476c4726dd7d4fbc2db1c61ce72cb9 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Tue, 23 Sep 2025 14:19:23 -0700 Subject: [PATCH 376/379] Update tests.rs --- pallets/swap/src/pallet/tests.rs | 145 ++++++++++++++++++++++++------- 1 file changed, 112 insertions(+), 33 deletions(-) diff --git a/pallets/swap/src/pallet/tests.rs b/pallets/swap/src/pallet/tests.rs index afd70e0b66..72c33d698f 100644 --- a/pallets/swap/src/pallet/tests.rs +++ b/pallets/swap/src/pallet/tests.rs @@ -1982,7 +1982,6 @@ fn test_swap_subtoken_disabled() { }); } -/// V3 path: protocol + user positions exist, fees accrued, everything must be removed. #[test] fn test_liquidate_v3_removes_positions_ticks_and_state() { new_test_ext().execute_with(|| { @@ -1992,7 +1991,7 @@ fn test_liquidate_v3_removes_positions_ticks_and_state() { assert_ok!(Pallet::::maybe_initialize_v3(netuid)); assert!(SwapV3Initialized::::get(netuid)); - // Enable user LP (mock usually enables for 0..=100, but be explicit and consistent) + // Enable user LP assert_ok!(Swap::toggle_user_liquidity( RuntimeOrigin::root(), netuid.into(), @@ -2041,14 +2040,14 @@ fn test_liquidate_v3_removes_positions_ticks_and_state() { assert!(Ticks::::get(netuid, TickIndex::MAX).is_some()); assert!(CurrentLiquidity::::get(netuid) > 0); - // There should be some bitmap words (active ticks) after adding a position. 
let had_bitmap_words = TickIndexBitmapWords::::iter_prefix((netuid,)) .next() .is_some(); assert!(had_bitmap_words); - // ACT: Liquidate & reset swap state + // ACT: users-only liquidation then protocol clear assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); + assert_ok!(Pallet::::do_clear_protocol_liquidity(netuid)); // ASSERT: positions cleared (both user and protocol) assert_eq!( @@ -2091,12 +2090,11 @@ fn test_liquidate_v3_removes_positions_ticks_and_state() { }); } -/// V3 path with user liquidity disabled at teardown: must still remove all positions and clear state. +/// V3 path with user liquidity disabled at teardown: +/// must still remove positions and clear state (after protocol clear). #[test] fn test_liquidate_v3_with_user_liquidity_disabled() { new_test_ext().execute_with(|| { - // Pick a netuid the mock treats as "disabled" by default (per your comment >100), - // then explicitly walk through enable -> add -> disable -> liquidate. let netuid = NetUid::from(101); assert_ok!(Pallet::::maybe_initialize_v3(netuid)); @@ -2125,15 +2123,16 @@ fn test_liquidate_v3_with_user_liquidity_disabled() { ) .expect("add liquidity"); - // Disable user LP *before* liquidation to validate that removal ignores this flag. + // Disable user LP *before* liquidation; removal must ignore this flag. assert_ok!(Swap::toggle_user_liquidity( RuntimeOrigin::root(), netuid.into(), false )); - // ACT + // Users-only dissolve, then clear protocol liquidity/state. assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); + assert_ok!(Pallet::::do_clear_protocol_liquidity(netuid)); // ASSERT: positions & ticks gone, state reset assert_eq!( @@ -2158,7 +2157,7 @@ fn test_liquidate_v3_with_user_liquidity_disabled() { assert!(!FeeGlobalTao::::contains_key(netuid)); assert!(!FeeGlobalAlpha::::contains_key(netuid)); - // `EnabledUserLiquidity` is removed by liquidation. + // `EnabledUserLiquidity` is removed by protocol clear stage. 
assert!(!EnabledUserLiquidity::::contains_key(netuid)); }); } @@ -2205,7 +2204,6 @@ fn test_liquidate_non_v3_uninitialized_ok_and_clears() { }); } -/// Idempotency: calling liquidation twice is safe (both V3 and non‑V3 flavors). #[test] fn test_liquidate_idempotent() { // V3 flavor @@ -2230,11 +2228,14 @@ fn test_liquidate_idempotent() { 123_456_789 )); - // 1st liquidation + // Users-only liquidations are idempotent. assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); - // 2nd liquidation (no state left) — must still succeed assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); + // Now clear protocol liquidity/state—also idempotent. + assert_ok!(Pallet::::do_clear_protocol_liquidity(netuid)); + assert_ok!(Pallet::::do_clear_protocol_liquidity(netuid)); + // State remains empty assert!( Positions::::iter_prefix_values((netuid, OK_COLDKEY_ACCOUNT_ID)) @@ -2254,7 +2255,7 @@ fn test_liquidate_idempotent() { new_test_ext().execute_with(|| { let netuid = NetUid::from(8); - // Never initialize V3 + // Never initialize V3; both calls no-op and succeed. assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); @@ -2286,7 +2287,7 @@ fn liquidate_v3_refunds_user_funds_and_clears_state() { )); assert_ok!(Pallet::::maybe_initialize_v3(netuid)); - // Use distinct cold/hot to demonstrate alpha refund goes to (owner, owner). + // Use distinct cold/hot to demonstrate alpha refund/stake accounting. let cold = OK_COLDKEY_ACCOUNT_ID; let hot = OK_HOTKEY_ACCOUNT_ID; @@ -2322,15 +2323,14 @@ fn liquidate_v3_refunds_user_funds_and_clears_state() { ::BalanceOps::increase_provided_tao_reserve(netuid.into(), tao_taken); ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken); - // Liquidate everything on the subnet. + // Users‑only liquidation. 
assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); // Expect balances restored to BEFORE snapshots (no swaps ran -> zero fees). - // TAO: we withdrew 'need_tao' above and liquidation refunded it, so we should be back to 'tao_before'. let tao_after = ::BalanceOps::tao_balance(&cold); assert_eq!(tao_after, tao_before, "TAO principal must be refunded"); - // ALPHA: refund is credited to (coldkey=cold, hotkey=cold). Compare totals across both ledgers. + // ALPHA totals conserved to owner (distribution may differ). let alpha_after_hot = ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot); let alpha_after_owner = @@ -2338,9 +2338,12 @@ fn liquidate_v3_refunds_user_funds_and_clears_state() { let alpha_after_total = alpha_after_hot + alpha_after_owner; assert_eq!( alpha_after_total, alpha_before_total, - "ALPHA principal must be refunded to the account (may be credited to (owner, owner))" + "ALPHA principal must be refunded/staked for the account (check totals)" ); + // Clear protocol liquidity and V3 state now. + assert_ok!(Pallet::::do_clear_protocol_liquidity(netuid)); + // User position(s) are gone and all V3 state cleared. assert_eq!(Pallet::::count_positions(netuid, &cold), 0); assert!(Ticks::::iter_prefix(netuid).next().is_none()); @@ -2386,10 +2389,10 @@ fn refund_alpha_single_provider_exact() { .expect("decrease ALPHA"); ::BalanceOps::increase_provided_alpha_reserve(netuid.into(), alpha_taken); - // --- Act: dissolve (calls refund_alpha inside). + // --- Act: users‑only dissolve. assert_ok!(Pallet::::do_dissolve_all_liquidity_providers(netuid)); - // --- Assert: refunded back to the owner (may credit to (cold,cold)). + // --- Assert: total α conserved to owner (may be staked to validator). 
let alpha_after_hot = ::BalanceOps::alpha_balance(netuid.into(), &cold, &hot); let alpha_after_owner = @@ -2397,9 +2400,12 @@ fn refund_alpha_single_provider_exact() { let alpha_after_total = alpha_after_hot + alpha_after_owner; assert_eq!( alpha_after_total, alpha_before_total, - "ALPHA principal must be conserved to the owner" + "ALPHA principal must be conserved to the account" ); + // Clear protocol liquidity and V3 state now. + assert_ok!(Pallet::::do_clear_protocol_liquidity(netuid)); + // --- State is cleared. assert!(Ticks::::iter_prefix(netuid).next().is_none()); assert_eq!(Pallet::::count_positions(netuid, &cold), 0); @@ -2593,7 +2599,6 @@ fn test_dissolve_v3_green_path_refund_tao_stake_alpha_and_clear_state() { ::BalanceOps::alpha_balance(netuid.into(), &cold, &validator_hotkey); let alpha_before_total = if validator_hotkey == hot { - // Avoid double counting when validator == user's hotkey. alpha_before_hot + alpha_before_owner } else { alpha_before_hot + alpha_before_owner + alpha_before_val @@ -2635,13 +2640,10 @@ fn test_dissolve_v3_green_path_refund_tao_stake_alpha_and_clear_state() { ); if validator_hotkey == hot { - // Net effect: user's hot ledger returns to its original balance. assert_eq!( alpha_after_hot, alpha_before_hot, "When validator == hotkey, user's hot ledger must net back to its original balance" ); - - // Totals without double-counting the same ledger. let alpha_after_total = alpha_after_hot + alpha_after_owner; assert_eq!( alpha_after_total, alpha_before_total, @@ -2659,13 +2661,11 @@ fn test_dissolve_v3_green_path_refund_tao_stake_alpha_and_clear_state() { let hot_loss = alpha_before_hot - alpha_after_hot; let val_gain = alpha_after_val - alpha_before_val; - assert_eq!( val_gain, hot_loss, "α that left the user's hot ledger must equal α credited to the validator ledger" ); - // Totals across distinct ledgers must be conserved. 
let alpha_after_total = alpha_after_hot + alpha_after_owner + alpha_after_val; assert_eq!( alpha_after_total, alpha_before_total, @@ -2673,9 +2673,10 @@ fn test_dissolve_v3_green_path_refund_tao_stake_alpha_and_clear_state() { ); } - // --- Assert: All positions (user + protocol) removed and V3 state cleared --- - let protocol_id = Pallet::::protocol_account_id(); + // Now clear protocol liquidity & state and assert full reset. + assert_ok!(Pallet::::do_clear_protocol_liquidity(netuid)); + let protocol_id = Pallet::::protocol_account_id(); assert_eq!(Pallet::::count_positions(netuid, &cold), 0); let prot_positions_after = Positions::::iter_prefix_values((netuid, protocol_id)).collect::>(); @@ -2684,7 +2685,6 @@ fn test_dissolve_v3_green_path_refund_tao_stake_alpha_and_clear_state() { "protocol positions must be removed" ); - // Ticks / liquidity / price / flags cleared assert!(Ticks::::iter_prefix(netuid).next().is_none()); assert!(Ticks::::get(netuid, TickIndex::MIN).is_none()); assert!(Ticks::::get(netuid, TickIndex::MAX).is_none()); @@ -2693,11 +2693,9 @@ fn test_dissolve_v3_green_path_refund_tao_stake_alpha_and_clear_state() { assert!(!AlphaSqrtPrice::::contains_key(netuid)); assert!(!SwapV3Initialized::::contains_key(netuid)); - // Fee globals cleared assert!(!FeeGlobalTao::::contains_key(netuid)); assert!(!FeeGlobalAlpha::::contains_key(netuid)); - // Active tick bitmap cleared assert!( TickIndexBitmapWords::::iter_prefix((netuid,)) .next() @@ -2705,8 +2703,89 @@ fn test_dissolve_v3_green_path_refund_tao_stake_alpha_and_clear_state() { "active tick bitmap words must be cleared" ); + assert!(!FeeRate::::contains_key(netuid)); + assert!(!EnabledUserLiquidity::::contains_key(netuid)); + }); +} + +#[test] +fn test_clear_protocol_liquidity_green_path() { + new_test_ext().execute_with(|| { + // --- Arrange --- + let netuid = NetUid::from(55); + + // Ensure the "user liquidity enabled" flag exists so we can verify it's removed later. 
+ assert_ok!(Pallet::::toggle_user_liquidity( + RuntimeOrigin::root(), + netuid, + true + )); + + // Initialize V3 state; this should set price/tick flags and create a protocol position. + assert_ok!(Pallet::::maybe_initialize_v3(netuid)); + assert!( + SwapV3Initialized::::get(netuid), + "V3 must be initialized" + ); + + // Sanity: protocol positions exist before clearing. + let protocol_id = Pallet::::protocol_account_id(); + let prot_positions_before = + Positions::::iter_prefix_values((netuid, protocol_id)).collect::>(); + assert!( + !prot_positions_before.is_empty(), + "protocol positions should exist after V3 init" + ); + + // --- Act --- + // Green path: just clear protocol liquidity and wipe all V3 state. + assert_ok!(Pallet::::do_clear_protocol_liquidity(netuid)); + + // --- Assert: all protocol positions removed --- + let prot_positions_after = + Positions::::iter_prefix_values((netuid, protocol_id)).collect::>(); + assert!( + prot_positions_after.is_empty(), + "protocol positions must be removed by do_clear_protocol_liquidity" + ); + + // --- Assert: V3 data wiped (idempotent even if some maps were empty) --- + // Ticks / active tick bitmap + assert!(Ticks::::iter_prefix(netuid).next().is_none()); + assert!( + TickIndexBitmapWords::::iter_prefix((netuid,)) + .next() + .is_none(), + "active tick bitmap words must be cleared" + ); + + // Fee globals + assert!(!FeeGlobalTao::::contains_key(netuid)); + assert!(!FeeGlobalAlpha::::contains_key(netuid)); + + // Price / tick / liquidity / flags + assert!(!AlphaSqrtPrice::::contains_key(netuid)); + assert!(!CurrentTick::::contains_key(netuid)); + assert!(!CurrentLiquidity::::contains_key(netuid)); + assert!(!SwapV3Initialized::::contains_key(netuid)); + // Knobs removed assert!(!FeeRate::::contains_key(netuid)); assert!(!EnabledUserLiquidity::::contains_key(netuid)); + + // --- And it's idempotent --- + assert_ok!(Pallet::::do_clear_protocol_liquidity(netuid)); + assert!( + Positions::::iter_prefix_values((netuid, 
protocol_id)) + .next() + .is_none() + ); + assert!(Ticks::::iter_prefix(netuid).next().is_none()); + assert!( + TickIndexBitmapWords::::iter_prefix((netuid,)) + .next() + .is_none() + ); + assert!(!SwapV3Initialized::::contains_key(netuid)); }); } From 2d1fed52ae1c75cfd4d458b4c562ad165923b1d7 Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 24 Sep 2025 09:31:00 -0700 Subject: [PATCH 377/379] use root --- pallets/swap/src/pallet/impls.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index 9a41283426..bfe15234ed 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1286,7 +1286,7 @@ impl Pallet { T::BalanceOps::increase_stake( &owner, &validator_hotkey, - netuid, + NetUid::ROOT, alpha_total_from_pool, )?; From 09a028765f0e4a942a113be7f0056f53be0d914a Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 24 Sep 2025 09:35:26 -0700 Subject: [PATCH 378/379] Update impls.rs --- pallets/swap/src/pallet/impls.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/swap/src/pallet/impls.rs b/pallets/swap/src/pallet/impls.rs index bfe15234ed..9a41283426 100644 --- a/pallets/swap/src/pallet/impls.rs +++ b/pallets/swap/src/pallet/impls.rs @@ -1286,7 +1286,7 @@ impl Pallet { T::BalanceOps::increase_stake( &owner, &validator_hotkey, - NetUid::ROOT, + netuid, alpha_total_from_pool, )?; From 321f198a9f1af959af04f46d216d2808ceeb380f Mon Sep 17 00:00:00 2001 From: John Reed <87283488+JohnReedV@users.noreply.github.com> Date: Wed, 24 Sep 2025 09:44:54 -0700 Subject: [PATCH 379/379] bump spec --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 20f4bac2b3..df11059c2a 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: 
RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 319, + spec_version: 320, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1,