From ca382f32033d8350d49934b30680d8c30dd9bdfc Mon Sep 17 00:00:00 2001
From: PG Herveou
Date: Mon, 19 Feb 2024 16:29:47 +0100
Subject: [PATCH 01/51] Contracts: xcm host fn fixes (#3086)

## Xcm changes:
- Fix `pallet_xcm::execute`: move the logic into the `ExecuteController` so it can be shared with anything that implements that trait.
- Make `ExecuteController::execute` return `DispatchErrorWithPostInfo` instead of `DispatchError`, so that we don't charge the full `max_weight` provided if the execution is incomplete (useful for `force_batch` or contract calls).
- Fix the docstring for `pallet_xcm::execute` to reflect the changes from #2405.
- Update the signature of `ExecuteController::execute`: we don't need to return the `Outcome` anymore, since we only care about `Outcome::Complete`.

## Contracts changes:
- Update the host fn `xcm_execute`: we don't need to write the `Outcome` to the sandbox memory anymore. That write was also never charged for, so this fixes that too.
- One of the issues was that the dry run of a contract that calls `xcm_execute` would exhaust the `gas_limit`. This is because `ExecuteController::execute` takes a `max_weight` argument, and since we don't want the user to specify it manually, we were passing everything left by pre-charging `ctx.ext.gas_meter().gas_left()`.
- To fix it, I added a `fn influence_lowest_gas_limit` to the `Token` trait and made it return `false` for `RuntimeCosts::CallXcmExecute`.
- Got rid of the `RuntimeToken` indirection; we can just use `RuntimeCosts` directly.

---------

Co-authored-by: command-bot <>
---
 polkadot/xcm/pallet-xcm/src/lib.rs | 60 +++++++-----
 polkadot/xcm/pallet-xcm/src/tests/mod.rs | 38 ++++----
 polkadot/xcm/xcm-builder/src/controller.rs | 15 ++-
 .../fixtures/contracts/xcm_execute.rs | 11 ++-
 .../frame/contracts/mock-network/src/lib.rs | 3 +-
 .../frame/contracts/mock-network/src/tests.rs | 71 ++++++++++----
 substrate/frame/contracts/src/exec.rs | 7 ++
 substrate/frame/contracts/src/gas.rs | 9 +-
 substrate/frame/contracts/src/wasm/mod.rs | 4 +
 substrate/frame/contracts/src/wasm/runtime.rs | 95 ++++++++-----------
 substrate/frame/contracts/uapi/src/host.rs | 3 +-
 .../frame/contracts/uapi/src/host/riscv32.rs | 2 +-
 .../frame/contracts/uapi/src/host/wasm32.rs | 7 +-
 13 files changed, 192 insertions(+), 133 deletions(-)

diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index 586c275ae980..5e1a3e55f9b6 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -29,7 +29,7 @@ pub mod migration;
 use codec::{Decode, Encode, EncodeLike, MaxEncodedLen};
 use frame_support::{
-	dispatch::GetDispatchInfo,
+	dispatch::{DispatchErrorWithPostInfo, GetDispatchInfo, WithPostDispatchInfo},
 	pallet_prelude::*,
 	traits::{
 		Contains, ContainsPair, Currency, Defensive, EnsureOrigin, Get, LockableCurrency,
@@ -291,22 +291,38 @@ pub mod pallet {
 			origin: OriginFor<T>,
 			message: Box<VersionedXcm<<T as Config>::RuntimeCall>>,
 			max_weight: Weight,
-		) -> Result<Outcome, DispatchError> {
-			let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?;
-			let mut hash = message.using_encoded(sp_io::hashing::blake2_256);
-			let message = (*message).try_into().map_err(|()| Error::<T>::BadVersion)?;
-			let value = (origin_location, message);
-			ensure!(T::XcmExecuteFilter::contains(&value), Error::<T>::Filtered);
-			let (origin_location, message) = value;
-			let outcome = T::XcmExecutor::prepare_and_execute(
-				origin_location,
-				message,
-				&mut hash,
-				max_weight,
-				max_weight,
-			);
+		) -> Result<Weight, DispatchErrorWithPostInfo> {
{:?}", message, max_weight); + let outcome = (|| { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let mut hash = message.using_encoded(sp_io::hashing::blake2_256); + let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; + let value = (origin_location, message); + ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); + let (origin_location, message) = value; + Ok(T::XcmExecutor::prepare_and_execute( + origin_location, + message, + &mut hash, + max_weight, + max_weight, + )) + })() + .map_err(|e: DispatchError| { + e.with_weight(::execute()) + })?; + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); - Ok(outcome) + let weight_used = outcome.weight_used(); + outcome.ensure_complete().map_err(|error| { + log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); + Error::::LocalExecutionIncomplete.with_weight( + weight_used.saturating_add( + ::execute(), + ), + ) + })?; + Ok(weight_used) } } @@ -1009,9 +1025,6 @@ pub mod pallet { /// No more than `max_weight` will be used in its attempted execution. If this is less than /// the maximum amount of weight that the message could take to be executed, then no /// execution attempt will be made. - /// - /// NOTE: A successful return to this does *not* imply that the `msg` was executed - /// successfully to completion; only that it was attempted. #[pallet::call_index(3)] #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( @@ -1019,13 +1032,8 @@ pub mod pallet { message: Box::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { - log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight); - let outcome = >::execute(origin, message, max_weight)?; - let weight_used = outcome.weight_used(); - outcome.ensure_complete().map_err(|error| { - log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); - Error::::LocalExecutionIncomplete - })?; + let weight_used = + >::execute(origin, message, max_weight)?; Ok(Some(weight_used.saturating_add(T::WeightInfo::execute())).into()) } diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index e5ac80c3a142..28c7d197443b 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -20,11 +20,12 @@ pub(crate) mod assets_transfer; use crate::{ mock::*, pallet::SupportedVersion, AssetTraps, Config, CurrentMigration, Error, - LatestVersionedLocation, Pallet, Queries, QueryStatus, VersionDiscoveryQueue, - VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, WeightInfo, + ExecuteControllerWeightInfo, LatestVersionedLocation, Pallet, Queries, QueryStatus, + VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, + WeightInfo, }; use frame_support::{ - assert_noop, assert_ok, + assert_err_ignore_postinfo, assert_noop, assert_ok, traits::{Currency, Hooks}, weights::Weight, }; @@ -450,19 +451,19 @@ fn trapped_assets_can_be_claimed() { assert_eq!(Balances::total_balance(&BOB), INITIAL_BALANCE + SEND_AMOUNT); assert_eq!(AssetTraps::::iter().collect::>(), vec![]); - let weight = BaseXcmWeight::get() * 3; - assert_ok!(>::execute( - RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::from(Xcm(vec![ - ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest 
+				]))),
+				weight
+			),
+			Error::<Test>::LocalExecutionIncomplete
+		);
 	});
 }
 
@@ -495,11 +496,14 @@ fn incomplete_execute_reverts_side_effects() {
 		// all effects are reverted and balances unchanged for either sender or receiver
 		assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE);
 		assert_eq!(Balances::total_balance(&BOB), INITIAL_BALANCE);
+
 		assert_eq!(
 			result,
 			Err(sp_runtime::DispatchErrorWithPostInfo {
 				post_info: frame_support::dispatch::PostDispatchInfo {
-					actual_weight: None,
+					actual_weight: Some(
+						<Pallet<Test> as ExecuteControllerWeightInfo>::execute() + weight
+					),
 					pays_fee: frame_support::dispatch::Pays::Yes,
 				},
 				error: sp_runtime::DispatchError::Module(sp_runtime::ModuleError {
diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs
index 8ead18b5bd7f..ba2b1fb44b8e 100644
--- a/polkadot/xcm/xcm-builder/src/controller.rs
+++ b/polkadot/xcm/xcm-builder/src/controller.rs
@@ -18,7 +18,10 @@
 //! Controller traits defined in this module are high-level traits that will rely on other traits
 //! from `xcm-executor` to perform their tasks.
 
-use frame_support::pallet_prelude::DispatchError;
+use frame_support::{
+	dispatch::{DispatchErrorWithPostInfo, WithPostDispatchInfo},
+	pallet_prelude::DispatchError,
+};
 use sp_std::boxed::Box;
 use xcm::prelude::*;
 pub use xcm_executor::traits::QueryHandler;
@@ -52,7 +55,8 @@ pub trait ExecuteController<Origin, RuntimeCall> {
 	/// Weight information for ExecuteController functions.
 	type WeightInfo: ExecuteControllerWeightInfo;
 
-	/// Attempt to execute an XCM locally, and return the outcome.
+	/// Attempt to execute an XCM locally, returns Ok with the weight consumed if the execution
+	/// completes successfully, Err otherwise.
 	///
 	/// # Parameters
 	///
@@ -63,7 +67,7 @@
 		origin: Origin,
 		message: Box<VersionedXcm<RuntimeCall>>,
 		max_weight: Weight,
-	) -> Result<Outcome, DispatchError>;
+	) -> Result<Weight, DispatchErrorWithPostInfo>;
 }
 
 /// Weight functions needed for [`SendController`].
@@ -137,8 +141,9 @@ impl<Origin, RuntimeCall> ExecuteController<Origin, RuntimeCall> for () {
 		_origin: Origin,
 		_message: Box<VersionedXcm<RuntimeCall>>,
 		_max_weight: Weight,
-	) -> Result<Outcome, DispatchError> {
-		Ok(Outcome::Error { error: XcmError::Unimplemented })
+	) -> Result<Weight, DispatchErrorWithPostInfo> {
+		Err(DispatchError::Other("ExecuteController::execute not implemented")
+			.with_weight(Weight::zero()))
 	}
 }
 
diff --git a/substrate/frame/contracts/fixtures/contracts/xcm_execute.rs b/substrate/frame/contracts/fixtures/contracts/xcm_execute.rs
index 09d0b6cf9728..1d570ffead71 100644
--- a/substrate/frame/contracts/fixtures/contracts/xcm_execute.rs
+++ b/substrate/frame/contracts/fixtures/contracts/xcm_execute.rs
@@ -30,10 +30,11 @@ pub extern "C" fn deploy() {}
 pub extern "C" fn call() {
 	input!(512, msg: [u8],);
 
-	let mut outcome = [0u8; 512];
-	let outcome = &mut &mut outcome[..];
-
-	#[allow(deprecated)]
-	api::xcm_execute(msg, outcome).unwrap();
-	api::return_value(uapi::ReturnFlags::empty(), outcome);
+	let err_code = match api::xcm_execute(msg) {
+		Ok(_) => 0u32,
+		Err(code) => code as u32,
+	};
+
+	api::return_value(uapi::ReturnFlags::empty(), &err_code.to_le_bytes());
 }
diff --git a/substrate/frame/contracts/mock-network/src/lib.rs b/substrate/frame/contracts/mock-network/src/lib.rs
index eea9dde062c8..8a17a3f2fa78 100644
--- a/substrate/frame/contracts/mock-network/src/lib.rs
+++ b/substrate/frame/contracts/mock-network/src/lib.rs
@@ -26,7 +26,8 @@ use crate::primitives::{AccountId, UNITS};
 use sp_runtime::BuildStorage;
 use xcm::latest::prelude::*;
 use xcm_executor::traits::ConvertLocation;
-use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chain, TestExt};
+pub use xcm_simulator::TestExt;
+use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chain};
 
 // Accounts
 pub const ADMIN: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([0u8; 32]);
diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs
index c25373bcbe3a..d22221fe8ee0 100644
--- a/substrate/frame/contracts/mock-network/src/tests.rs
+++ b/substrate/frame/contracts/mock-network/src/tests.rs
@@ -21,7 +21,6 @@ use crate::{
 	primitives::{AccountId, CENTS},
 	relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE,
 };
-use assert_matches::assert_matches;
 use codec::{Decode, Encode};
 use frame_support::{
 	assert_err,
@@ -31,11 +30,18 @@ use frame_support::{
 use pallet_balances::{BalanceLock, Reasons};
 use pallet_contracts::{Code, CollectEvents, DebugInfo, Determinism};
 use pallet_contracts_fixtures::compile_module;
+use pallet_contracts_uapi::ReturnErrorCode;
 use xcm::{v4::prelude::*, VersionedLocation, VersionedXcm};
 use xcm_simulator::TestExt;
 
 type ParachainContracts = pallet_contracts::Pallet<parachain::Runtime>;
 
+macro_rules! assert_return_code {
+	( $x:expr , $y:expr $(,)? ) => {{
+		assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32);
+	}};
+}
+
 /// Instantiate the tests contract, and fund it with some balance and assets.
 fn instantiate_test_contract(name: &str) -> AccountId {
 	let (wasm, _) = compile_module::<parachain::Runtime>(name).unwrap();
@@ -100,19 +106,57 @@ fn test_xcm_execute() {
 		DebugInfo::UnsafeDebug,
 		CollectEvents::UnsafeCollect,
 		Determinism::Enforced,
-	)
-	.result
-	.unwrap();
+	);
 
-	let mut data = &result.data[..];
-	let outcome = Outcome::decode(&mut data).expect("Failed to decode xcm_execute Outcome");
-	assert_matches!(outcome, Outcome::Complete { ..
}); + assert_eq!(result.gas_consumed, result.gas_required); + assert_return_code!(&result.result.unwrap(), ReturnErrorCode::Success); // Check if the funds are subtracted from the account of Alice and added to the account of // Bob. let initial = INITIAL_BALANCE; - assert_eq!(parachain::Assets::balance(0, contract_addr), initial); assert_eq!(ParachainBalances::free_balance(BOB), initial + amount); + assert_eq!(ParachainBalances::free_balance(&contract_addr), initial - amount); + }); +} + +#[test] +fn test_xcm_execute_incomplete() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + let amount = 10 * CENTS; + + // Execute XCM instructions through the contract. + ParaA::execute_with(|| { + // The XCM used to transfer funds to Bob. + let message: Xcm<()> = Xcm(vec![ + WithdrawAsset(vec![(Here, amount).into()].into()), + // This will fail as the contract does not have enough balance to complete both + // withdrawals. + WithdrawAsset(vec![(Here, INITIAL_BALANCE).into()].into()), + DepositAsset { + assets: All.into(), + beneficiary: AccountId32 { network: None, id: BOB.clone().into() }.into(), + }, + ]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V4(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ); + + assert_eq!(result.gas_consumed, result.gas_required); + assert_return_code!(&result.result.unwrap(), ReturnErrorCode::XcmExecutionFailed); + + assert_eq!(ParachainBalances::free_balance(BOB), INITIAL_BALANCE); + assert_eq!(ParachainBalances::free_balance(&contract_addr), INITIAL_BALANCE - amount); }); } @@ -182,17 +226,10 @@ fn test_xcm_execute_reentrant_call() { DebugInfo::UnsafeDebug, CollectEvents::UnsafeCollect, Determinism::Enforced, - ) - .result - .unwrap(); - - let mut data = &result.data[..]; - let outcome = Outcome::decode(&mut data).expect("Failed to decode xcm_execute Outcome"); - assert_matches!( - outcome, - Outcome::Incomplete { used: _, error: XcmError::ExpectationFalse } ); + assert_return_code!(&result.result.unwrap(), ReturnErrorCode::XcmExecutionFailed); + // Funds should not change hands as the XCM transact failed. assert_eq!(ParachainBalances::free_balance(BOB), INITIAL_BALANCE); }); diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index 6f7131f3ea41..79220021efe4 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -287,6 +287,9 @@ pub trait Ext: sealing::Sealed { /// Returns `true` if debug message recording is enabled. Otherwise `false` is returned. fn append_debug_buffer(&mut self, msg: &str) -> bool; + /// Returns `true` if debug message recording is enabled. Otherwise `false` is returned. + fn debug_buffer_enabled(&self) -> bool; + /// Call some dispatchable and return the result. 
 	fn call_runtime(&self, call: <Self::T as Config>::RuntimeCall) -> DispatchResultWithPostInfo;
@@ -1429,6 +1432,10 @@ where
 		self.top_frame_mut().nested_storage.charge(diff)
 	}
 
+	fn debug_buffer_enabled(&self) -> bool {
+		self.debug_message.is_some()
+	}
+
 	fn append_debug_buffer(&mut self, msg: &str) -> bool {
 		if let Some(buffer) = &mut self.debug_message {
 			buffer
diff --git a/substrate/frame/contracts/src/gas.rs b/substrate/frame/contracts/src/gas.rs
index 11292dc03f03..9271b615d002 100644
--- a/substrate/frame/contracts/src/gas.rs
+++ b/substrate/frame/contracts/src/gas.rs
@@ -63,6 +63,11 @@ pub trait Token<T: Config>: Copy + Clone + TestAuxiliaries {
 	/// while calculating the amount. In this case it is ok to use saturating operations
 	/// since on overflow they will return `max_value` which should consume all gas.
 	fn weight(&self) -> Weight;
+
+	/// Returns true if this token is expected to influence the lowest gas limit.
+	fn influence_lowest_gas_limit(&self) -> bool {
+		true
+	}
 }
 
 /// A wrapper around a type-erased trait object of what used to be a `Token`.
@@ -160,7 +165,9 @@ impl<T: Config> GasMeter<T> {
 	/// This is when a maximum a priori amount was charged and then should be partially
 	/// refunded to match the actual amount.
 	pub fn adjust_gas<Tok: Token<T>>(&mut self, charged_amount: ChargedAmount, token: Tok) {
-		self.gas_left_lowest = self.gas_left_lowest();
+		if token.influence_lowest_gas_limit() {
+			self.gas_left_lowest = self.gas_left_lowest();
+		}
 		let adjustment = charged_amount.0.saturating_sub(token.weight());
 		self.gas_left = self.gas_left.saturating_add(adjustment).min(self.gas_limit);
 	}
diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs
index d00ec2e47521..1c7395ec3bff 100644
--- a/substrate/frame/contracts/src/wasm/mod.rs
+++ b/substrate/frame/contracts/src/wasm/mod.rs
@@ -687,6 +687,10 @@ mod tests {
 			&mut self.gas_meter
 		}
 		fn charge_storage(&mut self, _diff: &crate::storage::meter::Diff) {}
+
+		fn debug_buffer_enabled(&self) -> bool {
+			true
+		}
 		fn append_debug_buffer(&mut self, msg: &str) -> bool {
 			self.debug_buffer.extend(msg.as_bytes());
 			true
diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs
index 3af0d04a3ad1..74b19910c371 100644
--- a/substrate/frame/contracts/src/wasm/runtime.rs
+++ b/substrate/frame/contracts/src/wasm/runtime.rs
@@ -21,7 +21,6 @@ use crate::{
 	exec::{ExecError, ExecResult, Ext, Key, TopicOf},
 	gas::{ChargedAmount, Token},
 	primitives::ExecReturnValue,
-	schedule::HostFnWeights,
 	BalanceOf, CodeHash, Config, DebugBufferVec, Error, SENTINEL,
 };
 use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen};
@@ -235,6 +234,8 @@ pub enum RuntimeCosts {
 	ChainExtension(Weight),
 	/// Weight charged for calling into the runtime.
 	CallRuntime(Weight),
+	/// Weight charged for calling xcm_execute.
+	CallXcmExecute(Weight),
 	/// Weight of calling `seal_set_code_hash`
 	SetCodeHash,
 	/// Weight of calling `ecdsa_to_eth_address`
@@ -251,10 +252,18 @@ pub enum RuntimeCosts {
 	RemoveDelegateDependency,
 }
 
-impl RuntimeCosts {
-	fn token<T: Config>(&self, s: &HostFnWeights<T>) -> RuntimeToken<T> {
+impl<T: Config> Token<T> for RuntimeCosts {
+	fn influence_lowest_gas_limit(&self) -> bool {
+		match self {
+			&Self::CallXcmExecute(_) => false,
+			_ => true,
+		}
+	}
+
+	fn weight(&self) -> Weight {
+		let s = T::Schedule::get().host_fn_weights;
 		use self::RuntimeCosts::*;
-		let weight = match *self {
+		match *self {
 			CopyFromContract(len) => s.return_per_byte.saturating_mul(len.into()),
 			CopyToContract(len) => s.input_per_byte.saturating_mul(len.into()),
 			Caller => s.caller,
@@ -323,8 +332,7 @@ impl RuntimeCosts {
 			Sr25519Verify(len) => s
 				.sr25519_verify
 				.saturating_add(s.sr25519_verify_per_byte.saturating_mul(len.into())),
-			ChainExtension(weight) => weight,
-			CallRuntime(weight) => weight,
+			ChainExtension(weight) | CallRuntime(weight) | CallXcmExecute(weight) => weight,
 			SetCodeHash => s.set_code_hash,
 			EcdsaToEthAddress => s.ecdsa_to_eth_address,
 			ReentrantCount => s.reentrance_count,
@@ -332,11 +340,6 @@ impl RuntimeCosts {
 			InstantationNonce => s.instantiation_nonce,
 			AddDelegateDependency => s.add_delegate_dependency,
 			RemoveDelegateDependency => s.remove_delegate_dependency,
-		};
-		RuntimeToken {
-			#[cfg(test)]
-			_created_from: *self,
-			weight,
 		}
 	}
 }
@@ -347,25 +350,10 @@
 /// a function won't work out.
 macro_rules! charge_gas {
 	($runtime:expr, $costs:expr) => {{
-		let token = $costs.token(&$runtime.ext.schedule().host_fn_weights);
-		$runtime.ext.gas_meter_mut().charge(token)
+		$runtime.ext.gas_meter_mut().charge($costs)
 	}};
 }
 
-#[cfg_attr(test, derive(Debug, PartialEq, Eq))]
-#[derive(Copy, Clone)]
-struct RuntimeToken<T: Config> {
-	#[cfg(test)]
-	_created_from: RuntimeCosts,
-	weight: Weight,
-}
-
-impl<T: Config> Token<T> for RuntimeToken<T> {
-	fn weight(&self) -> Weight {
-		self.weight
-	}
-}
-
 /// The kind of call that should be performed.
 enum CallType {
 	/// Execute another instantiated contract
@@ -506,28 +494,25 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> {
 	/// This is when a maximum a priori amount was charged and then should be partially
 	/// refunded to match the actual amount.
 	pub fn adjust_gas(&mut self, charged: ChargedAmount, actual_costs: RuntimeCosts) {
-		let token = actual_costs.token(&self.ext.schedule().host_fn_weights);
-		self.ext.gas_meter_mut().adjust_gas(charged, token);
+		self.ext.gas_meter_mut().adjust_gas(charged, actual_costs);
 	}
 
 	/// Charge, Run and adjust gas, for executing the given dispatchable.
-	fn call_dispatchable<
-		ErrorReturnCode: Get<ReturnErrorCode>,
-		F: FnOnce(&mut Self) -> DispatchResultWithPostInfo,
-	>(
+	fn call_dispatchable<ErrorReturnCode: Get<ReturnErrorCode>>(
 		&mut self,
 		dispatch_info: DispatchInfo,
-		run: F,
+		runtime_cost: impl Fn(Weight) -> RuntimeCosts,
+		run: impl FnOnce(&mut Self) -> DispatchResultWithPostInfo,
 	) -> Result<ReturnErrorCode, TrapReason> {
 		use frame_support::dispatch::extract_actual_weight;
-		let charged = self.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?;
+		let charged = self.charge_gas(runtime_cost(dispatch_info.weight))?;
 		let result = run(self);
 		let actual_weight = extract_actual_weight(&result, &dispatch_info);
-		self.adjust_gas(charged, RuntimeCosts::CallRuntime(actual_weight));
+		self.adjust_gas(charged, runtime_cost(actual_weight));
 		match result {
 			Ok(_) => Ok(ReturnErrorCode::Success),
 			Err(e) => {
-				if self.ext.append_debug_buffer("") {
+				if self.ext.debug_buffer_enabled() {
 					self.ext.append_debug_buffer("call failed with: ");
 					self.ext.append_debug_buffer(e.into());
 				};
@@ -2113,9 +2098,11 @@ pub mod env {
 		ctx.charge_gas(RuntimeCosts::CopyFromContract(call_len))?;
 		let call: <E::T as Config>::RuntimeCall =
 			ctx.read_sandbox_memory_as_unbounded(memory, call_ptr, call_len)?;
-		ctx.call_dispatchable::<CallRuntimeFailed>(call.get_dispatch_info(), |ctx| {
-			ctx.ext.call_runtime(call)
-		})
+		ctx.call_dispatchable::<CallRuntimeFailed>(
+			call.get_dispatch_info(),
+			RuntimeCosts::CallRuntime,
+			|ctx| ctx.ext.call_runtime(call),
+		)
 	}
 
 	/// Execute an XCM program locally, using the contract's address as the origin.
 	fn xcm_execute(
 		ctx: _,
 		memory: _,
 		msg_ptr: u32,
 		msg_len: u32,
-		output_ptr: u32,
 	) -> Result<ReturnErrorCode, TrapReason> {
 		use frame_support::dispatch::DispatchInfo;
 		use xcm::VersionedXcm;
 
 		ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?;
 		let message: VersionedXcm<CallOf<E::T>> =
 			ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?;
+		ensure_executable::<E::T>(&message)?;
 
 		let execute_weight =
 			<<E::T as Config>::Xcm as ExecuteController<_, _>>::WeightInfo::execute();
 		let weight = ctx.ext.gas_meter().gas_left().max(execute_weight);
 		let dispatch_info = DispatchInfo { weight, ..Default::default() };
 
-		ensure_executable::<E::T>(&message)?;
-		ctx.call_dispatchable::<XcmExecutionFailed>(dispatch_info, |ctx| {
-			let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into();
-			let outcome = <<E::T as Config>::Xcm>::execute(
-				origin,
-				Box::new(message),
-				weight.saturating_sub(execute_weight),
-			)?;
 
-			ctx.write_sandbox_memory(memory, output_ptr, &outcome.encode())?;
-			let pre_dispatch_weight =
-				<<E::T as Config>::Xcm as ExecuteController<_, _>>::WeightInfo::execute();
-			Ok(Some(outcome.weight_used().saturating_add(pre_dispatch_weight)).into())
-		})
+		ctx.call_dispatchable::<XcmExecutionFailed>(
+			dispatch_info,
+			RuntimeCosts::CallXcmExecute,
+			|ctx| {
+				let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into();
+				let weight_used = <<E::T as Config>::Xcm>::execute(
+					origin,
+					Box::new(message),
+					weight.saturating_sub(execute_weight),
+				)?;
 
+				Ok(Some(weight_used.saturating_add(execute_weight)).into())
+			},
+		)
 	}
 
 	/// Send an XCM program from the contract to the specified destination.
diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs
index c8b9ae8b2def..7894dc3a210a 100644
--- a/substrate/frame/contracts/uapi/src/host.rs
+++ b/substrate/frame/contracts/uapi/src/host.rs
@@ -792,7 +792,7 @@ pub trait HostFn {
 	#[deprecated(
 		note = "Unstable function. Behaviour can change without further notice. Use only for testing."
 	)]
-	fn xcm_execute(msg: &[u8], output: &mut &mut [u8]) -> Result;
+	fn xcm_execute(msg: &[u8]) -> Result;
 
 	/// Send an XCM program from the contract to the specified destination.
 	/// This is equivalent to dispatching `pallet_xcm::send` through `call_runtime`, except that
@@ -804,7 +804,6 @@
 	///   traps otherwise.
 	/// - `msg`: The message, should be decodable as a [VersionedXcm](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedXcm.html),
 	///   traps otherwise.
-	/// - `output`: A reference to the output data buffer to write the [XcmHash](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/v3/type.XcmHash.html)
 	///
 	/// # Return
 	///
diff --git a/substrate/frame/contracts/uapi/src/host/riscv32.rs b/substrate/frame/contracts/uapi/src/host/riscv32.rs
index b1934cc469e4..dc1c48308df8 100644
--- a/substrate/frame/contracts/uapi/src/host/riscv32.rs
+++ b/substrate/frame/contracts/uapi/src/host/riscv32.rs
@@ -297,7 +297,7 @@ impl HostFn for HostFnImpl {
 		todo!()
 	}
 
-	fn xcm_execute(msg: &[u8], output: &mut &mut [u8]) -> Result {
+	fn xcm_execute(msg: &[u8]) -> Result {
 		todo!()
 	}
 
diff --git a/substrate/frame/contracts/uapi/src/host/wasm32.rs b/substrate/frame/contracts/uapi/src/host/wasm32.rs
index 77cf22891e2f..3df3abeb7dcd 100644
--- a/substrate/frame/contracts/uapi/src/host/wasm32.rs
+++ b/substrate/frame/contracts/uapi/src/host/wasm32.rs
@@ -160,7 +160,7 @@ mod sys {
 
 	pub fn weight_to_fee(gas: u64, output_ptr: *mut u8, output_len_ptr: *mut u32);
 
-	pub fn xcm_execute(msg_ptr: *const u8, msg_len: u32, output_ptr: *mut u8) -> ReturnCode;
+	pub fn xcm_execute(msg_ptr: *const u8, msg_len: u32) -> ReturnCode;
 
 	pub fn xcm_send(
 		dest_ptr: *const u8,
@@ -819,9 +819,8 @@ impl HostFn for HostFnImpl {
 		unsafe { sys::reentrance_count() }
 	}
 
-	fn xcm_execute(msg: &[u8], output: &mut &mut [u8]) -> Result {
-		let ret_code =
-			unsafe { sys::xcm_execute(msg.as_ptr(), msg.len() as _, output.as_mut_ptr()) };
+	fn xcm_execute(msg: &[u8]) -> Result {
+		let ret_code = unsafe { sys::xcm_execute(msg.as_ptr(), msg.len() as _) };
 		ret_code.into()
 	}
 
From 5f8b6f38a3406b5ca901b8972860c78300313659 Mon Sep 17 00:00:00 2001
From: girazoki
Date: Tue, 20 Feb 2024 15:28:05 +0100
Subject: [PATCH 02/51] implement ConversionToAssetBalance in asset-rate
 (#2903)

Implements the `ConversionToAssetBalance` trait for asset-rate by computing `1/rate * balance`.

---
 prdoc/pr_2903.prdoc | 15 +++++++++++
 substrate/frame/asset-rate/src/lib.rs | 34 +++++++++++++++++++++++--
 substrate/frame/asset-rate/src/tests.rs | 29 +++++++++++++++++++--
 3 files changed, 74 insertions(+), 4 deletions(-)
 create mode 100644 prdoc/pr_2903.prdoc

diff --git a/prdoc/pr_2903.prdoc b/prdoc/pr_2903.prdoc
new file mode 100644
index 000000000000..dd22fa0eea37
--- /dev/null
+++ b/prdoc/pr_2903.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Implement `ConversionToAssetBalance` in asset-rate"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Implements the `ConversionToAssetBalance` trait for the asset-rate pallet.
+
+      Previously only the `ConversionFromAssetBalance` trait was implemented, which allows converting an asset balance into the corresponding native balance.
+
+      `ConversionToAssetBalance` allows using pallet-asset-rate, e.g., as a mechanism to charge XCM fees in an asset other than the native one.
+crates: [ ]
+
diff --git a/substrate/frame/asset-rate/src/lib.rs b/substrate/frame/asset-rate/src/lib.rs
index d4afca8b73c4..befabfe54aa0 100644
--- a/substrate/frame/asset-rate/src/lib.rs
+++ b/substrate/frame/asset-rate/src/lib.rs
@@ -59,8 +59,14 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use frame_support::traits::{fungible::Inspect, tokens::ConversionFromAssetBalance};
-use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128};
+use frame_support::traits::{
+	fungible::Inspect,
+	tokens::{ConversionFromAssetBalance, ConversionToAssetBalance},
+};
+use sp_runtime::{
+	traits::{CheckedDiv, Zero},
+	FixedPointNumber, FixedU128,
+};
 use sp_std::boxed::Box;
 
 pub use pallet::*;
@@ -144,6 +150,8 @@ pub mod pallet {
 		UnknownAssetKind,
 		/// The given asset ID already has an assigned conversion rate and cannot be re-created.
 		AlreadyExists,
+		/// Overflow occurred when calculating the inverse rate.
+		Overflow,
 	}
 
 	#[pallet::call]
@@ -246,3 +254,25 @@ where
 		pallet::ConversionRateToNative::<T>::set(asset_id.clone(), Some(1.into()));
 	}
 }
+
+/// Exposes conversion of a native balance to an asset balance.
+impl<T> ConversionToAssetBalance<BalanceOf<T>, AssetKindOf<T>, BalanceOf<T>> for Pallet<T>
+where
+	T: Config,
+{
+	type Error = pallet::Error<T>;
+
+	fn to_asset_balance(
+		balance: BalanceOf<T>,
+		asset_kind: AssetKindOf<T>,
+	) -> Result<BalanceOf<T>, pallet::Error<T>> {
+		let rate = pallet::ConversionRateToNative::<T>::get(asset_kind)
+			.ok_or(pallet::Error::<T>::UnknownAssetKind.into())?;
+
+		// We cannot use `saturating_div` here so we use `checked_div`.
+		Ok(FixedU128::from_u32(1)
+			.checked_div(&rate)
+			.ok_or(pallet::Error::<T>::Overflow.into())?
+			.saturating_mul_int(balance))
+	}
+}
diff --git a/substrate/frame/asset-rate/src/tests.rs b/substrate/frame/asset-rate/src/tests.rs
index 452265476093..fc7bf2f15031 100644
--- a/substrate/frame/asset-rate/src/tests.rs
+++ b/substrate/frame/asset-rate/src/tests.rs
@@ -131,12 +131,19 @@ fn convert_works() {
 			FixedU128::from_float(2.51)
 		));
 
-		let conversion = <AssetRate as ConversionFromAssetBalance<
+		let conversion_from_asset = <AssetRate as ConversionFromAssetBalance<
 			BalanceOf<Test>,
 			<Test as pallet_asset_rate::Config>::AssetKind,
 			BalanceOf<Test>,
 		>>::from_asset_balance(10, ASSET_ID);
-		assert_eq!(conversion.expect("Conversion rate exists for asset"), 25);
+		assert_eq!(conversion_from_asset.expect("Conversion rate exists for asset"), 25);
+
+		let conversion_to_asset = <AssetRate as ConversionToAssetBalance<
+			BalanceOf<Test>,
+			<Test as pallet_asset_rate::Config>::AssetKind,
+			BalanceOf<Test>,
+		>>::to_asset_balance(25, ASSET_ID);
+		assert_eq!(conversion_to_asset.expect("Conversion rate exists for asset"), 9);
 	});
 }
@@ -151,3 +158,21 @@
 		assert!(conversion.is_err());
 	});
 }
+
+#[test]
+fn convert_overflow_throws() {
+	new_test_ext().execute_with(|| {
+		assert_ok!(AssetRate::create(
+			RuntimeOrigin::root(),
+			Box::new(ASSET_ID),
+			FixedU128::from_u32(0)
+		));
+
+		let conversion = <AssetRate as ConversionToAssetBalance<
+			BalanceOf<Test>,
+			<Test as pallet_asset_rate::Config>::AssetKind,
+			BalanceOf<Test>,
+		>>::to_asset_balance(10, ASSET_ID);
+		assert!(conversion.is_err());
+	});
+}
From ef6ac94f5218368fd89ce73493909bbfbc0eace8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Tue, 20 Feb 2024 15:28:05 +0100
Subject: [PATCH 03/51] Downgrade log message to `trace` (#3405)

This spams logs in `Debug` with no useful information.
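For anyone who still needs this message, it remains available at the `trace` level. A hypothetical way to surface it when running a node (the `--log` flag is the standard Substrate log-filter option; the exact target name is an assumption based on the subsystem's usual `parachain::*` naming):

```sh
# Enable trace logging for the prospective-parachains subsystem only,
# keeping the rest of the node at the default log level.
polkadot --log parachain::prospective-parachains=trace
```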
--- polkadot/node/core/prospective-parachains/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 6e6915b92728..5937a1c1fb9f 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -298,7 +298,7 @@ async fn handle_active_leaves_update( ) .expect("ancestors are provided in reverse order and correctly; qed"); - gum::debug!( + gum::trace!( target: LOG_TARGET, relay_parent = ?hash, min_relay_parent = scope.earliest_relay_parent().number, From e89d0fca351de0712f104c55fe45ed124b5c6968 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Tue, 20 Feb 2024 15:28:05 +0100 Subject: [PATCH 04/51] Lift dependencies to the workspace (Part 2/x) (#3366) Lifting some more dependencies to the workspace. Just using the most-often updated ones for now. It can be reproduced locally. ```sh # First you can check if there would be semver incompatible bumps (looks good in this case): $ zepter transpose dependency lift-to-workspace --ignore-errors syn quote thiserror "regex:^serde.*" # Then apply the changes: $ zepter transpose dependency lift-to-workspace --version-resolver=highest syn quote thiserror "regex:^serde.*" --fix # And format the changes: $ taplo format --config .config/taplo.toml ``` --------- Signed-off-by: Oliver Tale-Yazdi --- .config/taplo.toml | 1 + Cargo.toml | 8 ++++++++ bridges/primitives/header-chain/Cargo.toml | 2 +- bridges/primitives/messages/Cargo.toml | 2 +- bridges/primitives/polkadot-core/Cargo.toml | 2 +- bridges/primitives/runtime/Cargo.toml | 2 +- bridges/snowbridge/pallets/ethereum-client/Cargo.toml | 8 ++++---- bridges/snowbridge/pallets/inbound-queue/Cargo.toml | 2 +- bridges/snowbridge/pallets/outbound-queue/Cargo.toml | 2 +- bridges/snowbridge/primitives/beacon/Cargo.toml | 2 +- bridges/snowbridge/primitives/core/Cargo.toml | 2 +- bridges/snowbridge/primitives/ethereum/Cargo.toml | 6 +++--- bridges/snowbridge/primitives/router/Cargo.toml | 2 +- bridges/snowbridge/runtime/test-common/Cargo.toml | 2 +- cumulus/client/consensus/proposer/Cargo.toml | 2 +- cumulus/client/relay-chain-interface/Cargo.toml | 2 +- cumulus/client/relay-chain-rpc-interface/Cargo.toml | 6 +++--- cumulus/pallets/parachain-system/proc-macro/Cargo.toml | 4 ++-- cumulus/parachain-template/node/Cargo.toml | 4 ++-- cumulus/parachain-template/pallets/template/Cargo.toml | 2 +- .../runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 +- .../runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml | 2 +- .../runtimes/coretime/coretime-rococo/Cargo.toml | 2 +- .../runtimes/coretime/coretime-westend/Cargo.toml | 2 +- .../parachains/runtimes/people/people-rococo/Cargo.toml | 2 +- .../parachains/runtimes/people/people-westend/Cargo.toml | 2 +- cumulus/polkadot-parachain/Cargo.toml | 4 ++-- cumulus/test/service/Cargo.toml | 4 ++-- polkadot/cli/Cargo.toml | 2 +- polkadot/erasure-coding/Cargo.toml | 2 +- polkadot/node/collation-generation/Cargo.toml | 2 +- polkadot/node/core/approval-voting/Cargo.toml | 2 +- polkadot/node/core/av-store/Cargo.toml | 2 +- polkadot/node/core/backing/Cargo.toml | 2 +- polkadot/node/core/bitfield-signing/Cargo.toml | 2 +- polkadot/node/core/chain-selection/Cargo.toml | 2 +- polkadot/node/core/dispute-coordinator/Cargo.toml | 2 +- polkadot/node/core/parachains-inherent/Cargo.toml | 2 +- polkadot/node/core/prospective-parachains/Cargo.toml | 2 +- polkadot/node/core/provisioner/Cargo.toml | 2 
+- polkadot/node/core/pvf-checker/Cargo.toml | 2 +- polkadot/node/core/pvf/Cargo.toml | 2 +- polkadot/node/core/pvf/common/Cargo.toml | 2 +- polkadot/node/gum/proc-macro/Cargo.toml | 4 ++-- polkadot/node/jaeger/Cargo.toml | 2 +- .../node/network/availability-distribution/Cargo.toml | 2 +- polkadot/node/network/availability-recovery/Cargo.toml | 2 +- polkadot/node/network/bridge/Cargo.toml | 2 +- polkadot/node/network/collator-protocol/Cargo.toml | 2 +- polkadot/node/network/dispute-distribution/Cargo.toml | 2 +- polkadot/node/network/protocol/Cargo.toml | 2 +- polkadot/node/network/statement-distribution/Cargo.toml | 2 +- polkadot/node/primitives/Cargo.toml | 4 ++-- polkadot/node/service/Cargo.toml | 6 +++--- polkadot/node/subsystem-bench/Cargo.toml | 4 ++-- polkadot/node/subsystem-types/Cargo.toml | 2 +- polkadot/node/subsystem-util/Cargo.toml | 2 +- polkadot/node/test/service/Cargo.toml | 2 +- polkadot/node/zombienet-backchannel/Cargo.toml | 6 +++--- polkadot/parachain/Cargo.toml | 2 +- polkadot/primitives/Cargo.toml | 2 +- polkadot/runtime/common/Cargo.toml | 6 +++--- polkadot/runtime/parachains/Cargo.toml | 4 ++-- polkadot/runtime/rococo/Cargo.toml | 6 +++--- polkadot/runtime/test-runtime/Cargo.toml | 6 +++--- polkadot/runtime/westend/Cargo.toml | 6 +++--- polkadot/xcm/Cargo.toml | 2 +- polkadot/xcm/pallet-xcm/Cargo.toml | 2 +- polkadot/xcm/procedural/Cargo.toml | 4 ++-- substrate/bin/minimal/node/Cargo.toml | 2 +- substrate/bin/node-template/node/Cargo.toml | 2 +- substrate/bin/node-template/runtime/Cargo.toml | 2 +- substrate/bin/node/bench/Cargo.toml | 4 ++-- substrate/bin/node/cli/Cargo.toml | 6 +++--- substrate/bin/node/inspect/Cargo.toml | 2 +- substrate/bin/node/runtime/Cargo.toml | 2 +- substrate/bin/utils/chain-spec-builder/Cargo.toml | 2 +- substrate/client/allocator/Cargo.toml | 2 +- substrate/client/api/Cargo.toml | 2 +- substrate/client/authority-discovery/Cargo.toml | 2 +- substrate/client/chain-spec/Cargo.toml | 4 ++-- substrate/client/chain-spec/derive/Cargo.toml | 4 ++-- substrate/client/cli/Cargo.toml | 6 +++--- substrate/client/consensus/aura/Cargo.toml | 2 +- substrate/client/consensus/babe/Cargo.toml | 2 +- substrate/client/consensus/babe/rpc/Cargo.toml | 6 +++--- substrate/client/consensus/beefy/Cargo.toml | 4 ++-- substrate/client/consensus/beefy/rpc/Cargo.toml | 6 +++--- substrate/client/consensus/common/Cargo.toml | 4 ++-- substrate/client/consensus/grandpa/Cargo.toml | 6 +++--- substrate/client/consensus/grandpa/rpc/Cargo.toml | 4 ++-- substrate/client/consensus/manual-seal/Cargo.toml | 4 ++-- substrate/client/consensus/pow/Cargo.toml | 2 +- substrate/client/executor/common/Cargo.toml | 2 +- substrate/client/keystore/Cargo.toml | 4 ++-- substrate/client/merkle-mountain-range/rpc/Cargo.toml | 4 ++-- substrate/client/mixnet/Cargo.toml | 2 +- substrate/client/network/Cargo.toml | 6 +++--- substrate/client/network/bitswap/Cargo.toml | 2 +- substrate/client/network/light/Cargo.toml | 2 +- substrate/client/network/sync/Cargo.toml | 2 +- substrate/client/rpc-api/Cargo.toml | 6 +++--- substrate/client/rpc-servers/Cargo.toml | 2 +- substrate/client/rpc-spec-v2/Cargo.toml | 6 +++--- substrate/client/rpc/Cargo.toml | 2 +- substrate/client/service/Cargo.toml | 6 +++--- substrate/client/storage-monitor/Cargo.toml | 2 +- substrate/client/sync-state-rpc/Cargo.toml | 6 +++--- substrate/client/sysinfo/Cargo.toml | 4 ++-- substrate/client/telemetry/Cargo.toml | 6 +++--- substrate/client/tracing/Cargo.toml | 4 ++-- substrate/client/tracing/proc-macro/Cargo.toml | 4 ++-- 
substrate/client/transaction-pool/Cargo.toml | 4 ++-- substrate/client/transaction-pool/api/Cargo.toml | 6 +++--- substrate/frame/beefy-mmr/Cargo.toml | 2 +- substrate/frame/beefy/Cargo.toml | 2 +- substrate/frame/benchmarking/Cargo.toml | 2 +- substrate/frame/contracts/Cargo.toml | 2 +- substrate/frame/contracts/proc-macro/Cargo.toml | 4 ++-- substrate/frame/conviction-voting/Cargo.toml | 2 +- substrate/frame/democracy/Cargo.toml | 2 +- .../election-provider-support/solution-type/Cargo.toml | 4 ++-- substrate/frame/message-queue/Cargo.toml | 2 +- substrate/frame/mixnet/Cargo.toml | 2 +- substrate/frame/offences/Cargo.toml | 2 +- substrate/frame/parameters/Cargo.toml | 2 +- substrate/frame/referenda/Cargo.toml | 2 +- substrate/frame/remark/Cargo.toml | 2 +- substrate/frame/staking/Cargo.toml | 2 +- substrate/frame/staking/reward-curve/Cargo.toml | 4 ++-- substrate/frame/state-trie-migration/Cargo.toml | 2 +- substrate/frame/support/Cargo.toml | 4 ++-- substrate/frame/support/procedural/Cargo.toml | 4 ++-- substrate/frame/support/procedural/tools/Cargo.toml | 4 ++-- .../frame/support/procedural/tools/derive/Cargo.toml | 4 ++-- substrate/frame/support/test/Cargo.toml | 2 +- substrate/frame/support/test/pallet/Cargo.toml | 2 +- substrate/frame/system/Cargo.toml | 2 +- substrate/frame/tips/Cargo.toml | 2 +- substrate/frame/transaction-payment/Cargo.toml | 4 ++-- .../frame/transaction-payment/asset-tx-payment/Cargo.toml | 4 ++-- substrate/frame/transaction-storage/Cargo.toml | 2 +- substrate/frame/treasury/Cargo.toml | 2 +- substrate/primitives/api/Cargo.toml | 2 +- substrate/primitives/api/proc-macro/Cargo.toml | 4 ++-- substrate/primitives/application-crypto/Cargo.toml | 2 +- substrate/primitives/arithmetic/Cargo.toml | 2 +- substrate/primitives/blockchain/Cargo.toml | 2 +- substrate/primitives/consensus/babe/Cargo.toml | 2 +- substrate/primitives/consensus/beefy/Cargo.toml | 2 +- substrate/primitives/consensus/common/Cargo.toml | 2 +- substrate/primitives/consensus/grandpa/Cargo.toml | 2 +- substrate/primitives/consensus/sassafras/Cargo.toml | 2 +- substrate/primitives/consensus/slots/Cargo.toml | 2 +- substrate/primitives/core/Cargo.toml | 6 +++--- substrate/primitives/crypto/hashing/proc-macro/Cargo.toml | 4 ++-- substrate/primitives/debug-derive/Cargo.toml | 4 ++-- substrate/primitives/genesis-builder/Cargo.toml | 2 +- substrate/primitives/inherents/Cargo.toml | 2 +- substrate/primitives/maybe-compressed-blob/Cargo.toml | 2 +- substrate/primitives/merkle-mountain-range/Cargo.toml | 4 ++-- substrate/primitives/npos-elections/Cargo.toml | 2 +- substrate/primitives/rpc/Cargo.toml | 4 ++-- .../primitives/runtime-interface/proc-macro/Cargo.toml | 4 ++-- substrate/primitives/runtime/Cargo.toml | 4 ++-- substrate/primitives/staking/Cargo.toml | 2 +- substrate/primitives/state-machine/Cargo.toml | 2 +- substrate/primitives/statement-store/Cargo.toml | 2 +- substrate/primitives/storage/Cargo.toml | 2 +- substrate/primitives/test-primitives/Cargo.toml | 2 +- substrate/primitives/timestamp/Cargo.toml | 2 +- substrate/primitives/trie/Cargo.toml | 2 +- substrate/primitives/version/Cargo.toml | 4 ++-- substrate/primitives/version/proc-macro/Cargo.toml | 4 ++-- substrate/primitives/weights/Cargo.toml | 2 +- substrate/test-utils/client/Cargo.toml | 4 ++-- substrate/test-utils/runtime/Cargo.toml | 4 ++-- substrate/test-utils/runtime/transaction-pool/Cargo.toml | 2 +- substrate/utils/frame/benchmarking-cli/Cargo.toml | 6 +++--- substrate/utils/frame/remote-externalities/Cargo.toml | 2 +- 
substrate/utils/frame/rpc/client/Cargo.toml | 2 +- .../utils/frame/rpc/state-trie-migration-rpc/Cargo.toml | 4 ++-- substrate/utils/frame/rpc/support/Cargo.toml | 2 +- substrate/utils/frame/try-runtime/cli/Cargo.toml | 4 ++-- substrate/utils/prometheus/Cargo.toml | 2 +- 185 files changed, 283 insertions(+), 274 deletions(-) diff --git a/.config/taplo.toml b/.config/taplo.toml index a45204923cff..2c6ccfb2b344 100644 --- a/.config/taplo.toml +++ b/.config/taplo.toml @@ -7,6 +7,7 @@ exclude = [ "polkadot/node/malus/integrationtests/**", "polkadot/zombienet_tests/**", "substrate/zombienet/**", + "target/**", ] # global rules diff --git a/Cargo.toml b/Cargo.toml index d09f19f280cb..7e6667559229 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -537,6 +537,14 @@ default_constructed_unit_structs = { level = "allow", priority = 2 } # stylistic polkavm-linker = "0.8.2" polkavm-derive = "0.8.0" log = { version = "0.4.20", default-features = false } +quote = { version = "1.0.33" } +serde = { version = "1.0.196", default-features = false } +serde-big-array = { version = "0.3.2" } +serde_derive = { version = "1.0.117" } +serde_json = { version = "1.0.113", default-features = false } +serde_yaml = { version = "0.9" } +syn = { version = "2.0.49" } +thiserror = { version = "1.0.48" } [profile.release] # Polkadot runtime requires unwinding. diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index 828b567bb947..205b593365ef 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } finality-grandpa = { version = "0.16.2", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } +serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 54aae4a2f76a..8aa6b4b05e5e 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -12,7 +12,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } -serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } +serde = { features = ["alloc", "derive"], workspace = true } # Bridge dependencies diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index e667c283a079..c0dae684b5f2 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -13,7 +13,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } parity-util-mem = { version = "0.12.0", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } # Bridge Dependencies diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index 2ea31c26d6b7..22206fb2c376 100644 --- 
a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -16,7 +16,7 @@ impl-trait-for-tuples = "0.2.2" log = { workspace = true } num-traits = { version = "0.2", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } +serde = { features = ["alloc", "derive"], workspace = true } # Substrate Dependencies diff --git a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml index 541ded26737b..99b429053114 100644 --- a/bridges/snowbridge/pallets/ethereum-client/Cargo.toml +++ b/bridges/snowbridge/pallets/ethereum-client/Cargo.toml @@ -15,8 +15,8 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.196", optional = true } -serde_json = { version = "1.0.113", optional = true } +serde = { optional = true, workspace = true, default-features = true } +serde_json = { optional = true, workspace = true, default-features = true } codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } ssz_rs = { version = "0.9.0", default-features = false } @@ -45,12 +45,12 @@ pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-fea [dev-dependencies] rand = "0.8.5" sp-keyring = { path = "../../../../substrate/primitives/keyring" } -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } hex-literal = "0.4.1" pallet-timestamp = { path = "../../../../substrate/frame/timestamp" } snowbridge-pallet-ethereum-client-fixtures = { path = "./fixtures" } sp-io = { path = "../../../../substrate/primitives/io" } -serde = "1.0.196" +serde = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml index 09653fa3b480..c26ef90a22db 100644 --- a/bridges/snowbridge/pallets/inbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/inbound-queue/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } diff --git a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml index 0956ac76678d..40e57db4bbd9 100644 --- a/bridges/snowbridge/pallets/outbound-queue/Cargo.toml +++ b/bridges/snowbridge/pallets/outbound-queue/Cargo.toml @@ -15,7 +15,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.196", features = ["alloc", "derive"], default-features = false } +serde = { features = ["alloc", "derive"], workspace = true } codec = { version = "3.6.1", package = "parity-scale-codec", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1", optional = true } diff --git a/bridges/snowbridge/primitives/beacon/Cargo.toml 
b/bridges/snowbridge/primitives/beacon/Cargo.toml index 4f5213b3422b..e021bb010714 100644 --- a/bridges/snowbridge/primitives/beacon/Cargo.toml +++ b/bridges/snowbridge/primitives/beacon/Cargo.toml @@ -12,7 +12,7 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } hex = { version = "0.4", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } diff --git a/bridges/snowbridge/primitives/core/Cargo.toml b/bridges/snowbridge/primitives/core/Cargo.toml index 460c6c3442ec..090c48c403fe 100644 --- a/bridges/snowbridge/primitives/core/Cargo.toml +++ b/bridges/snowbridge/primitives/core/Cargo.toml @@ -12,7 +12,7 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { version = "1.0.196", optional = true, features = ["alloc", "derive"], default-features = false } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } hex-literal = { version = "0.4.1" } diff --git a/bridges/snowbridge/primitives/ethereum/Cargo.toml b/bridges/snowbridge/primitives/ethereum/Cargo.toml index a9e2fe654212..399139cef381 100644 --- a/bridges/snowbridge/primitives/ethereum/Cargo.toml +++ b/bridges/snowbridge/primitives/ethereum/Cargo.toml @@ -12,8 +12,8 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { version = "1.0.196", optional = true, features = ["derive"] } -serde-big-array = { version = "0.3.2", optional = true, features = ["const-generics"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +serde-big-array = { optional = true, features = ["const-generics"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } ethbloom = { version = "0.13.0", default-features = false } @@ -33,7 +33,7 @@ ethabi = { package = "ethabi-decode", version = "1.0.0", default-features = fals [dev-dependencies] wasm-bindgen-test = "0.3.19" rand = "0.8.5" -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/bridges/snowbridge/primitives/router/Cargo.toml b/bridges/snowbridge/primitives/router/Cargo.toml index c4fc688a946c..750a2a73e637 100644 --- a/bridges/snowbridge/primitives/router/Cargo.toml +++ b/bridges/snowbridge/primitives/router/Cargo.toml @@ -12,7 +12,7 @@ categories = ["cryptography::cryptocurrencies"] workspace = true [dependencies] -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.9.0", default-features = false, features = ["derive"] } log = { workspace = true } diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 868f72b7b86e..ec4466b34265 100644 --- 
a/bridges/snowbridge/runtime/test-common/Cargo.toml
+++ b/bridges/snowbridge/runtime/test-common/Cargo.toml
@@ -15,7 +15,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features =
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 smallvec = "1.11.0"
 
 # Substrate
diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml
index 8a559c603f33..b37232bb4485 100644
--- a/cumulus/client/consensus/proposer/Cargo.toml
+++ b/cumulus/client/consensus/proposer/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
 [dependencies]
 anyhow = "1.0"
 async-trait = "0.1.74"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 
 # Substrate
 sp-consensus = { path = "../../../../substrate/primitives/consensus/common" }
diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml
index 5cdfff23ae17..6e652b892104 100644
--- a/cumulus/client/relay-chain-interface/Cargo.toml
+++ b/cumulus/client/relay-chain-interface/Cargo.toml
@@ -21,6 +21,6 @@ sc-client-api = { path = "../../../substrate/client/api" }
 
 futures = "0.3.28"
 async-trait = "0.1.74"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 jsonrpsee-core = "0.22"
 parity-scale-codec = "3.6.4"
diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml
index 1534537e0d42..801712b1ad15 100644
--- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml
+++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml
@@ -37,12 +37,12 @@ jsonrpsee = { version = "0.22", features = ["ws-client"] }
 tracing = "0.1.37"
 async-trait = "0.1.74"
 url = "2.4.0"
-serde_json = "1.0.113"
-serde = "1.0.196"
+serde_json = { workspace = true, default-features = true }
+serde = { workspace = true, default-features = true }
 schnellru = "0.2.1"
 smoldot = { version = "0.11.0", default_features = false, features = ["std"] }
 smoldot-light = { version = "0.9.0", default_features = false, features = ["std"] }
 either = "1.8.1"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 rand = "0.8.5"
 pin-project = "1.1.3"
diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml
index 517374527398..0a90c30e0331 100644
--- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml
+++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml
@@ -13,9 +13,9 @@ workspace = true
 proc-macro = true
 
 [dependencies]
-syn = "2.0.49"
+syn = { workspace = true }
 proc-macro2 = "1.0.64"
-quote = "1.0.33"
+quote = { workspace = true }
 proc-macro-crate = "3.0.0"
 
 [features]
diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml
index 77b484913b82..0ef678c4cbae 100644
--- a/cumulus/parachain-template/node/Cargo.toml
+++ b/cumulus/parachain-template/node/Cargo.toml
@@ -17,10 +17,10 @@ workspace = true
 clap = { version = "4.5.1", features = ["derive"] }
 log = { workspace = true, default-features = true }
 codec = { package = "parity-scale-codec", version = "3.0.0" }
-serde = { version = "1.0.196", features = ["derive"] }
+serde = { features = ["derive"], workspace = true, default-features = true }
 jsonrpsee = { version = "0.22", features = ["server"] }
 futures = "0.3.28"
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 
 # Local
 parachain-template-runtime = { path = "../runtime" }
diff --git a/cumulus/parachain-template/pallets/template/Cargo.toml b/cumulus/parachain-template/pallets/template/Cargo.toml
index bd9d21d2dd29..04858a161fa4 100644
--- a/cumulus/parachain-template/pallets/template/Cargo.toml
+++ b/cumulus/parachain-template/pallets/template/Cargo.toml
@@ -24,7 +24,7 @@ frame-support = { path = "../../../../substrate/frame/support", default-features
 frame-system = { path = "../../../../substrate/frame/system", default-features = false }
 
 [dev-dependencies]
-serde = { version = "1.0.196" }
+serde = { workspace = true, default-features = true }
 
 # Substrate
 sp-core = { path = "../../../../substrate/primitives/core", default-features = false }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
index 02e51782700b..7a1951fd24bd 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
@@ -21,7 +21,7 @@ log = { workspace = true }
 scale-info = { version = "2.10.0", default-features = false, features = [
 	"derive",
 ] }
-serde = { version = "1.0.196", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 
 # Substrate
 frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
index 6015251a238a..8623f7cb366e 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
@@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features =
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 
 # Substrate
 frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true }
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
index 4c141fc691c8..0bc3b510ed50 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
@@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features =
 hex-literal = "0.4.1"
 log = { workspace = true }
 scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 
 # Substrate
 frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true }
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
index 135ce95f30f9..a7d52dfd7849 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
@@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features =
 hex-literal = "0.4.1"
 log = { workspace = true }
 scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 
 # Substrate
 frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true }
diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
index 8ff031b3e335..c0b8fb7636b5 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
@@ -15,7 +15,7 @@ enumflags2 = { version = "0.7.7" }
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
 scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 
 # Substrate
 frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true }
diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
index a08b97021605..e87e825a34e8 100644
--- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
@@ -15,7 +15,7 @@ enumflags2 = { version = "0.7.7" }
 hex-literal = { version = "0.4.1" }
 log = { workspace = true }
 scale-info = { version = "2.9.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 
 # Substrate
 frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true }
diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml
index d5624941c037..84a232e954fc 100644
--- a/cumulus/polkadot-parachain/Cargo.toml
+++ b/cumulus/polkadot-parachain/Cargo.toml
@@ -21,8 +21,8 @@ codec = { package = "parity-scale-codec", version = "3.0.0" }
 futures = "0.3.28"
 hex-literal = "0.4.1"
 log = { workspace = true, default-features = true }
-serde = { version = "1.0.196", features = ["derive"] }
-serde_json = "1.0.113"
+serde = { features = ["derive"], workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
 
 # Local
 rococo-parachain-runtime = { path = "../parachains/runtimes/testing/rococo-parachain" }
diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml
index 66bc9c666978..b26f0b9967cf 100644
--- a/cumulus/test/service/Cargo.toml
+++ b/cumulus/test/service/Cargo.toml
@@ -19,8 +19,8 @@ codec = { package = "parity-scale-codec", version = "3.0.0" }
 criterion = { version = "0.5.1", features = ["async_tokio"] }
 jsonrpsee = { version = "0.22", features = ["server"] }
 rand = "0.8.5"
-serde = { version = "1.0.196", features = ["derive"] }
-serde_json = "1.0.113"
+serde = { features = ["derive"], workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
 tokio = { version = "1.32.0", features = ["macros"] }
 tracing = "0.1.37"
 url = "2.4.0"
diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml
index ee2673ac20ff..b9232f95981b 100644
--- a/polkadot/cli/Cargo.toml
+++ b/polkadot/cli/Cargo.toml
@@ -21,7 +21,7 @@ crate-type = ["cdylib", "rlib"]
 cfg-if = "1.0"
 clap = { version = "4.5.1", features = ["derive"], optional = true }
 log = { workspace = true, default-features = true }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 futures = "0.3.21"
 pyro = { package = "pyroscope", version = "0.5.3", optional = true }
 pyroscope_pprofrs = { version = "0.2", optional = true }
diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml
index 8705c14c3a4d..677f15c4b9a1 100644
--- a/polkadot/erasure-coding/Cargo.toml
+++ b/polkadot/erasure-coding/Cargo.toml
@@ -16,7 +16,7 @@ novelpoly = { package = "reed-solomon-novelpoly", version = "2.0.0" }
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "std"] }
 sp-core = { path = "../../substrate/primitives/core" }
 sp-trie = { path = "../../substrate/primitives/trie" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 
 [dev-dependencies]
 criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] }
diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml
index 8c6644ad6da5..8df0c2b1edae 100644
--- a/polkadot/node/collation-generation/Cargo.toml
+++ b/polkadot/node/collation-generation/Cargo.toml
@@ -19,7 +19,7 @@ polkadot-node-subsystem-util = { path = "../subsystem-util" }
 polkadot-primitives = { path = "../../primitives" }
 sp-core = { path = "../../../substrate/primitives/core" }
 sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] }
 
 [dev-dependencies]
diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml
index 1010779a1b36..f6d89dbc1528 100644
--- a/polkadot/node/core/approval-voting/Cargo.toml
+++ b/polkadot/node/core/approval-voting/Cargo.toml
@@ -20,7 +20,7 @@ merlin = "3.0"
 schnorrkel = "0.11.4"
 kvdb = "0.13.0"
 derive_more = "0.99.17"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 itertools = "0.10.5"
 
 polkadot-node-subsystem = { path = "../../subsystem" }
diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml
index 4f5c18a68d6c..05212da74798 100644
--- a/polkadot/node/core/av-store/Cargo.toml
+++ b/polkadot/node/core/av-store/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 futures = "0.3.21"
 futures-timer = "3.0.2"
 kvdb = "0.13.0"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 gum = { package = "tracing-gum", path = "../../gum" }
 
 bitvec = "1.0.0"
diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml
index 1c69fc441b32..b0cf041e38da 100644
--- a/polkadot/node/core/backing/Cargo.toml
+++ b/polkadot/node/core/backing/Cargo.toml
@@ -20,7 +20,7 @@ erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure
 statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" }
 bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
 gum = { package = "tracing-gum", path = "../../gum" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 fatality = "0.0.6"
 
 [dev-dependencies]
diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml
index f42e8f8d22ba..6ecfffd7249b 100644
--- a/polkadot/node/core/bitfield-signing/Cargo.toml
+++ b/polkadot/node/core/bitfield-signing/Cargo.toml
@@ -17,7 +17,7 @@ polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 sp-keystore = { path = "../../../../substrate/primitives/keystore" }
 wasm-timer = "0.2.5"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 
 [dev-dependencies]
 polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml
index 27c2df85235e..96fd42785cdd 100644
--- a/polkadot/node/core/chain-selection/Cargo.toml
+++ b/polkadot/node/core/chain-selection/Cargo.toml
@@ -18,7 +18,7 @@ polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 kvdb = "0.13.0"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 parity-scale-codec = "3.6.1"
 
 [dev-dependencies]
diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml
index 8a414a35f39c..1fff0a771706 100644
--- a/polkadot/node/core/dispute-coordinator/Cargo.toml
+++ b/polkadot/node/core/dispute-coordinator/Cargo.toml
@@ -14,7 +14,7 @@ futures = "0.3.21"
 gum = { package = "tracing-gum", path = "../../gum" }
 parity-scale-codec = "3.6.1"
 kvdb = "0.13.0"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 schnellru = "0.2.1"
 fatality = "0.0.6"
 
diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml
index 3b2fa634102d..24da4dc1e316 100644
--- a/polkadot/node/core/parachains-inherent/Cargo.toml
+++ b/polkadot/node/core/parachains-inherent/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 futures = "0.3.21"
 futures-timer = "3.0.2"
 gum = { package = "tracing-gum", path = "../../gum" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 async-trait = "0.1.74"
 polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-overseer = { path = "../../overseer" }
diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml
index 5f0b2c0fdc96..5b62d90c1d4f 100644
--- a/polkadot/node/core/prospective-parachains/Cargo.toml
+++ b/polkadot/node/core/prospective-parachains/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 futures = "0.3.19"
 gum = { package = "tracing-gum", path = "../../gum" }
 parity-scale-codec = "3.6.4"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 fatality = "0.0.6"
 bitvec = "1"
 
diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml
index 175980e90a05..24cdfd6b57b3 100644
--- a/polkadot/node/core/provisioner/Cargo.toml
+++ b/polkadot/node/core/provisioner/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
 futures = "0.3.21"
 gum = { package = "tracing-gum", path = "../../gum" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-subsystem = { path = "../../subsystem" }
diff --git a/polkadot/node/core/pvf-checker/Cargo.toml b/polkadot/node/core/pvf-checker/Cargo.toml
index 3f02c2fa74a8..f4f954e316c0 100644
--- a/polkadot/node/core/pvf-checker/Cargo.toml
+++ b/polkadot/node/core/pvf-checker/Cargo.toml
@@ -11,7 +11,7 @@ workspace = true
 
 [dependencies]
 futures = "0.3.21"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 gum = { package = "tracing-gum", path = "../../gum" }
 
 polkadot-node-primitives = { path = "../../primitives" }
diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml
index d7ea573cfd2f..9ed64b88ffde 100644
--- a/polkadot/node/core/pvf/Cargo.toml
+++ b/polkadot/node/core/pvf/Cargo.toml
@@ -23,7 +23,7 @@ pin-project = "1.0.9"
 rand = "0.8.5"
 slotmap = "1.0"
 tempfile = "3.3.0"
-thiserror = "1.0.31"
+thiserror = { workspace = true }
 tokio = { version = "1.24.2", features = ["fs", "process"] }
 
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml
index 631d4335b522..56bad9792fa0 100644
--- a/polkadot/node/core/pvf/common/Cargo.toml
+++ b/polkadot/node/core/pvf/common/Cargo.toml
@@ -15,7 +15,7 @@ cpu-time = "1.0.0"
 futures = "0.3.21"
 gum = { package = "tracing-gum", path = "../../../gum" }
 libc = "0.2.152"
-thiserror = "1.0.31"
+thiserror = { workspace = true }
 
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
 
diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml
index 4c81c8314df9..70126b4f4336 100644
--- a/polkadot/node/gum/proc-macro/Cargo.toml
+++ b/polkadot/node/gum/proc-macro/Cargo.toml
@@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"]
 proc-macro = true
 
 [dependencies]
-syn = { version = "2.0.49", features = ["extra-traits", "full"] }
-quote = "1.0.28"
+syn = { features = ["extra-traits", "full"], workspace = true }
+quote = { workspace = true }
 proc-macro2 = "1.0.56"
 proc-macro-crate = "3.0.0"
 expander = "2.0.0"
diff --git a/polkadot/node/jaeger/Cargo.toml b/polkadot/node/jaeger/Cargo.toml
index c8fd3d462834..23ab8f842108 100644
--- a/polkadot/node/jaeger/Cargo.toml
+++ b/polkadot/node/jaeger/Cargo.toml
@@ -17,7 +17,7 @@ polkadot-primitives = { path = "../../primitives" }
 polkadot-node-primitives = { path = "../primitives" }
 sc-network = { path = "../../../substrate/client/network" }
 sp-core = { path = "../../../substrate/primitives/core" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 tokio = "1.24.2"
 log = { workspace = true, default-features = true }
 parity-scale-codec = { version = "3.6.1", default-features = false }
diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml
index a1484429ed63..432501ed23fb 100644
--- a/polkadot/node/network/availability-distribution/Cargo.toml
+++ b/polkadot/node/network/availability-distribution/Cargo.toml
@@ -21,7 +21,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 polkadot-node-primitives = { path = "../../primitives" }
 sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] }
 sp-keystore = { path = "../../../../substrate/primitives/keystore" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 rand = "0.8.5"
 derive_more = "0.99.17"
 schnellru = "0.2.1"
diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml
index e86445730ffc..9eddf5c86d2e 100644
--- a/polkadot/node/network/availability-recovery/Cargo.toml
+++ b/polkadot/node/network/availability-recovery/Cargo.toml
@@ -15,7 +15,7 @@ tokio = "1.24.2"
 schnellru = "0.2.1"
 rand = "0.8.5"
 fatality = "0.0.6"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 async-trait = "0.1.74"
 
 gum = { package = "tracing-gum", path = "../../gum" }
diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml
index 0cdd7bfbcb13..2e889fc30eb9 100644
--- a/polkadot/node/network/bridge/Cargo.toml
+++ b/polkadot/node/network/bridge/Cargo.toml
@@ -25,7 +25,7 @@ polkadot-overseer = { path = "../../overseer" }
 parking_lot = "0.12.1"
 bytes = "1"
 fatality = "0.0.6"
-thiserror = "1"
+thiserror = { workspace = true }
 
 [dev-dependencies]
 assert_matches = "1.4.0"
diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml
index f88e8182ecca..f0f8be0f7bab 100644
--- a/polkadot/node/network/collator-protocol/Cargo.toml
+++ b/polkadot/node/network/collator-protocol/Cargo.toml
@@ -25,7 +25,7 @@ polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 polkadot-node-subsystem = { path = "../../subsystem" }
 fatality = "0.0.6"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 tokio-util = "0.7.1"
 
 [dev-dependencies]
diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml
index 319b743aa586..14d59d04f2bf 100644
--- a/polkadot/node/network/dispute-distribution/Cargo.toml
+++ b/polkadot/node/network/dispute-distribution/Cargo.toml
@@ -24,7 +24,7 @@ polkadot-node-primitives = { path = "../../primitives" }
 sc-network = { path = "../../../../substrate/client/network" }
 sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" }
 sp-keystore = { path = "../../../../substrate/primitives/keystore" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 fatality = "0.0.6"
 schnellru = "0.2.1"
 indexmap = "2.0.0"
diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml
index a8b0cf2737ea..7efa0b8ca820 100644
--- a/polkadot/node/network/protocol/Cargo.toml
+++ b/polkadot/node/network/protocol/Cargo.toml
@@ -21,7 +21,7 @@ sc-network = { path = "../../../../substrate/client/network" }
 sc-authority-discovery = { path = "../../../../substrate/client/authority-discovery" }
 strum = { version = "0.24", features = ["derive"] }
 futures = "0.3.21"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 fatality = "0.0.6"
 rand = "0.8"
 derive_more = "0.99"
diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml
index b07d84390dcd..01f7d818c1c5 100644
--- a/polkadot/node/network/statement-distribution/Cargo.toml
+++ b/polkadot/node/network/statement-distribution/Cargo.toml
@@ -23,7 +23,7 @@ polkadot-node-network-protocol = { path = "../protocol" }
 arrayvec = "0.7.4"
 indexmap = "2.0.0"
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 fatality = "0.0.6"
 bitvec = "1"
 
diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml
index b5560929b2eb..b4541bcc346c 100644
--- a/polkadot/node/primitives/Cargo.toml
+++ b/polkadot/node/primitives/Cargo.toml
@@ -22,9 +22,9 @@ sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compres
 sp-runtime = { path = "../../../substrate/primitives/runtime" }
 polkadot-parachain-primitives = { path = "../../parachain", default-features = false }
 schnorrkel = "0.11.4"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
-serde = { version = "1.0.196", features = ["derive"] }
+serde = { features = ["derive"], workspace = true, default-features = true }
 
 [target.'cfg(not(target_os = "unknown"))'.dependencies]
 zstd = { version = "0.12.4", default-features = false }
diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml
index dc6acd506c84..8fd9f20b7bcf 100644
--- a/polkadot/node/service/Cargo.toml
+++ b/polkadot/node/service/Cargo.toml
@@ -85,9 +85,9 @@ is_executable = "1.0.1"
 gum = { package = "tracing-gum", path = "../gum" }
 log = { workspace = true, default-features = true }
 schnellru = "0.2.1"
-serde = { version = "1.0.196", features = ["derive"] }
-serde_json = "1.0.113"
-thiserror = "1.0.48"
+serde = { features = ["derive"], workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
+thiserror = { workspace = true }
 kvdb = "0.13.0"
 kvdb-rocksdb = { version = "0.19.0", optional = true }
 parity-db = { version = "0.4.12", optional = true }
diff --git a/polkadot/node/subsystem-bench/Cargo.toml b/polkadot/node/subsystem-bench/Cargo.toml
index 203cfe05db1b..7de91d8cd5de 100644
--- a/polkadot/node/subsystem-bench/Cargo.toml
+++ b/polkadot/node/subsystem-bench/Cargo.toml
@@ -65,8 +65,8 @@ itertools = "0.11.0"
 polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" }
 prometheus_endpoint = { package = "substrate-prometheus-endpoint", path = "../../../substrate/utils/prometheus" }
 prometheus = { version = "0.13.0", default-features = false }
-serde = "1.0.196"
-serde_yaml = "0.9"
+serde = { workspace = true, default-features = true }
+serde_yaml = { workspace = true }
 
 polkadot-node-core-approval-voting = { path = "../core/approval-voting" }
 polkadot-approval-distribution = { path = "../network/approval-distribution" }
diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml
index ffd05cca2e93..54c8f7e2ade7 100644
--- a/polkadot/node/subsystem-types/Cargo.toml
+++ b/polkadot/node/subsystem-types/Cargo.toml
@@ -28,6 +28,6 @@ sc-client-api = { path = "../../../substrate/client/api" }
 sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" }
 smallvec = "1.8.0"
 substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 async-trait = "0.1.74"
 bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml
index b2a643d92f69..a668f8de76a0 100644
--- a/polkadot/node/subsystem-util/Cargo.toml
+++ b/polkadot/node/subsystem-util/Cargo.toml
@@ -18,7 +18,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [
 parking_lot = "0.12.1"
 pin-project = "1.0.9"
 rand = "0.8.5"
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 fatality = "0.0.6"
 gum = { package = "tracing-gum", path = "../gum" }
 derive_more = "0.99.17"
diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml
index 47a754892ed7..e7892abcd87b 100644
--- a/polkadot/node/test/service/Cargo.toml
+++ b/polkadot/node/test/service/Cargo.toml
@@ -14,7 +14,7 @@ futures = "0.3.21"
 hex = "0.4.3"
 gum = { package = "tracing-gum", path = "../../gum" }
 rand = "0.8.5"
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 tempfile = "3.2.0"
 tokio = "1.24.2"
 
diff --git a/polkadot/node/zombienet-backchannel/Cargo.toml b/polkadot/node/zombienet-backchannel/Cargo.toml
index ec0521a592dc..fa99490a9974 100644
--- a/polkadot/node/zombienet-backchannel/Cargo.toml
+++ b/polkadot/node/zombienet-backchannel/Cargo.toml
@@ -19,7 +19,7 @@ futures-util = "0.3.30"
 lazy_static = "1.4.0"
 parity-scale-codec = { version = "3.6.1", features = ["derive"] }
 reqwest = { version = "0.11", features = ["rustls-tls"], default-features = false }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 gum = { package = "tracing-gum", path = "../gum" }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0.113"
+serde = { features = ["derive"], workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml
index 843c4fe3e952..d8c3cea7ad8b 100644
--- a/polkadot/parachain/Cargo.toml
+++ b/polkadot/parachain/Cargo.toml
@@ -24,7 +24,7 @@ derive_more = "0.99.11"
 bounded-collections = { version = "0.2.0", default-features = false, features = ["serde"] }
 
 # all optional crates.
-serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"] }
+serde = { features = ["alloc", "derive"], workspace = true }
 
 [features]
 default = ["std"]
diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml
index fa3e949d691b..58d3d1001610 100644
--- a/polkadot/primitives/Cargo.toml
+++ b/polkadot/primitives/Cargo.toml
@@ -14,7 +14,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "se
 hex-literal = "0.4.1"
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] }
 scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] }
-serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"] }
+serde = { features = ["alloc", "derive"], workspace = true }
 
 application-crypto = { package = "sp-application-crypto", path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] }
 inherents = { package = "sp-inherents", path = "../../substrate/primitives/inherents", default-features = false }
diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml
index 52fbfd9ac77b..2467d123f02f 100644
--- a/polkadot/runtime/common/Cargo.toml
+++ b/polkadot/runtime/common/Cargo.toml
@@ -16,8 +16,8 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [
 log = { workspace = true }
 rustc-hex = { version = "2.1.0", default-features = false }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", default-features = false, features = ["alloc"] }
-serde_derive = { version = "1.0.117" }
+serde = { features = ["alloc"], workspace = true }
+serde_derive = { workspace = true }
 static_assertions = "1.1.0"
 
 sp-api = { path = "../../../substrate/primitives/api", default-features = false }
@@ -69,7 +69,7 @@ pallet-babe = { path = "../../../substrate/frame/babe" }
 pallet-treasury = { path = "../../../substrate/frame/treasury" }
 sp-keystore = { path = "../../../substrate/primitives/keystore" }
 sp-keyring = { path = "../../../substrate/primitives/keyring" }
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 libsecp256k1 = "0.7.0"
 test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" }
 
diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml
index 1de34400758f..311a62b6c917 100644
--- a/polkadot/runtime/parachains/Cargo.toml
+++ b/polkadot/runtime/parachains/Cargo.toml
@@ -16,7 +16,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [
 log = { workspace = true }
 rustc-hex = { version = "2.1.0", default-features = false }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"] }
+serde = { features = ["alloc", "derive"], workspace = true }
 derive_more = "0.99.17"
 bitflags = "1.3.2"
 
@@ -69,7 +69,7 @@ sp-tracing = { path = "../../../substrate/primitives/tracing" }
 sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" }
 thousands = "0.2.0"
 assert_matches = "1"
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml
index 68ded4aeaece..3dc59cc17281 100644
--- a/polkadot/runtime/rococo/Cargo.toml
+++ b/polkadot/runtime/rococo/Cargo.toml
@@ -14,8 +14,8 @@ workspace = true
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
 log = { workspace = true }
-serde = { version = "1.0.196", default-features = false }
-serde_derive = { version = "1.0.117", optional = true }
+serde = { workspace = true }
+serde_derive = { optional = true, workspace = true }
 static_assertions = "1.1.0"
 smallvec = "1.8.0"
 
@@ -111,7 +111,7 @@ keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyrin
 remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" }
 sp-trie = { path = "../../../substrate/primitives/trie" }
 separator = "0.4.1"
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false }
 tokio = { version = "1.24.2", features = ["macros"] }
 
diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml
index 1f041fdfeaa5..9753a4093045 100644
--- a/polkadot/runtime/test-runtime/Cargo.toml
+++ b/polkadot/runtime/test-runtime/Cargo.toml
@@ -16,8 +16,8 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [
 log = { workspace = true }
 rustc-hex = { version = "2.1.0", default-features = false }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", default-features = false }
-serde_derive = { version = "1.0.117", optional = true }
+serde = { workspace = true }
+serde_derive = { optional = true, workspace = true }
 smallvec = "1.8.0"
 
 authority-discovery-primitives = { package = "sp-authority-discovery", path = "../../../substrate/primitives/authority-discovery", default-features = false }
@@ -74,7 +74,7 @@ hex-literal = "0.4.1"
 tiny-keccak = { version = "2.0.2", features = ["keccak"] }
 keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" }
 sp-trie = { path = "../../../substrate/primitives/trie" }
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 
 [build-dependencies]
 substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" }
diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml
index 31b3f8351d88..4180828bcfb1 100644
--- a/polkadot/runtime/westend/Cargo.toml
+++ b/polkadot/runtime/westend/Cargo.toml
@@ -16,8 +16,8 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
 log = { workspace = true }
 rustc-hex = { version = "2.1.0", default-features = false }
-serde = { version = "1.0.196", default-features = false }
-serde_derive = { version = "1.0.117", optional = true }
+serde = { workspace = true }
+serde_derive = { optional = true, workspace = true }
 smallvec = "1.8.0"
 
 authority-discovery-primitives = { package = "sp-authority-discovery", path = "../../../substrate/primitives/authority-discovery", default-features = false }
@@ -119,7 +119,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder",
 hex-literal = "0.4.1"
 tiny-keccak = { version = "2.0.2", features = ["keccak"] }
 keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" }
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" }
 tokio = { version = "1.24.2", features = ["macros"] }
 sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false }
diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml
index 888757150b45..f9ccfb9833a6 100644
--- a/polkadot/xcm/Cargo.toml
+++ b/polkadot/xcm/Cargo.toml
@@ -18,7 +18,7 @@ log = { workspace = true }
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
 sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] }
-serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive", "rc"] }
+serde = { features = ["alloc", "derive", "rc"], workspace = true }
 schemars = { version = "0.8.13", default-features = true, optional = true }
 xcm-procedural = { path = "procedural" }
 environmental = { version = "1.1.4", default-features = false }
diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml
index ac39e5f74292..4840b6127f55 100644
--- a/polkadot/xcm/pallet-xcm/Cargo.toml
+++ b/polkadot/xcm/pallet-xcm/Cargo.toml
@@ -13,7 +13,7 @@ workspace = true
 bounded-collections = { version = "0.2.0", default-features = false }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", optional = true, features = ["derive"] }
+serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
 log = { workspace = true }
 
 frame-support = { path = "../../../substrate/frame/support", default-features = false }
diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml
index 1af4d2db4f15..ca9fb351bd3c 100644
--- a/polkadot/xcm/procedural/Cargo.toml
+++ b/polkadot/xcm/procedural/Cargo.toml
@@ -15,8 +15,8 @@ proc-macro = true
 
 [dependencies]
 proc-macro2 = "1.0.56"
-quote = "1.0.28"
-syn = "2.0.49"
+quote = { workspace = true }
+syn = { workspace = true }
 Inflector = "0.11.4"
 
 [dev-dependencies]
diff --git a/substrate/bin/minimal/node/Cargo.toml b/substrate/bin/minimal/node/Cargo.toml
index 02b3b0a735bf..65644861c9ac 100644
--- a/substrate/bin/minimal/node/Cargo.toml
+++ b/substrate/bin/minimal/node/Cargo.toml
@@ -24,7 +24,7 @@ clap = { version = "4.5.1", features = ["derive"] }
 futures = { version = "0.3.21", features = ["thread-pool"] }
 futures-timer = "3.0.1"
 jsonrpsee = { version = "0.22", features = ["server"] }
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 
 sc-cli = { path = "../../../client/cli" }
 sc-executor = { path = "../../../client/executor" }
diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml
index 7a6955d2cec3..9cba2b5a3660 100644
--- a/substrate/bin/node-template/node/Cargo.toml
+++ b/substrate/bin/node-template/node/Cargo.toml
@@ -22,7 +22,7 @@ name = "node-template"
 [dependencies]
 clap = { version = "4.5.1", features = ["derive"] }
 futures = { version = "0.3.21", features = ["thread-pool"] }
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 
 sc-cli = { path = "../../../client/cli" }
 sp-core = { path = "../../../primitives/core" }
diff --git a/substrate/bin/node-template/runtime/Cargo.toml b/substrate/bin/node-template/runtime/Cargo.toml
index 95d92e6eadf0..86bab0ca375a 100644
--- a/substrate/bin/node-template/runtime/Cargo.toml
+++ b/substrate/bin/node-template/runtime/Cargo.toml
@@ -42,7 +42,7 @@ sp-std = { path = "../../../primitives/std", default-features = false }
 sp-storage = { path = "../../../primitives/storage", default-features = false }
 sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false }
 sp-version = { path = "../../../primitives/version", default-features = false, features = ["serde"] }
-serde_json = { version = "1.0.113", default-features = false, features = ["alloc"] }
+serde_json = { features = ["alloc"], workspace = true }
 sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" }
 
 # Used for the node template's RPCs
diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml
index ada58c79c77c..8e0f7fb93b39 100644
--- a/substrate/bin/node/bench/Cargo.toml
+++ b/substrate/bin/node/bench/Cargo.toml
@@ -24,8 +24,8 @@ kitchensink-runtime = { path = "../runtime" }
 sc-client-api = { path = "../../../client/api" }
 sp-runtime = { path = "../../../primitives/runtime" }
 sp-state-machine = { path = "../../../primitives/state-machine" }
-serde = "1.0.196"
-serde_json = "1.0.113"
+serde = { workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
 derive_more = { version = "0.99.17", default-features = false, features = ["display"] }
 kvdb = "0.13.0"
 kvdb-rocksdb = "0.19.0"
diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml
index aed2101a3f59..abe284c41da1 100644
--- a/substrate/bin/node/cli/Cargo.toml
+++ b/substrate/bin/node/cli/Cargo.toml
@@ -43,7 +43,7 @@ crate-type = ["cdylib", "rlib"]
 array-bytes = "6.1"
 clap = { version = "4.5.1", features = ["derive"], optional = true }
 codec = { package = "parity-scale-codec", version = "3.6.1" }
-serde = { version = "1.0.196", features = ["derive"] }
+serde = { features = ["derive"], workspace = true, default-features = true }
 jsonrpsee = { version = "0.22", features = ["server"] }
 futures = "0.3.21"
 log = { workspace = true, default-features = true }
@@ -116,7 +116,7 @@ sc-cli = { path = "../../../client/cli", optional = true }
 frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true }
 node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true }
 try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true }
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 
 [dev-dependencies]
 sc-keystore = { path = "../../../client/keystore" }
@@ -159,7 +159,7 @@ sp-consensus-babe = { path = "../../../primitives/consensus/babe" }
 sp-externalities = { path = "../../../primitives/externalities" }
 sp-keyring = { path = "../../../primitives/keyring" }
 sp-runtime = { path = "../../../primitives/runtime" }
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 scale-info = { version = "2.10.0", features = ["derive", "serde"] }
 sp-trie = { path = "../../../primitives/trie" }
 sp-state-machine = { path = "../../../primitives/state-machine" }
diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml
index 6c9be0fe1acb..7db6e3efd3a0 100644
--- a/substrate/bin/node/inspect/Cargo.toml
+++ b/substrate/bin/node/inspect/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 clap = { version = "4.5.1", features = ["derive"] }
 codec = { package = "parity-scale-codec", version = "3.6.1" }
-thiserror = "1.0"
+thiserror = { workspace = true }
 sc-cli = { path = "../../../client/cli" }
 sc-client-api = { path = "../../../client/api" }
 sc-service = { path = "../../../client/service", default-features = false }
diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml
index 7bc608ba89e1..aa13c3d292be 100644
--- a/substrate/bin/node/runtime/Cargo.toml
+++ b/substrate/bin/node/runtime/Cargo.toml
@@ -26,7 +26,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features =
 scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] }
 static_assertions = "1.1.0"
 log = { workspace = true }
-serde_json = { version = "1.0.113", default-features = false, features = ["alloc", "arbitrary_precision"] }
+serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true }
 
 # pallet-asset-conversion: turn on "num-traits" feature
 primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] }
diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml
index 8b3180fbbcd1..996372c8a0f8 100644
--- a/substrate/bin/utils/chain-spec-builder/Cargo.toml
+++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml
@@ -26,5 +26,5 @@ crate-type = ["rlib"]
 clap = { version = "4.5.1", features = ["derive"] }
 log = { workspace = true, default-features = true }
 sc-chain-spec = { path = "../../../client/chain-spec" }
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 sp-tracing = { path = "../../../primitives/tracing" }
diff --git a/substrate/client/allocator/Cargo.toml b/substrate/client/allocator/Cargo.toml
index 662125814fbe..2c268b548ea9 100644
--- a/substrate/client/allocator/Cargo.toml
+++ b/substrate/client/allocator/Cargo.toml
@@ -18,6 +18,6 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 log = { workspace = true, default-features = true }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 sp-core = { path = "../../primitives/core" }
 sp-wasm-interface = { path = "../../primitives/wasm-interface" }
diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml
index 09f60a476702..cd7b613e277f 100644
--- a/substrate/client/api/Cargo.toml
+++ b/substrate/client/api/Cargo.toml
@@ -41,6 +41,6 @@ sp-storage = { path = "../../primitives/storage" }
 sp-trie = { path = "../../primitives/trie" }
 
 [dev-dependencies]
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 sp-test-primitives = { path = "../../primitives/test-primitives" }
 substrate-test-runtime = { path = "../../test-utils/runtime" }
diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml
index 78f7144f4368..cdd4052f0b09 100644
--- a/substrate/client/authority-discovery/Cargo.toml
+++ b/substrate/client/authority-discovery/Cargo.toml
@@ -32,7 +32,7 @@ multihash = { version = "0.18.1", default-features = false, features = [
 log = { workspace = true, default-features = true }
 prost = "0.12"
 rand = "0.8.5"
-thiserror = "1.0"
+thiserror = { workspace = true }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" }
 sc-client-api = { path = "../api" }
 sc-network = { path = "../network" }
diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml
index 82a5f7f55b04..f2138c07d71a 100644
--- a/substrate/client/chain-spec/Cargo.toml
+++ b/substrate/client/chain-spec/Cargo.toml
@@ -18,8 +18,8 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 memmap2 = "0.9.3"
-serde = { version = "1.0.196", features = ["derive"] }
-serde_json = "1.0.113"
+serde = { features = ["derive"], workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
 sc-client-api = { path = "../api" }
 sc-chain-spec-derive = { path = "derive" }
 sc-executor = { path = "../executor" }
diff --git a/substrate/client/chain-spec/derive/Cargo.toml b/substrate/client/chain-spec/derive/Cargo.toml
index 4e95206df6b5..521eee578eca 100644
--- a/substrate/client/chain-spec/derive/Cargo.toml
+++ b/substrate/client/chain-spec/derive/Cargo.toml
@@ -20,5 +20,5 @@ proc-macro = true
 [dependencies]
 proc-macro-crate = "3.0.0"
 proc-macro2 = "1.0.56"
-quote = "1.0.28"
-syn = "2.0.49"
+quote = { workspace = true }
+syn = { workspace = true }
diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml
index 1b4d3733baa0..0582018283c6 100644
--- a/substrate/client/cli/Cargo.toml
+++ b/substrate/client/cli/Cargo.toml
@@ -29,9 +29,9 @@ parity-scale-codec = "3.6.1"
 rand = "0.8.5"
 regex = "1.6.0"
 rpassword = "7.0.0"
-serde = "1.0.196"
-serde_json = "1.0.113"
-thiserror = "1.0.48"
+serde = { workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
+thiserror = { workspace = true }
 bip39 = "2.0.0"
 tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "signal"] }
 sc-client-api = { path = "../api" }
diff --git a/substrate/client/consensus/aura/Cargo.toml b/substrate/client/consensus/aura/Cargo.toml
index d1076da35f9d..213f75974da0 100644
--- a/substrate/client/consensus/aura/Cargo.toml
+++ b/substrate/client/consensus/aura/Cargo.toml
@@ -20,7 +20,7 @@ async-trait = "0.1.74"
 codec = { package = "parity-scale-codec", version = "3.6.1" }
 futures = "0.3.21"
 log = { workspace = true, default-features = true }
-thiserror = "1.0"
+thiserror = { workspace = true }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
 sc-block-builder = { path = "../../block-builder" }
 sc-client-api = { path = "../../api" }
diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml
index 9bfed7e541b2..c98fb7112b7c 100644
--- a/substrate/client/consensus/babe/Cargo.toml
+++ b/substrate/client/consensus/babe/Cargo.toml
@@ -25,7 +25,7 @@ num-bigint = "0.4.3"
 num-rational = "0.4.1"
 num-traits = "0.2.17"
 parking_lot = "0.12.1"
-thiserror = "1.0"
+thiserror = { workspace = true }
 fork-tree = { path = "../../../utils/fork-tree" }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
 sc-client-api = { path = "../../api" }
diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml
index 342c9259cc66..043b566673e7 100644
--- a/substrate/client/consensus/babe/rpc/Cargo.toml
+++ b/substrate/client/consensus/babe/rpc/Cargo.toml
@@ -18,8 +18,8 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] }
 futures = "0.3.21"
-serde = { version = "1.0.196", features = ["derive"] }
-thiserror = "1.0"
+serde = { features = ["derive"], workspace = true, default-features = true }
+thiserror = { workspace = true }
 sc-consensus-babe = { path = ".." }
 sc-consensus-epochs = { path = "../../epochs" }
 sc-rpc-api = { path = "../../../rpc-api" }
@@ -33,7 +33,7 @@ sp-keystore = { path = "../../../../primitives/keystore" }
 sp-runtime = { path = "../../../../primitives/runtime" }
 
 [dev-dependencies]
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 tokio = "1.22.0"
 sc-consensus = { path = "../../common" }
 sc-keystore = { path = "../../../keystore" }
diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml
index d6b8a57b9884..8552a4900223 100644
--- a/substrate/client/consensus/beefy/Cargo.toml
+++ b/substrate/client/consensus/beefy/Cargo.toml
@@ -20,7 +20,7 @@ fnv = "1.0.6"
 futures = "0.3"
 log = { workspace = true, default-features = true }
 parking_lot = "0.12.1"
-thiserror = "1.0"
+thiserror = { workspace = true }
 wasm-timer = "0.2.5"
 prometheus = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
 sc-client-api = { path = "../../api" }
@@ -44,7 +44,7 @@ tokio = "1.22.0"
 
 [dev-dependencies]
-serde = "1.0.196"
+serde = { workspace = true, default-features = true }
 tempfile = "3.1.0"
 sc-block-builder = { path = "../../block-builder" }
 sc-network-test = { path = "../../network/test" }
diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml
index e83c95ab106b..bb2ae4a08966 100644
--- a/substrate/client/consensus/beefy/rpc/Cargo.toml
+++ b/substrate/client/consensus/beefy/rpc/Cargo.toml
@@ -17,8 +17,8 @@ futures = "0.3.21"
 jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] }
 log = { workspace = true, default-features = true }
 parking_lot = "0.12.1"
-serde = { version = "1.0.196", features = ["derive"] }
-thiserror = "1.0"
+serde = { features = ["derive"], workspace = true, default-features = true }
+thiserror = { workspace = true }
 sc-consensus-beefy = { path = ".." }
 sp-consensus-beefy = { path = "../../../../primitives/consensus/beefy" }
 sc-rpc = { path = "../../../rpc" }
@@ -26,7 +26,7 @@ sp-core = { path = "../../../../primitives/core" }
 sp-runtime = { path = "../../../../primitives/runtime" }
 
 [dev-dependencies]
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 sc-rpc = { path = "../../../rpc", features = ["test-helpers"] }
 substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" }
 tokio = { version = "1.22.0", features = ["macros"] }
diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml
index 496e6c82740c..7f36dfc09ef8 100644
--- a/substrate/client/consensus/common/Cargo.toml
+++ b/substrate/client/consensus/common/Cargo.toml
@@ -23,8 +23,8 @@ libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] }
 log = { workspace = true, default-features = true }
 mockall = "0.11.3"
 parking_lot = "0.12.1"
-serde = { version = "1.0", features = ["derive"] }
-thiserror = "1.0.48"
+serde = { features = ["derive"], workspace = true, default-features = true }
+thiserror = { workspace = true }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
 sc-client-api = { path = "../../api" }
 sc-utils = { path = "../../utils" }
diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml
index e342377d820a..1ab953525220 100644
--- a/substrate/client/consensus/grandpa/Cargo.toml
+++ b/substrate/client/consensus/grandpa/Cargo.toml
@@ -28,8 +28,8 @@ log = { workspace = true, default-features = true }
 parity-scale-codec = { version = "3.6.1", features = ["derive"] }
 parking_lot = "0.12.1"
 rand = "0.8.5"
-serde_json = "1.0.113"
-thiserror = "1.0"
+serde_json = { workspace = true, default-features = true }
+thiserror = { workspace = true }
 fork-tree = { path = "../../../utils/fork-tree" }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
 sc-block-builder = { path = "../../block-builder" }
@@ -57,7 +57,7 @@ sp-runtime = { path = "../../../primitives/runtime" }
 [dev-dependencies]
 assert_matches = "1.3.0"
 finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] }
-serde = "1.0.196"
+serde = { workspace = true, default-features = true }
 tokio = "1.22.0"
 sc-network = { path = "../../network" }
 sc-network-test = { path = "../../network/test" }
diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml
index ca0c2ac371af..f7e87415448e 100644
--- a/substrate/client/consensus/grandpa/rpc/Cargo.toml
+++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml
@@ -18,8 +18,8 @@ futures = "0.3.16"
 jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] }
 log = { workspace = true, default-features = true }
 parity-scale-codec = { version = "3.6.1", features = ["derive"] }
-serde = { version = "1.0.196", features = ["derive"] }
-thiserror = "1.0"
+serde = { features = ["derive"], workspace = true, default-features = true }
+thiserror = { workspace = true }
 sc-client-api = { path = "../../../api" }
 sc-consensus-grandpa = { path = ".." }
 sc-rpc = { path = "../../../rpc" }
diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml
index 51745c996aa9..ac32fed72289 100644
--- a/substrate/client/consensus/manual-seal/Cargo.toml
+++ b/substrate/client/consensus/manual-seal/Cargo.toml
@@ -23,8 +23,8 @@ codec = { package = "parity-scale-codec", version = "3.6.1" }
 futures = "0.3.21"
 futures-timer = "3.0.1"
 log = { workspace = true, default-features = true }
-serde = { version = "1.0", features = ["derive"] }
-thiserror = "1.0"
+serde = { features = ["derive"], workspace = true, default-features = true }
+thiserror = { workspace = true }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
 sc-client-api = { path = "../../api" }
 sc-consensus = { path = "../common" }
diff --git a/substrate/client/consensus/pow/Cargo.toml b/substrate/client/consensus/pow/Cargo.toml
index 6caccb9879dd..0791514035b4 100644
--- a/substrate/client/consensus/pow/Cargo.toml
+++ b/substrate/client/consensus/pow/Cargo.toml
@@ -22,7 +22,7 @@ futures = "0.3.21"
 futures-timer = "3.0.1"
 log = { workspace = true, default-features = true }
 parking_lot = "0.12.1"
-thiserror = "1.0"
+thiserror = { workspace = true }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" }
 sc-client-api = { path = "../../api" }
 sc-consensus = { path = "../common" }
diff --git a/substrate/client/executor/common/Cargo.toml b/substrate/client/executor/common/Cargo.toml
index 648fb9f0f504..bfd1fa6b7401 100644
--- a/substrate/client/executor/common/Cargo.toml
+++ b/substrate/client/executor/common/Cargo.toml
@@ -17,7 +17,7 @@ workspace = true
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 wasm-instrument = "0.4"
 sc-allocator = { path = "../../allocator" }
 sp-maybe-compressed-blob = { path = "../../../primitives/maybe-compressed-blob" }
diff --git a/substrate/client/keystore/Cargo.toml b/substrate/client/keystore/Cargo.toml
index f7f3b138ec89..908e0aa8f38c 100644
--- a/substrate/client/keystore/Cargo.toml
+++ b/substrate/client/keystore/Cargo.toml
@@ -19,8 +19,8 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 array-bytes = "6.1"
 parking_lot = "0.12.1"
-serde_json = "1.0.113"
-thiserror = "1.0"
+serde_json = { workspace = true, default-features = true }
+thiserror = { workspace = true }
 sp-application-crypto = { path = "../../primitives/application-crypto" }
 sp-core = { path = "../../primitives/core" }
 sp-keystore = { path = "../../primitives/keystore" }
diff --git a/substrate/client/merkle-mountain-range/rpc/Cargo.toml b/substrate/client/merkle-mountain-range/rpc/Cargo.toml
index 901c573b6bd6..9b391b76ea00 100644
--- a/substrate/client/merkle-mountain-range/rpc/Cargo.toml
+++ b/substrate/client/merkle-mountain-range/rpc/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1" }
 jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] }
-serde = { version = "1.0.196", features = ["derive"] }
+serde = { features = ["derive"], workspace = true, default-features = true }
 sp-api = { path = "../../../primitives/api" }
 sp-blockchain = { path = "../../../primitives/blockchain" }
 sp-core = { path = "../../../primitives/core" }
@@ -25,4 +25,4 @@ sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" }
 sp-runtime = { path = "../../../primitives/runtime" }
 
 [dev-dependencies]
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml
index 8bcd963bd56f..736184f4668c 100644
--- a/substrate/client/mixnet/Cargo.toml
+++ b/substrate/client/mixnet/Cargo.toml
@@ -37,4 +37,4 @@ sp-core = { path = "../../primitives/core" }
 sp-keystore = { path = "../../primitives/keystore" }
 sp-mixnet = { path = "../../primitives/mixnet" }
 sp-runtime = { path = "../../primitives/runtime" }
-thiserror = "1.0"
+thiserror = { workspace = true }
diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml
index d2ca8ba8c3f2..cbf74440dc1a 100644
--- a/substrate/client/network/Cargo.toml
+++ b/substrate/client/network/Cargo.toml
@@ -36,10 +36,10 @@ parking_lot = "0.12.1"
 partial_sort = "0.2.0"
 pin-project = "1.0.12"
 rand = "0.8.5"
-serde = { version = "1.0.196", features = ["derive"] }
-serde_json = "1.0.113"
+serde = { features = ["derive"], workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
 smallvec = "1.11.0"
-thiserror = "1.0"
+thiserror = { workspace = true }
 tokio = { version = "1.22.0", features = ["macros", "sync"] }
 tokio-stream = "0.1.7"
 unsigned-varint = { version = "0.7.1", features = ["asynchronous_codec", "futures"] }
diff --git a/substrate/client/network/bitswap/Cargo.toml b/substrate/client/network/bitswap/Cargo.toml
index 63544b069e51..7ef3ea212427 100644
--- a/substrate/client/network/bitswap/Cargo.toml
+++ b/substrate/client/network/bitswap/Cargo.toml
@@ -25,7 +25,7 @@ futures = "0.3.21"
 libp2p-identity = { version = "0.1.3", features = ["peerid"] }
 log = { workspace = true, default-features = true }
 prost = "0.12"
-thiserror = "1.0"
+thiserror = { workspace = true }
 unsigned-varint = { version = "0.7.1", features = ["asynchronous_codec", "futures"] }
 sc-client-api = { path = "../../api" }
 sc-network = { path = ".." }
diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml
index a83be7538bef..c757f727fb71 100644
--- a/substrate/client/network/light/Cargo.toml
+++ b/substrate/client/network/light/Cargo.toml
@@ -33,4 +33,4 @@ sc-client-api = { path = "../../api" }
 sc-network = { path = ".." }
 sp-core = { path = "../../../primitives/core" }
 sp-runtime = { path = "../../../primitives/runtime" }
-thiserror = "1.0"
+thiserror = { workspace = true }
diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml
index 19f86e87ac50..32ba3b6356c0 100644
--- a/substrate/client/network/sync/Cargo.toml
+++ b/substrate/client/network/sync/Cargo.toml
@@ -31,7 +31,7 @@ mockall = "0.11.3"
 prost = "0.12"
 schnellru = "0.2.1"
 smallvec = "1.11.0"
-thiserror = "1.0"
+thiserror = { workspace = true }
 tokio-stream = "0.1.14"
 tokio = { version = "1.32.0", features = ["macros", "time"] }
 fork-tree = { path = "../../../utils/fork-tree" }
diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml
index 27f8a1b90624..1b7af6a4a52f 100644
--- a/substrate/client/rpc-api/Cargo.toml
+++ b/substrate/client/rpc-api/Cargo.toml
@@ -18,9 +18,9 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1" }
 scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.196", features = ["derive"] }
-serde_json = "1.0.113"
-thiserror = "1.0"
+serde = { features = ["derive"], workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
+thiserror = { workspace = true }
 sc-chain-spec = { path = "../chain-spec" }
 sc-mixnet = { path = "../mixnet" }
 sc-transaction-pool-api = { path = "../transaction-pool/api" }
diff --git a/substrate/client/rpc-servers/Cargo.toml b/substrate/client/rpc-servers/Cargo.toml
index 3e04b927a222..7f7a86799e0c 100644
--- a/substrate/client/rpc-servers/Cargo.toml
+++ b/substrate/client/rpc-servers/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 jsonrpsee = { version = "0.22", features = ["server"] }
 log = { workspace = true, default-features = true }
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 tokio = { version = "1.22.0", features = ["parking_lot"] }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" }
 tower-http = { version = "0.4.0", features = ["cors"] }
diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml
index cb109becaf26..c62b3e789d38 100644
--- a/substrate/client/rpc-spec-v2/Cargo.toml
+++ b/substrate/client/rpc-spec-v2/Cargo.toml
@@ -31,8 +31,8 @@ sc-client-api = { path = "../api" }
 sc-utils = { path = "../utils" }
 sc-rpc = { path = "../rpc" }
 codec = { package = "parity-scale-codec", version = "3.6.1" }
-thiserror = "1.0"
-serde = "1.0"
+thiserror = { workspace = true }
+serde = { workspace = true, default-features = true }
 hex = "0.4"
 futures = "0.3.21"
 parking_lot = "0.12.1"
@@ -44,7 +44,7 @@ futures-util = { version = "0.3.30", default-features = false }
 rand = "0.8.5"
 
 [dev-dependencies]
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 tokio = { version = "1.22.0", features = ["macros"] }
 substrate-test-runtime-client = { path = "../../test-utils/runtime/client" }
 substrate-test-runtime = { path = "../../test-utils/runtime" }
diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml
index aa97eb87bb59..f65e6c9a59ec 100644
--- a/substrate/client/rpc/Cargo.toml
+++ b/substrate/client/rpc/Cargo.toml
@@ -21,7 +21,7 @@ futures = "0.3.21"
 jsonrpsee = { version = "0.22", features = ["server"] }
 log = { workspace = true, default-features = true }
 parking_lot = "0.12.1"
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
 sc-block-builder = { path = "../block-builder" }
 sc-chain-spec = { path = "../chain-spec" }
 sc-client-api = { path = "../api" }
diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml
index a6fa360b74f8..73edceb2ef36 100644
--- a/substrate/client/service/Cargo.toml
+++ b/substrate/client/service/Cargo.toml
@@ -29,7 +29,7 @@ runtime-benchmarks = [
 
 [dependencies]
 jsonrpsee = { version = "0.22", features = ["server"] }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
 futures = "0.3.21"
 rand = "0.8.5"
 parking_lot = "0.12.1"
@@ -37,8 +37,8 @@ log = { workspace = true, default-features = true }
 futures-timer = "3.0.1"
 exit-future = "0.2.0"
 pin-project = "1.0.12"
-serde = "1.0.196"
-serde_json = "1.0.113"
+serde = { workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
 sc-keystore = { path = "../keystore" }
 sp-runtime = { path = "../../primitives/runtime" }
 sp-trie = { path = "../../primitives/trie" }
diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml
index b0f046441421..b2120b3efc43 100644
--- a/substrate/client/storage-monitor/Cargo.toml
+++ b/substrate/client/storage-monitor/Cargo.toml
@@ -17,4 +17,4 @@ log = { workspace = true, default-features = true }
 fs4 = "0.7.0"
 sp-core = { path = "../../primitives/core" }
 tokio = { version = "1.22.0", features = ["time"] }
-thiserror = "1.0.48"
+thiserror = { workspace = true }
diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml
index fc0efd6630ca..09dc611caa04 100644
--- a/substrate/client/sync-state-rpc/Cargo.toml
+++ b/substrate/client/sync-state-rpc/Cargo.toml
@@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "3.6.1" }
 jsonrpsee = { version = "0.22", features = ["client-core", "macros", "server"] }
-serde = { version = "1.0.196", features = ["derive"] }
-serde_json = "1.0.113"
-thiserror = "1.0.48"
+serde = { features = ["derive"], workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
+thiserror = { workspace = true }
 sc-chain-spec = { path = "../chain-spec" }
 sc-client-api = { path = "../api" }
 sc-consensus-babe = { path = "../consensus/babe" }
diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml
index ede0bccaedc8..ba58452ffb58 100644
--- a/substrate/client/sysinfo/Cargo.toml
+++ b/substrate/client/sysinfo/Cargo.toml
@@ -24,8 +24,8 @@ rand = "0.8.5"
 rand_pcg = "0.3.1"
 derive_more = "0.99"
 regex = "1"
-serde = { version = "1.0.196", features = ["derive"] }
-serde_json = "1.0.113"
+serde = { features = ["derive"], workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
 sc-telemetry = { path = "../telemetry" }
 sp-core = { path = "../../primitives/core" }
 sp-crypto-hashing = { path = "../../primitives/crypto/hashing" }
diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml
index e35fbc16142e..8ab00202f0ba 100644
--- a/substrate/client/telemetry/Cargo.toml
+++ b/substrate/client/telemetry/Cargo.toml
@@ -25,7 +25,7 @@ parking_lot = "0.12.1"
 pin-project = "1.0.12"
 sc-utils = { path = "../utils" }
 rand = "0.8.5"
-serde = { version = "1.0.196", features = ["derive"] }
-serde_json = "1.0.113"
-thiserror = "1.0.48"
+serde = { features = ["derive"], workspace = true, default-features = true }
+serde_json = { workspace = true, default-features = true }
+thiserror = { workspace = true }
 wasm-timer = "0.2.5"
diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml
index bd5a661bb3a3..61e6f7d0bab5 100644
--- a/substrate/client/tracing/Cargo.toml
+++ b/substrate/client/tracing/Cargo.toml
@@ -26,8 +26,8 @@ log = { workspace = true, default-features = true }
 parking_lot = "0.12.1"
 regex = "1.6.0"
 rustc-hash = "1.1.0"
-serde = "1.0.196"
-thiserror = "1.0.48"
+serde = { workspace = true, default-features = true }
+thiserror = { workspace = true }
 tracing = "0.1.29"
 tracing-log = "0.1.3"
 tracing-subscriber = { version = "0.2.25", features = ["parking_lot"] }
diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml
index aa6ae7d6fd4a..fec34aa0bca9 100644
--- a/substrate/client/tracing/proc-macro/Cargo.toml
+++ b/substrate/client/tracing/proc-macro/Cargo.toml
@@ -20,5 +20,5 @@ proc-macro = true
 [dependencies]
 proc-macro-crate = "3.0.0"
 proc-macro2 = "1.0.56"
-quote = { version = "1.0.28", features = ["proc-macro"] }
-syn = { version = "2.0.49", features = ["extra-traits", "full", "parsing", "proc-macro"] }
+quote = { features = ["proc-macro"], workspace = true }
+syn = { features = ["extra-traits", "full", "parsing", "proc-macro"], workspace = true }
diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml
index c7a2540f8ed4..2ca37afd61b8 100644
--- a/substrate/client/transaction-pool/Cargo.toml
+++ b/substrate/client/transaction-pool/Cargo.toml
@@ -23,8 +23,8 @@ futures-timer = "3.0.2"
 linked-hash-map = "0.5.4"
 log = { workspace = true, default-features = true }
 parking_lot = "0.12.1"
-serde = { version = "1.0.196", features = ["derive"] }
-thiserror = "1.0.48"
+serde = { features = ["derive"], workspace = true, default-features = true }
+thiserror = { workspace = true }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" }
 sc-client-api = { path = "../api" }
 sc-transaction-pool-api = { path = "api" }
diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml
index 79449b2ee87c..d52e4783fabc 100644
--- a/substrate/client/transaction-pool/api/Cargo.toml
+++ b/substrate/client/transaction-pool/api/Cargo.toml
@@ -16,11 +16,11 @@ async-trait = "0.1.74"
 codec = { package = "parity-scale-codec", version = "3.6.1" }
 futures = "0.3.21"
 log = { workspace = true, default-features = true }
-serde = { version = "1.0.196", features = ["derive"] }
-thiserror = "1.0.48"
+serde = { features = ["derive"], workspace = true, default-features = true }
+thiserror = { workspace = true }
 sp-blockchain = { path = "../../../primitives/blockchain" }
 sp-core = { path = "../../../primitives/core", default-features = false }
 sp-runtime = { path = "../../../primitives/runtime", default-features = false }
 
 [dev-dependencies]
-serde_json = "1.0.113"
+serde_json = { workspace = true, default-features = true }
diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml
index 25290890fcdd..177077317731 100644
--- a/substrate/frame/beefy-mmr/Cargo.toml
+++ b/substrate/frame/beefy-mmr/Cargo.toml
@@ -16,7 +16,7 @@ array-bytes = { version = "6.1", optional = true }
 codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
 log = { workspace = true }
 scale-info = {
version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index 866b032c3313..e38eaa6fb078 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -15,7 +15,7 @@ workspace = true codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } pallet-authorship = { path = "../authorship", default-features = false } diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index 8be7a500d3fe..bf42aae979cd 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -21,7 +21,7 @@ linregress = { version = "0.5.1", optional = true } log = { workspace = true } paste = "1.0" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-support-procedural = { path = "../support/procedural", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 0b127a5a4548..be3bafcd23fa 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -25,7 +25,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { workspace = true } -serde = { version = "1", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } smallvec = { version = "1", default-features = false, features = [ "const_generics", ] } diff --git a/substrate/frame/contracts/proc-macro/Cargo.toml b/substrate/frame/contracts/proc-macro/Cargo.toml index 7e4a6c586668..4080cd0442db 100644 --- a/substrate/frame/contracts/proc-macro/Cargo.toml +++ b/substrate/frame/contracts/proc-macro/Cargo.toml @@ -19,5 +19,5 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" -quote = "1.0.28" -syn = { version = "2.0.49", features = ["full"] } +quote = { workspace = true } +syn = { features = ["full"], workspace = true } diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index d4921bf7b4be..ff5af995026f 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -22,7 +22,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "max-encoded-len", ] } scale-info = { version = "2.10.0", default-features = 
false, features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index e1e8964dd9cf..9a55cda5340c 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index b5d6d1dca54e..1bf1165229a7 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -18,8 +18,8 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.49", features = ["full", "visit"] } -quote = "1.0.28" +syn = { features = ["full", "visit"], workspace = true } +quote = { workspace = true } proc-macro2 = "1.0.56" proc-macro-crate = "3.0.0" diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index 104660a98866..8d9da7df39ea 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true, features = ["derive"] } +serde = { optional = true, features = ["derive"], workspace = true, default-features = true } log = { workspace = true } environmental = { version = "1.1.4", default-features = false } diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index e0334b6692dc..d1bb01dde1a4 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -22,7 +22,7 @@ frame-support = { default-features = false, path = "../support" } frame-system = { default-features = false, path = "../system" } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["derive"] } +serde = { features = ["derive"], workspace = true } sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto" } sp-arithmetic = { default-features = false, path = "../../primitives/arithmetic" } sp-io = { default-features = false, path = "../../primitives/io" } diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index 
e1bf71f70dfb..b3ef4671ce56 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } pallet-balances = { path = "../balances", default-features = false } diff --git a/substrate/frame/parameters/Cargo.toml b/substrate/frame/parameters/Cargo.toml index 82059e3a73dc..07ebeea52d52 100644 --- a/substrate/frame/parameters/Cargo.toml +++ b/substrate/frame/parameters/Cargo.toml @@ -11,7 +11,7 @@ edition.workspace = true codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["max-encoded-len"] } scale-info = { version = "2.1.2", default-features = false, features = ["derive"] } paste = { version = "1.0.14", default-features = false } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } docify = "0.2.5" frame-support = { path = "../support", default-features = false, features = ["experimental"] } diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index 13c8cfc59617..2dfb25a2fd3a 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index c5f39a649ecd..45710f0539e2 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index 51aa8c79fada..d2a46146931b 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"] } +serde = { features = 
["alloc", "derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } diff --git a/substrate/frame/staking/reward-curve/Cargo.toml b/substrate/frame/staking/reward-curve/Cargo.toml index bae730f446fa..e2a2782db2da 100644 --- a/substrate/frame/staking/reward-curve/Cargo.toml +++ b/substrate/frame/staking/reward-curve/Cargo.toml @@ -20,8 +20,8 @@ proc-macro = true [dependencies] proc-macro-crate = "3.0.0" proc-macro2 = "1.0.56" -quote = "1.0.28" -syn = { version = "2.0.49", features = ["full", "visit"] } +quote = { workspace = true } +syn = { features = ["full", "visit"], workspace = true } [dev-dependencies] sp-runtime = { path = "../../../primitives/runtime" } diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index 5f86fb1595a1..e837956613ed 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } thousands = { version = "0.2.0", optional = true } zstd = { version = "0.12.4", default-features = false, optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 73abc1a6aed0..72e2d0bfa569 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.1", default-features = false } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"] } +serde = { features = ["alloc", "derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } @@ -46,7 +46,7 @@ sp-crypto-hashing-proc-macro = { path = "../../primitives/crypto/hashing/proc-ma k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } -serde_json = { version = "1.0.113", default-features = false, features = ["alloc"] } +serde_json = { features = ["alloc"], workspace = true } docify = "0.2.7" static_assertions = "1.1.0" diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index f3434bd24916..859475038020 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -23,8 +23,8 @@ Inflector = "0.11.4" cfg-expr = "0.15.5" itertools = "0.10.3" proc-macro2 = "1.0.56" -quote = "1.0.28" -syn = { version = "2.0.49", features = ["full", "visit-mut"] } +quote = { workspace = true } +syn = { features = ["full", "visit-mut"], workspace = true } frame-support-procedural-tools = { path = "tools" } macro_magic = { version = "0.5.0", features = 
["proc_support"] } proc-macro-warning = { version = "1.0.0", default-features = false } diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index 9be5ca0bfd4c..a75307aca79b 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -17,6 +17,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] proc-macro-crate = "3.0.0" proc-macro2 = "1.0.56" -quote = "1.0.28" -syn = { version = "2.0.49", features = ["extra-traits", "full", "visit"] } +quote = { workspace = true } +syn = { features = ["extra-traits", "full", "visit"], workspace = true } frame-support-procedural-tools-derive = { path = "derive" } diff --git a/substrate/frame/support/procedural/tools/derive/Cargo.toml b/substrate/frame/support/procedural/tools/derive/Cargo.toml index bb9935ba58ca..b39d99a822fb 100644 --- a/substrate/frame/support/procedural/tools/derive/Cargo.toml +++ b/substrate/frame/support/procedural/tools/derive/Cargo.toml @@ -19,5 +19,5 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" -quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.49", features = ["extra-traits", "full", "parsing", "proc-macro"] } +quote = { features = ["proc-macro"], workspace = true } +syn = { features = ["extra-traits", "full", "parsing", "proc-macro"], workspace = true } diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 1dbf46109c87..ae2c56a531fd 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] static_assertions = "1.1.0" -serde = { version = "1.0.196", default-features = false, features = ["derive"] } +serde = { features = ["derive"], workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index 94afab76d89e..ca889faef876 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["derive"] } +serde = { features = ["derive"], workspace = true } frame-support = { path = "../..", default-features = false } frame-system = { path = "../../../system", default-features = false } sp-runtime = { path = "../../../../primitives/runtime", default-features = false } diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index b852aa99b97a..416969e9c477 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -20,7 +20,7 @@ cfg-if = "1.0" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -serde = { version = "1.0.196", default-features 
= false, features = ["alloc", "derive"] } +serde = { features = ["alloc", "derive"], workspace = true } frame-support = { path = "../support", default-features = false } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } sp-io = { path = "../../primitives/io", default-features = false } diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index ae308cf67f01..7339cf0a8cce 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 56c1cc4caa7d..275e1c01f927 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } @@ -29,7 +29,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } pallet-balances = { path = "../balances" } [features] diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index 9a80081e04f6..1da3237df081 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -30,10 +30,10 @@ frame-benchmarking = { path = "../../benchmarking", default-features = false, op # Other dependencies codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } sp-storage = { path = "../../../primitives/storage", default-features = false } diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index 59b6e578571f..1386d9b5a569 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] 
array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", optional = true } +serde = { optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index edc68b386bca..5f90904123d9 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = docify = "0.2.7" impl-trait-for-tuples = "0.2.2" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true, default-features = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index 3330d2a88de9..f4b1d13c5203 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -27,7 +27,7 @@ sp-version = { path = "../version", default-features = false } sp-state-machine = { path = "../state-machine", default-features = false, optional = true } sp-trie = { path = "../trie", default-features = false, optional = true } hash-db = { version = "0.16.0", optional = true } -thiserror = { version = "1.0.48", optional = true } +thiserror = { optional = true, workspace = true } scale-info = { version = "2.10.0", default-features = false, features = [ "derive", ] } diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index f0644eea8f45..f5406758687a 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -19,8 +19,8 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.28" -syn = { version = "2.0.49", features = ["extra-traits", "fold", "full", "visit"] } +quote = { workspace = true } +syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } proc-macro2 = "1.0.56" blake2 = { version = "0.10.4", default-features = false } proc-macro-crate = "3.0.0" diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index cf57aa4b76d2..6f90a2b6262e 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -21,7 +21,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-core = { path = "../core", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, optional = true, features = ["alloc", "derive"] } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-std = { path 
= "../std", default-features = false } sp-io = { path = "../io", default-features = false } diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 92f9d23620ef..301821ad6893 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -24,7 +24,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = integer-sqrt = "0.1.2" num-traits = { version = "0.2.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } static_assertions = "1.1.0" sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/blockchain/Cargo.toml b/substrate/primitives/blockchain/Cargo.toml index 26ba41645e52..9d13d627eebc 100644 --- a/substrate/primitives/blockchain/Cargo.toml +++ b/substrate/primitives/blockchain/Cargo.toml @@ -22,7 +22,7 @@ futures = "0.3.21" log = { workspace = true, default-features = true } parking_lot = "0.12.1" schnellru = "0.2.1" -thiserror = "1.0.48" +thiserror = { workspace = true } sp-api = { path = "../api" } sp-consensus = { path = "../consensus/common" } sp-database = { path = "../database" } diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index f6bc94e3adfb..8b3006f79a7f 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { version = "0.1.74", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } sp-consensus-slots = { path = "../slots", default-features = false } diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index 38fc67b7f526..8ab817d52ef9 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, optional = true, features = ["alloc", "derive"] } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } sp-core = { path = "../../core", default-features = false } diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index cbca0d94d3e5..048e31b0265f 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -20,7 +20,7 @@ 
targets = ["x86_64-unknown-linux-gnu"] async-trait = "0.1.74" futures = { version = "0.3.21", features = ["thread-pool"] } log = { workspace = true, default-features = true } -thiserror = "1.0.48" +thiserror = { workspace = true } sp-core = { path = "../../core" } sp-inherents = { path = "../../inherents" } sp-runtime = { path = "../../runtime" } diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index b556b43bc950..b06208a4308b 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { workspace = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", features = ["alloc", "derive"], default-features = false, optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } sp-core = { path = "../../core", default-features = false } diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 4c908201127b..b707ad18b5b9 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false, features = ["bandersnatch-experimental"] } sp-consensus-slots = { path = "../slots", default-features = false } diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index fadcd1215eee..8372b2b04a6b 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-std = { path = "../../std", default-features = false } sp-timestamp = { path = "../../timestamp", default-features = false } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index c06dd9197ed7..8fcabfeb2384 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } 
scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { workspace = true } -serde = { version = "1.0.196", optional = true, default-features = false, features = ["alloc", "derive"] } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } bounded-collections = { version = "0.2.0", default-features = false } primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } @@ -39,7 +39,7 @@ sp-storage = { path = "../storage", default-features = false } sp-externalities = { path = "../externalities", optional = true } futures = { version = "0.3.21", optional = true } dyn-clonable = { version = "0.9.0", optional = true } -thiserror = { version = "1.0.48", optional = true } +thiserror = { optional = true, workspace = true } tracing = { version = "0.1.29", optional = true } bitflags = "1.3" paste = "1.0.7" @@ -63,7 +63,7 @@ bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "e9782f9", [dev-dependencies] criterion = "0.4.0" -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } lazy_static = "1.4.0" regex = "1.6.0" diff --git a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml index 2856c93c7eed..f244b02ca101 100644 --- a/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/crypto/hashing/proc-macro/Cargo.toml @@ -19,6 +19,6 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.28" -syn = { version = "2.0.49", features = ["full", "parsing"] } +quote = { workspace = true } +syn = { features = ["full", "parsing"], workspace = true } sp-crypto-hashing = { path = "..", default-features = false } diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml index aaf5e618189f..debf964aa3df 100644 --- a/substrate/primitives/debug-derive/Cargo.toml +++ b/substrate/primitives/debug-derive/Cargo.toml @@ -19,8 +19,8 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.28" -syn = "2.0.49" +quote = { workspace = true } +syn = { workspace = true } proc-macro2 = "1.0.56" [features] diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index 6497465a8812..bf12433c5f40 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-api = { path = "../api", default-features = false } sp-runtime = { path = "../runtime", default-features = false } sp-std = { path = "../std", default-features = false } -serde_json = { version = "1.0.113", default-features = false, features = ["alloc", "arbitrary_precision"] } +serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true } [features] default = ["std"] diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index 2a9d205e1664..bfb1d7733471 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -21,7 +21,7 @@ async-trait = { version = "0.1.74", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } 
impl-trait-for-tuples = "0.2.2" -thiserror = { version = "1.0.48", optional = true } +thiserror = { optional = true, workspace = true } sp-runtime = { path = "../runtime", default-features = false, optional = true } sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/maybe-compressed-blob/Cargo.toml b/substrate/primitives/maybe-compressed-blob/Cargo.toml index 84a5592dcaf2..fa5383d03b10 100644 --- a/substrate/primitives/maybe-compressed-blob/Cargo.toml +++ b/substrate/primitives/maybe-compressed-blob/Cargo.toml @@ -14,5 +14,5 @@ readme = "README.md" workspace = true [dependencies] -thiserror = "1.0" +thiserror = { workspace = true } zstd = { version = "0.12.4", default-features = false } diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index f859bb5514ea..50d847782394 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -19,13 +19,13 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { workspace = true } mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } -serde = { version = "1.0.196", features = ["alloc", "derive"], default-features = false, optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } sp-debug-derive = { path = "../debug-derive", default-features = false } sp-runtime = { path = "../runtime", default-features = false } sp-std = { path = "../std", default-features = false } -thiserror = { version = "1.0", optional = true } +thiserror = { optional = true, workspace = true } [dev-dependencies] array-bytes = "6.1" diff --git a/substrate/primitives/npos-elections/Cargo.toml b/substrate/primitives/npos-elections/Cargo.toml index 8c3aee1d086c..7373aa849cb8 100644 --- a/substrate/primitives/npos-elections/Cargo.toml +++ b/substrate/primitives/npos-elections/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-arithmetic = { path = "../arithmetic", default-features = false } sp-core = { path = "../core", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/rpc/Cargo.toml b/substrate/primitives/rpc/Cargo.toml index 04bbe3eea53f..dce0eeee9f99 100644 --- a/substrate/primitives/rpc/Cargo.toml +++ b/substrate/primitives/rpc/Cargo.toml @@ -17,8 +17,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] rustc-hash = "1.1.0" -serde = { version = "1.0.196", features = ["derive"] } +serde = { features = ["derive"], workspace = true, default-features = true } sp-core = { path = "../core" } [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml 
b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index 8031ba75b9b9..7dbd810fea93 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -22,6 +22,6 @@ proc-macro = true Inflector = "0.11.4" proc-macro-crate = "3.0.0" proc-macro2 = "1.0.56" -quote = "1.0.28" +quote = { workspace = true } expander = "2.0.0" -syn = { version = "2.0.49", features = ["extra-traits", "fold", "full", "visit"] } +syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 8e3b63a55de6..cacfc0597229 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -25,7 +25,7 @@ log = { workspace = true } paste = "1.0" rand = { version = "0.8.5", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-arithmetic = { path = "../arithmetic", default-features = false } sp-core = { path = "../core", default-features = false } @@ -38,7 +38,7 @@ simple-mermaid = { version = "0.1.1", optional = true } [dev-dependencies] rand = "0.8.5" -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } zstd = { version = "0.12.4", default-features = false } sp-api = { path = "../api" } sp-state-machine = { path = "../state-machine" } diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index 9429432babb5..21346fbaca53 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -16,7 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index 489209ecc337..09994f1ae91e 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -23,7 +23,7 @@ log = { workspace = true } parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.8.5", optional = true } smallvec = "1.11.0" -thiserror = { version = "1.0.48", optional = true } +thiserror = { optional = true, workspace = true } tracing = { version = "0.1.29", optional = true } sp-core = { path = "../core", default-features = false } sp-externalities = { path = "../externalities", default-features = false } diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index 14af57a7b419..652ab3ef13aa 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -26,7 +26,7 @@ sp-api = { path = "../api", default-features = false } sp-application-crypto = { path = "../application-crypto", 
default-features = false } sp-runtime-interface = { path = "../runtime-interface", default-features = false } sp-externalities = { path = "../externalities", default-features = false } -thiserror = { version = "1.0", optional = true } +thiserror = { optional = true, workspace = true } # ECIES dependencies ed25519-dalek = { version = "2.1", optional = true } diff --git a/substrate/primitives/storage/Cargo.toml b/substrate/primitives/storage/Cargo.toml index e15a3296d521..d3ade87ea47f 100644 --- a/substrate/primitives/storage/Cargo.toml +++ b/substrate/primitives/storage/Cargo.toml @@ -20,7 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", optional = true, default-features = false } ref-cast = "1.0.0" -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-debug-derive = { path = "../debug-derive", default-features = false } sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index 0411326c28b8..f310216dd58d 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["derive"], optional = true } +serde = { features = ["derive"], optional = true, workspace = true } sp-application-crypto = { path = "../application-crypto", default-features = false } sp-core = { path = "../core", default-features = false } sp-runtime = { path = "../runtime", default-features = false } diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 914c1e7865e9..9e2b802bfb15 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = { version = "0.1.74", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -thiserror = { version = "1.0.48", optional = true } +thiserror = { optional = true, workspace = true } sp-inherents = { path = "../inherents", default-features = false } sp-runtime = { path = "../runtime", default-features = false } sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index f871f462d363..16d3ca19a179 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -30,7 +30,7 @@ nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.8", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -thiserror = { version = "1.0.48", optional = true } +thiserror = { optional = true, workspace = true } tracing = { version = "0.1.29", optional = true } trie-db = { version = "0.28.0", default-features = false } trie-root = { version = "0.18.0", 
default-features = false } diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index 21f957763757..a94e2322430b 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -21,8 +21,8 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, features = ["alloc", "derive"], optional = true } -thiserror = { version = "1.0.48", optional = true } +serde = { features = ["alloc", "derive"], optional = true, workspace = true } +thiserror = { optional = true, workspace = true } sp-crypto-hashing-proc-macro = { path = "../crypto/hashing/proc-macro" } sp-runtime = { path = "../runtime", default-features = false } sp-std = { path = "../std", default-features = false } diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index b99894f8ab5b..f7abf88c9a67 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -21,8 +21,8 @@ proc-macro = true [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } proc-macro2 = "1.0.56" -quote = "1.0.28" -syn = { version = "2.0.49", features = ["extra-traits", "fold", "full", "visit"] } +quote = { workspace = true } +syn = { features = ["extra-traits", "fold", "full", "visit"], workspace = true } [dev-dependencies] sp-version = { path = ".." } diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index 87e0c6a7299d..a7d61de001b5 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] bounded-collections = { version = "0.2.0", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.196", default-features = false, optional = true, features = ["alloc", "derive"] } +serde = { optional = true, features = ["alloc", "derive"], workspace = true } smallvec = "1.11.0" sp-arithmetic = { path = "../arithmetic", default-features = false } sp-debug-derive = { path = "../debug-derive", default-features = false } diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index 4ec2857b1096..349b04d32d7b 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -20,8 +20,8 @@ array-bytes = "6.1" async-trait = "0.1.74" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" -serde = "1.0.196" -serde_json = "1.0.113" +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } sc-client-api = { path = "../../client/api" } sc-client-db = { path = "../../client/db", default-features = false, features = [ "test-helpers", diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 49b84e04dab4..3bba5cd5bf04 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -62,8 +62,8 @@ 
sc-executor-common = { path = "../../client/executor/common" } sp-consensus = { path = "../../primitives/consensus/common" } substrate-test-runtime-client = { path = "client" } sp-tracing = { path = "../../primitives/tracing" } -serde = { version = "1.0.196", features = ["alloc", "derive"], default-features = false } -serde_json = { version = "1.0.113", default-features = false, features = ["alloc"] } +serde = { features = ["alloc", "derive"], workspace = true } +serde_json = { features = ["alloc"], workspace = true } [build-dependencies] substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true } diff --git a/substrate/test-utils/runtime/transaction-pool/Cargo.toml b/substrate/test-utils/runtime/transaction-pool/Cargo.toml index b52a897438b6..33e56e8e55e7 100644 --- a/substrate/test-utils/runtime/transaction-pool/Cargo.toml +++ b/substrate/test-utils/runtime/transaction-pool/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" parking_lot = "0.12.1" -thiserror = "1.0" +thiserror = { workspace = true } sc-transaction-pool = { path = "../../../client/transaction-pool" } sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index 4f6cbc1d5d6a..a2db83052c54 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -29,9 +29,9 @@ linked-hash-map = "0.5.4" log = { workspace = true, default-features = true } rand = { version = "0.8.5", features = ["small_rng"] } rand_pcg = "0.3.1" -serde = "1.0.196" -serde_json = "1.0.113" -thiserror = "1.0.48" +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +thiserror = { workspace = true } thousands = "0.2.0" frame-benchmarking = { path = "../../../frame/benchmarking" } frame-support = { path = "../../../frame/support" } diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index c96d37d4ae63..61e0a861ee0e 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpsee = { version = "0.22", features = ["http-client"] } codec = { package = "parity-scale-codec", version = "3.6.1" } log = { workspace = true, default-features = true } -serde = "1.0.196" +serde = { workspace = true, default-features = true } sp-core = { path = "../../../primitives/core" } sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" } sp-state-machine = { path = "../../../primitives/state-machine" } diff --git a/substrate/utils/frame/rpc/client/Cargo.toml b/substrate/utils/frame/rpc/client/Cargo.toml index 0057bf6eb50b..b51e3f44f4e6 100644 --- a/substrate/utils/frame/rpc/client/Cargo.toml +++ b/substrate/utils/frame/rpc/client/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpsee = { version = "0.22", features = ["ws-client"] } sc-rpc-api = { path = "../../../../client/rpc-api" } async-trait = "0.1.74" -serde = "1" +serde = { workspace = true, default-features = true } sp-runtime = { path = "../../../../primitives/runtime" } log = { workspace = true, default-features = true } diff --git 
a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 58c2f3d99acc..f9a45e21ce13 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -serde = { version = "1", features = ["derive"] } +serde = { features = ["derive"], workspace = true, default-features = true } sp-core = { path = "../../../../primitives/core" } sp-state-machine = { path = "../../../../primitives/state-machine" } @@ -32,4 +32,4 @@ sc-rpc-api = { path = "../../../../client/rpc-api" } sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.113" +serde_json = { workspace = true, default-features = true } diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index a4b0a7777fbf..2e4bb6a10578 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.22", features = ["jsonrpsee-types"] } -serde = "1" +serde = { workspace = true, default-features = true } frame-support = { path = "../../../../frame/support" } sc-rpc-api = { path = "../../../../client/rpc-api" } sp-storage = { path = "../../../../primitives/storage" } diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml index 63165f07e5c0..d123fc5f62dc 100644 --- a/substrate/utils/frame/try-runtime/cli/Cargo.toml +++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml @@ -42,8 +42,8 @@ clap = { version = "4.5.1", features = ["derive"] } hex = { version = "0.4.3", default-features = false } log = { workspace = true, default-features = true } parity-scale-codec = "3.6.1" -serde = "1.0.196" -serde_json = "1.0.113" +serde = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } zstd = { version = "0.12.4", default-features = false } [dev-dependencies] diff --git a/substrate/utils/prometheus/Cargo.toml b/substrate/utils/prometheus/Cargo.toml index 2a09cb2bb510..36527ac6183b 100644 --- a/substrate/utils/prometheus/Cargo.toml +++ b/substrate/utils/prometheus/Cargo.toml @@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"] hyper = { version = "0.14.16", default-features = false, features = ["http1", "server", "tcp"] } log = { workspace = true, default-features = true } prometheus = { version = "0.13.0", default-features = false } -thiserror = "1.0" +thiserror = { workspace = true } tokio = { version = "1.22.0", features = ["parking_lot"] } [dev-dependencies] From d250a6e4270a77f28e2737a4faa3fb78c8ea7a85 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Tue, 20 Feb 2024 15:28:05 +0100 Subject: [PATCH 05/51] Contracts: Stabilize APIs (#3384) Remove `#[unstable]` on `call_v2`, `instantiate_v2`, `lock_delegate_dependency` and `unlock_delegate_dependency`. See ink! 
integrations: - call_v2: https://github.com/paritytech/ink/pull/2077 - instantiate_v2: - lock/unlock dependency: https://github.com/paritytech/ink/pull/2076 --- prdoc/pr_3384.prdoc | 14 +++++++ .../contracts/fixtures/contracts/call.rs | 8 ++-- .../fixtures/contracts/call_return_code.rs | 8 ++-- .../contracts/call_runtime_and_call.rs | 8 ++-- .../fixtures/contracts/call_with_limit.rs | 1 - .../fixtures/contracts/caller_contract.rs | 10 +---- .../contracts/chain_extension_temp_storage.rs | 8 ++-- .../contracts/create_storage_and_call.rs | 3 +- .../create_storage_and_instantiate.rs | 1 - .../contracts/destroy_and_transfer.rs | 3 -- .../contracts/instantiate_return_code.rs | 1 - ...ency.rs => locking_delegate_dependency.rs} | 12 +++--- .../contracts/reentrance_count_call.rs | 8 ++-- .../fixtures/contracts/self_destruct.rs | 6 ++- .../frame/contracts/src/benchmarking/mod.rs | 12 +++--- substrate/frame/contracts/src/exec.rs | 16 ++++---- substrate/frame/contracts/src/lib.rs | 4 +- substrate/frame/contracts/src/schedule.rs | 12 +++--- substrate/frame/contracts/src/storage.rs | 6 +-- substrate/frame/contracts/src/tests.rs | 38 +++++++++---------- substrate/frame/contracts/src/wasm/mod.rs | 20 +++++----- substrate/frame/contracts/src/wasm/runtime.rs | 30 +++++++-------- substrate/frame/contracts/src/weights.rs | 12 +++--- substrate/frame/contracts/uapi/src/host.rs | 20 +++------- .../frame/contracts/uapi/src/host/riscv32.rs | 4 +- .../frame/contracts/uapi/src/host/wasm32.rs | 12 +++--- 26 files changed, 137 insertions(+), 140 deletions(-) create mode 100644 prdoc/pr_3384.prdoc rename substrate/frame/contracts/fixtures/contracts/{add_remove_delegate_dependency.rs => locking_delegate_dependency.rs} (83%) diff --git a/prdoc/pr_3384.prdoc b/prdoc/pr_3384.prdoc new file mode 100644 index 000000000000..c19464977582 --- /dev/null +++ b/prdoc/pr_3384.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet_contracts] stabilize `call_v2`, `instantiate_v2`, `lock_dependency` and `unlock_dependency`" + +doc: + - audience: Runtime Dev + description: | + These APIs are currently unstable and are being stabilized in this PR. + Note: `add_delegate_dependency` and `remove_delegate_dependency` have been renamed to `lock_dependency` and `unlock_dependency` respectively. + +crates: + - name: pallet-contracts-uapi + - name: pallet-contracts diff --git a/substrate/frame/contracts/fixtures/contracts/call.rs b/substrate/frame/contracts/fixtures/contracts/call.rs index f7d9d862d746..535745ffc0bc 100644 --- a/substrate/frame/contracts/fixtures/contracts/call.rs +++ b/substrate/frame/contracts/fixtures/contracts/call.rs @@ -35,11 +35,13 @@ pub extern "C" fn call() { ); // Call the callee - api::call_v1( + api::call_v2( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much gas to devote for the execution. 0 = all. - &0u64.to_le_bytes(), // value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &0u64.to_le_bytes(), // Value transferred to the contract. 
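            // Editorial note, not part of the original patch: the remaining
            // arguments of `call_v2` are the input data forwarded to the
            // callee and an optional output buffer; passing `None` means the
            // callee's return data is not copied back to the caller.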
callee_input, None, ) diff --git a/substrate/frame/contracts/fixtures/contracts/call_return_code.rs b/substrate/frame/contracts/fixtures/contracts/call_return_code.rs index 1256588d3b58..3d5a1073eaec 100644 --- a/substrate/frame/contracts/fixtures/contracts/call_return_code.rs +++ b/substrate/frame/contracts/fixtures/contracts/call_return_code.rs @@ -38,11 +38,13 @@ pub extern "C" fn call() { ); // Call the callee - let err_code = match api::call_v1( + let err_code = match api::call_v2( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much gas to devote for the execution. 0 = all. - &100u64.to_le_bytes(), // value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &100u64.to_le_bytes(), // Value transferred to the contract. input, None, ) { diff --git a/substrate/frame/contracts/fixtures/contracts/call_runtime_and_call.rs b/substrate/frame/contracts/fixtures/contracts/call_runtime_and_call.rs index fdeb6097e732..1321d36dc7ef 100644 --- a/substrate/frame/contracts/fixtures/contracts/call_runtime_and_call.rs +++ b/substrate/frame/contracts/fixtures/contracts/call_runtime_and_call.rs @@ -39,11 +39,13 @@ pub extern "C" fn call() { api::call_runtime(call).unwrap(); // Call the callee - api::call_v1( + api::call_v2( uapi::CallFlags::empty(), callee_addr, - 0u64, // How much gas to devote for the execution. 0 = all. - &0u64.to_le_bytes(), // value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &0u64.to_le_bytes(), // Value transferred to the contract. callee_input, None, ) diff --git a/substrate/frame/contracts/fixtures/contracts/call_with_limit.rs b/substrate/frame/contracts/fixtures/contracts/call_with_limit.rs index 40cde234b44a..f0851f32c75b 100644 --- a/substrate/frame/contracts/fixtures/contracts/call_with_limit.rs +++ b/substrate/frame/contracts/fixtures/contracts/call_with_limit.rs @@ -38,7 +38,6 @@ pub extern "C" fn call() { forwarded_input: [u8], ); - #[allow(deprecated)] api::call_v2( uapi::CallFlags::empty(), callee_addr, diff --git a/substrate/frame/contracts/fixtures/contracts/caller_contract.rs b/substrate/frame/contracts/fixtures/contracts/caller_contract.rs index c2629e9fa197..fffdb66a33d7 100644 --- a/substrate/frame/contracts/fixtures/contracts/caller_contract.rs +++ b/substrate/frame/contracts/fixtures/contracts/caller_contract.rs @@ -40,7 +40,6 @@ pub extern "C" fn call() { let reverted_input = [1u8, 34, 51, 68, 85, 102, 119]; // Fail to deploy the contract since it returns a non-zero exit status. - #[allow(deprecated)] let res = api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. @@ -55,7 +54,6 @@ pub extern "C" fn call() { assert!(matches!(res, Err(ReturnErrorCode::CalleeReverted))); // Fail to deploy the contract due to insufficient ref_time weight. - #[allow(deprecated)] let res = api::instantiate_v2( code_hash, 1u64, // too little ref_time weight 0u64, // How much proof_size weight to devote for the execution. 0 = all. @@ -65,7 +63,6 @@ pub extern "C" fn call() { assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); // Fail to deploy the contract due to insufficient proof_size weight. - #[allow(deprecated)] let res = api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. 
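        // Editorial note, not part of the original patch: a limit of zero
        // forwards all remaining weight to the callee, while a non-zero
        // limit caps the sub-call. The deliberately tiny `proof_size` limit
        // below is what makes this instantiation trap with `CalleeTrapped`.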
1u64, // Too little proof_size weight @@ -78,7 +75,6 @@ pub extern "C" fn call() { let mut callee = [0u8; 32]; let callee = &mut &mut callee[..]; - #[allow(deprecated)] api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. @@ -94,7 +90,6 @@ pub extern "C" fn call() { assert_eq!(callee.len(), 32); // Call the new contract and expect it to return failing exit code. - #[allow(deprecated)] let res = api::call_v2( uapi::CallFlags::empty(), callee, @@ -108,11 +103,10 @@ pub extern "C" fn call() { assert!(matches!(res, Err(ReturnErrorCode::CalleeReverted))); // Fail to call the contract due to insufficient ref_time weight. - #[allow(deprecated)] let res = api::call_v2( uapi::CallFlags::empty(), callee, - 1u64, // too little ref_time weight + 1u64, // Too little ref_time weight. 0u64, // How much proof_size weight to devote for the execution. 0 = all. None, // No deposit limit. &value, @@ -122,7 +116,6 @@ pub extern "C" fn call() { assert!(matches!(res, Err(ReturnErrorCode::CalleeTrapped))); // Fail to call the contract due to insufficient proof_size weight. - #[allow(deprecated)] let res = api::call_v2( uapi::CallFlags::empty(), callee, @@ -137,7 +130,6 @@ pub extern "C" fn call() { // Call the contract successfully. let mut output = [0u8; 4]; - #[allow(deprecated)] api::call_v2( uapi::CallFlags::empty(), callee, diff --git a/substrate/frame/contracts/fixtures/contracts/chain_extension_temp_storage.rs b/substrate/frame/contracts/fixtures/contracts/chain_extension_temp_storage.rs index 1ab08efb3c7e..2e15fb02a12c 100644 --- a/substrate/frame/contracts/fixtures/contracts/chain_extension_temp_storage.rs +++ b/substrate/frame/contracts/fixtures/contracts/chain_extension_temp_storage.rs @@ -50,11 +50,13 @@ pub extern "C" fn call() { output!(addr, [0u8; 32], api::address,); // call self - api::call_v1( + api::call_v2( uapi::CallFlags::ALLOW_REENTRY, addr, - 0u64, // How much gas to devote for the execution. 0 = all. - &0u64.to_le_bytes(), // value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &0u64.to_le_bytes(), // Value transferred to the contract. input, None, ) diff --git a/substrate/frame/contracts/fixtures/contracts/create_storage_and_call.rs b/substrate/frame/contracts/fixtures/contracts/create_storage_and_call.rs index 8b79dd87dffd..f8ce0ff4fb84 100644 --- a/substrate/frame/contracts/fixtures/contracts/create_storage_and_call.rs +++ b/substrate/frame/contracts/fixtures/contracts/create_storage_and_call.rs @@ -40,14 +40,13 @@ pub extern "C" fn call() { api::set_storage(buffer, &[1u8; 4]); // Call the callee - #[allow(deprecated)] api::call_v2( uapi::CallFlags::empty(), callee, 0u64, // How much ref_time weight to devote for the execution. 0 = all. 0u64, // How much proof_size weight to devote for the execution. 0 = all. Some(deposit_limit), - &0u64.to_le_bytes(), // value transferred to the contract. + &0u64.to_le_bytes(), // Value transferred to the contract. 
input, None, ) diff --git a/substrate/frame/contracts/fixtures/contracts/create_storage_and_instantiate.rs b/substrate/frame/contracts/fixtures/contracts/create_storage_and_instantiate.rs index c68d99eff821..fa3b9000a7ed 100644 --- a/substrate/frame/contracts/fixtures/contracts/create_storage_and_instantiate.rs +++ b/substrate/frame/contracts/fixtures/contracts/create_storage_and_instantiate.rs @@ -40,7 +40,6 @@ pub extern "C" fn call() { let mut address = [0u8; 32]; let address = &mut &mut address[..]; - #[allow(deprecated)] api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. diff --git a/substrate/frame/contracts/fixtures/contracts/destroy_and_transfer.rs b/substrate/frame/contracts/fixtures/contracts/destroy_and_transfer.rs index cfcfd60f21ee..62fb63b56020 100644 --- a/substrate/frame/contracts/fixtures/contracts/destroy_and_transfer.rs +++ b/substrate/frame/contracts/fixtures/contracts/destroy_and_transfer.rs @@ -34,7 +34,6 @@ pub extern "C" fn deploy() { let address = &mut &mut address[..]; let salt = [71u8, 17u8]; - #[allow(deprecated)] api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. @@ -60,7 +59,6 @@ pub extern "C" fn call() { api::get_storage(&ADDRESS_KEY, callee_addr).unwrap(); // Calling the destination contract with non-empty input data should fail. - #[allow(deprecated)] let res = api::call_v2( uapi::CallFlags::empty(), callee_addr, @@ -74,7 +72,6 @@ pub extern "C" fn call() { assert!(matches!(res, Err(uapi::ReturnErrorCode::CalleeTrapped))); // Call the destination contract regularly, forcing it to self-destruct. - #[allow(deprecated)] api::call_v2( uapi::CallFlags::empty(), callee_addr, diff --git a/substrate/frame/contracts/fixtures/contracts/instantiate_return_code.rs b/substrate/frame/contracts/fixtures/contracts/instantiate_return_code.rs index 67cd739d85a6..de194abe4840 100644 --- a/substrate/frame/contracts/fixtures/contracts/instantiate_return_code.rs +++ b/substrate/frame/contracts/fixtures/contracts/instantiate_return_code.rs @@ -31,7 +31,6 @@ pub extern "C" fn call() { input!(buffer, 36, code_hash: [u8; 32],); let input = &buffer[32..]; - #[allow(deprecated)] let err_code = match api::instantiate_v2( code_hash, 0u64, // How much ref_time weight to devote for the execution. 0 = all. diff --git a/substrate/frame/contracts/fixtures/contracts/add_remove_delegate_dependency.rs b/substrate/frame/contracts/fixtures/contracts/locking_delegate_dependency.rs similarity index 83% rename from substrate/frame/contracts/fixtures/contracts/add_remove_delegate_dependency.rs rename to substrate/frame/contracts/fixtures/contracts/locking_delegate_dependency.rs index 759ff7993740..bb76c942aad1 100644 --- a/substrate/frame/contracts/fixtures/contracts/add_remove_delegate_dependency.rs +++ b/substrate/frame/contracts/fixtures/contracts/locking_delegate_dependency.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! This contract tests the behavior of adding / removing delegate_dependencies when delegate +//! This contract tests the behavior of locking / unlocking delegate_dependencies when delegate //! calling into a contract. 
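//!
//! Editorial sketch inferred from the fixture below, not part of the
//! original patch: the call input is a little-endian `u32` action code
//! followed by the 32-byte code hash of the dependency, where action
//! 1 locks the dependency, 2 unlocks it, and 3 terminates the contract.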
#![no_std] #![no_main] @@ -34,15 +34,13 @@ fn load_input(delegate_call: bool) { ); match action { - // 1 = Add delegate dependency + // 1 = Lock delegate dependency 1 => { - #[allow(deprecated)] - api::add_delegate_dependency(code_hash); + api::lock_delegate_dependency(code_hash); }, - // 2 = Remove delegate dependency + // 2 = Unlock delegate dependency 2 => { - #[allow(deprecated)] - api::remove_delegate_dependency(code_hash); + api::unlock_delegate_dependency(code_hash); }, // 3 = Terminate 3 => { diff --git a/substrate/frame/contracts/fixtures/contracts/reentrance_count_call.rs b/substrate/frame/contracts/fixtures/contracts/reentrance_count_call.rs index 9812dce2e818..0acfe017f8df 100644 --- a/substrate/frame/contracts/fixtures/contracts/reentrance_count_call.rs +++ b/substrate/frame/contracts/fixtures/contracts/reentrance_count_call.rs @@ -42,11 +42,13 @@ pub extern "C" fn call() { if expected_reentrance_count != 5 { let count = (expected_reentrance_count + 1).to_le_bytes(); - api::call_v1( + api::call_v2( uapi::CallFlags::ALLOW_REENTRY, addr, - 0u64, // How much gas to devote for the execution. 0 = all. - &0u64.to_le_bytes(), // value transferred to the contract. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. + &0u64.to_le_bytes(), // Value transferred to the contract. &count, None, ) diff --git a/substrate/frame/contracts/fixtures/contracts/self_destruct.rs b/substrate/frame/contracts/fixtures/contracts/self_destruct.rs index 94c3ac387a0a..d3836d2d8c73 100644 --- a/substrate/frame/contracts/fixtures/contracts/self_destruct.rs +++ b/substrate/frame/contracts/fixtures/contracts/self_destruct.rs @@ -37,10 +37,12 @@ pub extern "C" fn call() { if !input.is_empty() { output!(addr, [0u8; 32], api::address,); - api::call_v1( + api::call_v2( uapi::CallFlags::ALLOW_REENTRY, addr, - 0u64, // How much gas to devote for the execution. 0 = all. + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much proof_size to devote for the execution. 0 = all. + None, // No deposit limit. &0u64.to_le_bytes(), // Value to transfer. &[0u8; 0], None, diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index d6c24daa7c4d..4b6ff63c6f5d 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -895,7 +895,7 @@ benchmarks! { }, ImportedFunction { module: "seal0", - name: "add_delegate_dependency", + name: "lock_delegate_dependency", params: vec![ValueType::I32], return_type: None, } @@ -2402,7 +2402,7 @@ benchmarks! { }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) #[pov_mode = Measured] - add_delegate_dependency { + lock_delegate_dependency { let r in 0 .. T::MaxDelegateDependencies::get(); let code_hashes = (0..r) .map(|i| { @@ -2420,7 +2420,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "seal0", - name: "add_delegate_dependency", + name: "lock_delegate_dependency", params: vec![ValueType::I32], return_type: None, }], @@ -2440,7 +2440,7 @@ benchmarks! { let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::MAX, None, vec![]) - remove_delegate_dependency { + unlock_delegate_dependency { let r in 0 .. T::MaxDelegateDependencies::get(); let code_hashes = (0..r) .map(|i| { @@ -2459,12 +2459,12 @@ benchmarks! 
{ memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "seal0", - name: "remove_delegate_dependency", + name: "unlock_delegate_dependency", params: vec![ValueType::I32], return_type: None, }, ImportedFunction { module: "seal0", - name: "add_delegate_dependency", + name: "lock_delegate_dependency", params: vec![ValueType::I32], return_type: None }], diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index 79220021efe4..7da321af547d 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -348,20 +348,20 @@ pub trait Ext: sealing::Sealed { /// - [`Error::::MaxDelegateDependenciesReached`] /// - [`Error::::CannotAddSelfAsDelegateDependency`] /// - [`Error::::DelegateDependencyAlreadyExists`] - fn add_delegate_dependency( + fn lock_delegate_dependency( &mut self, code_hash: CodeHash, ) -> Result<(), DispatchError>; /// Removes a delegate dependency from [`ContractInfo`]'s `delegate_dependencies` field. /// - /// This is the counterpart of [`Self::add_delegate_dependency`]. It decreases the reference - /// count and refunds the deposit that was charged by [`Self::add_delegate_dependency`]. + /// This is the counterpart of [`Self::lock_delegate_dependency`]. It decreases the reference + /// count and refunds the deposit that was charged by [`Self::lock_delegate_dependency`]. /// /// # Errors /// /// - [`Error::::DelegateDependencyNotFound`] - fn remove_delegate_dependency( + fn unlock_delegate_dependency( &mut self, code_hash: &CodeHash, ) -> Result<(), DispatchError>; @@ -1554,7 +1554,7 @@ where }); } - fn add_delegate_dependency( + fn lock_delegate_dependency( &mut self, code_hash: CodeHash, ) -> Result<(), DispatchError> { @@ -1565,7 +1565,7 @@ where let code_info = CodeInfoOf::::get(code_hash).ok_or(Error::::CodeNotFound)?; let deposit = T::CodeHashLockupDepositPercent::get().mul_ceil(code_info.deposit()); - info.add_delegate_dependency(code_hash, deposit)?; + info.lock_delegate_dependency(code_hash, deposit)?; Self::increment_refcount(code_hash)?; frame .nested_storage @@ -1573,14 +1573,14 @@ where Ok(()) } - fn remove_delegate_dependency( + fn unlock_delegate_dependency( &mut self, code_hash: &CodeHash, ) -> Result<(), DispatchError> { let frame = self.top_frame_mut(); let info = frame.contract_info.get(&frame.account_id); - let deposit = info.remove_delegate_dependency(code_hash)?; + let deposit = info.unlock_delegate_dependency(code_hash)?; Self::decrement_refcount(*code_hash); frame .nested_storage diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 533085f2b874..943bf5f85d4e 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -325,7 +325,7 @@ pub mod pallet { type DepositPerItem: Get>; /// The percentage of the storage deposit that should be held for using a code hash. - /// Instantiating a contract, or calling [`chain_extension::Ext::add_delegate_dependency`] + /// Instantiating a contract, or calling [`chain_extension::Ext::lock_delegate_dependency`] /// protects the code from being removed. In order to prevent abuse these actions are /// protected with a percentage of the code deposit. #[pallet::constant] @@ -347,7 +347,7 @@ pub mod pallet { type MaxStorageKeyLen: Get; /// The maximum number of delegate_dependencies that a contract can lock with - /// [`chain_extension::Ext::add_delegate_dependency`]. + /// [`chain_extension::Ext::lock_delegate_dependency`]. 
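	/// Editorial note, not part of the original patch: the benchmarks in
	/// this PR exercise up to 32 dependencies (`r` in `[0, 32]`), so a
	/// runtime would typically configure the constant below with a small
	/// bound such as `ConstU32<32>` (hypothetical value, for illustration).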
#[pallet::constant] type MaxDelegateDependencies: Get; diff --git a/substrate/frame/contracts/src/schedule.rs b/substrate/frame/contracts/src/schedule.rs index 54e037d6be94..b2e3801deaec 100644 --- a/substrate/frame/contracts/src/schedule.rs +++ b/substrate/frame/contracts/src/schedule.rs @@ -298,11 +298,11 @@ pub struct HostFnWeights { /// Weight of calling `instantiation_nonce`. pub instantiation_nonce: Weight, - /// Weight of calling `add_delegate_dependency`. - pub add_delegate_dependency: Weight, + /// Weight of calling `lock_delegate_dependency`. + pub lock_delegate_dependency: Weight, - /// Weight of calling `remove_delegate_dependency`. - pub remove_delegate_dependency: Weight, + /// Weight of calling `unlock_delegate_dependency`. + pub unlock_delegate_dependency: Weight, /// The type parameter is used in the default implementation. #[codec(skip)] @@ -435,8 +435,8 @@ impl Default for HostFnWeights { reentrance_count: cost!(seal_reentrance_count), account_reentrance_count: cost!(seal_account_reentrance_count), instantiation_nonce: cost!(seal_instantiation_nonce), - add_delegate_dependency: cost!(add_delegate_dependency), - remove_delegate_dependency: cost!(remove_delegate_dependency), + lock_delegate_dependency: cost!(lock_delegate_dependency), + unlock_delegate_dependency: cost!(unlock_delegate_dependency), _phantom: PhantomData, } } diff --git a/substrate/frame/contracts/src/storage.rs b/substrate/frame/contracts/src/storage.rs index d4d261f4ec1f..3304166607d2 100644 --- a/substrate/frame/contracts/src/storage.rs +++ b/substrate/frame/contracts/src/storage.rs @@ -66,7 +66,7 @@ pub struct ContractInfo { storage_base_deposit: BalanceOf, /// Map of code hashes and deposit balances. /// - /// Tracks the code hash and deposit held for adding delegate dependencies. Dependencies added + /// Tracks the code hash and deposit held for locking delegate dependencies. Dependencies added /// to the map can not be removed from the chain state and can be safely used for delegate /// calls. delegate_dependencies: BoundedBTreeMap, BalanceOf, T::MaxDelegateDependencies>, @@ -233,7 +233,7 @@ impl ContractInfo { /// /// Returns an error if the maximum number of delegate_dependencies is reached or if /// the delegate dependency already exists. - pub fn add_delegate_dependency( + pub fn lock_delegate_dependency( &mut self, code_hash: CodeHash, amount: BalanceOf, @@ -249,7 +249,7 @@ impl ContractInfo { /// dependency. /// /// Returns an error if the entry doesn't exist. - pub fn remove_delegate_dependency( + pub fn unlock_delegate_dependency( &mut self, code_hash: &CodeHash, ) -> Result, DispatchError> { diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index a06b65b86b5a..cd4e43ed4c27 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -5406,21 +5406,21 @@ fn delegate_call_indeterministic_code() { } #[test] -fn add_remove_delegate_dependency_works() { +fn locking_delegate_dependency_works() { // set hash lock up deposit to 30%, to test deposit calculation. 
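		// Editorial note, not part of the original patch: with the lockup
		// percent set to 30%, the dependency deposit charged below should
		// equal `Perbill::from_percent(30).mul_ceil(code_deposit)`, mirroring
		// `T::CodeHashLockupDepositPercent::get().mul_ceil(code_info.deposit())`
		// in `exec.rs`.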
CODE_HASH_LOCKUP_DEPOSIT_PERCENT.with(|c| *c.borrow_mut() = Perbill::from_percent(30)); MAX_DELEGATE_DEPENDENCIES.with(|c| *c.borrow_mut() = 1); let (wasm_caller, self_code_hash) = - compile_module::("add_remove_delegate_dependency").unwrap(); + compile_module::("locking_delegate_dependency").unwrap(); let (wasm_callee, code_hash) = compile_module::("dummy").unwrap(); let (wasm_other, other_code_hash) = compile_module::("call").unwrap(); - // Define inputs with various actions to test adding / removing delegate_dependencies. + // Define inputs with various actions to test locking / unlocking delegate_dependencies. // See the contract for more details. let noop_input = (0u32, code_hash); - let add_delegate_dependency_input = (1u32, code_hash); - let remove_delegate_dependency_input = (2u32, code_hash); + let lock_delegate_dependency_input = (1u32, code_hash); + let unlock_delegate_dependency_input = (2u32, code_hash); let terminate_input = (3u32, code_hash); // Instantiate the caller contract with the given input. @@ -5457,9 +5457,9 @@ fn add_remove_delegate_dependency_works() { ExtBuilder::default().existential_deposit(ED).build().execute_with(|| { let _ = Balances::set_balance(&ALICE, 1_000_000); - // Instantiate with add_delegate_dependency should fail since the code is not yet on chain. + // Instantiate with lock_delegate_dependency should fail since the code is not yet on chain. assert_err!( - instantiate(&add_delegate_dependency_input).result, + instantiate(&lock_delegate_dependency_input).result, Error::::CodeNotFound ); @@ -5469,7 +5469,7 @@ fn add_remove_delegate_dependency_works() { .unwrap(); // Instantiate should now work. - let addr_caller = instantiate(&add_delegate_dependency_input).result.unwrap().account_id; + let addr_caller = instantiate(&lock_delegate_dependency_input).result.unwrap().account_id; // There should be a dependency and a deposit. let contract = test_utils::get_contract(&addr_caller); @@ -5490,27 +5490,27 @@ fn add_remove_delegate_dependency_works() { >::CodeInUse ); - // Adding an already existing dependency should fail. + // Locking an already existing dependency should fail. assert_err!( - call(&addr_caller, &add_delegate_dependency_input).result, + call(&addr_caller, &lock_delegate_dependency_input).result, Error::::DelegateDependencyAlreadyExists ); - // Adding a dependency to self should fail. + // Locking self should fail. assert_err!( call(&addr_caller, &(1u32, self_code_hash)).result, Error::::CannotAddSelfAsDelegateDependency ); - // Adding more than the maximum allowed delegate_dependencies should fail. + // Locking more than the maximum allowed delegate_dependencies should fail. Contracts::bare_upload_code(ALICE, wasm_other, None, Determinism::Enforced).unwrap(); assert_err!( call(&addr_caller, &(1u32, other_code_hash)).result, Error::::MaxDelegateDependenciesReached ); - // Removing dependency should work. - assert_ok!(call(&addr_caller, &remove_delegate_dependency_input).result); + // Unlocking dependency should work. + assert_ok!(call(&addr_caller, &unlock_delegate_dependency_input).result); // Dependency should be removed, and deposit should be returned. let contract = test_utils::get_contract(&addr_caller); @@ -5525,18 +5525,18 @@ fn add_remove_delegate_dependency_works() { // Removing an unexisting dependency should fail. 
assert_err!( - call(&addr_caller, &remove_delegate_dependency_input).result, + call(&addr_caller, &unlock_delegate_dependency_input).result, Error::::DelegateDependencyNotFound ); - // Adding a dependency with a storage limit too low should fail. + // Locking a dependency with a storage limit too low should fail. DEFAULT_DEPOSIT_LIMIT.with(|c| *c.borrow_mut() = dependency_deposit - 1); assert_err!( - call(&addr_caller, &add_delegate_dependency_input).result, + call(&addr_caller, &lock_delegate_dependency_input).result, Error::::StorageDepositLimitExhausted ); - // Since we removed the dependency we should now be able to remove the code. + // Since we unlocked the dependency we should now be able to remove the code. assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash)); // Calling should fail since the delegated contract is not on chain anymore. @@ -5545,7 +5545,7 @@ fn add_remove_delegate_dependency_works() { // Restore initial deposit limit and add the dependency back. DEFAULT_DEPOSIT_LIMIT.with(|c| *c.borrow_mut() = 10_000_000); Contracts::bare_upload_code(ALICE, wasm_callee, None, Determinism::Enforced).unwrap(); - call(&addr_caller, &add_delegate_dependency_input).result.unwrap(); + call(&addr_caller, &lock_delegate_dependency_input).result.unwrap(); // Call terminate should work, and return the deposit. let balance_before = test_utils::get_balance(&ALICE); diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index 1c7395ec3bff..5386f4d0ffdf 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -733,14 +733,14 @@ mod tests { Ok(()) } fn decrement_refcount(_code_hash: CodeHash) {} - fn add_delegate_dependency( + fn lock_delegate_dependency( &mut self, code: CodeHash, ) -> Result<(), DispatchError> { self.delegate_dependencies.borrow_mut().insert(code); Ok(()) } - fn remove_delegate_dependency( + fn unlock_delegate_dependency( &mut self, code: &CodeHash, ) -> Result<(), DispatchError> { @@ -3399,16 +3399,16 @@ mod tests { } #[test] - fn add_remove_delegate_dependency() { - const CODE_ADD_REMOVE_DELEGATE_DEPENDENCY: &str = r#" + fn lock_unlock_delegate_dependency() { + const CODE_LOCK_UNLOCK_DELEGATE_DEPENDENCY: &str = r#" (module - (import "seal0" "add_delegate_dependency" (func $add_delegate_dependency (param i32))) - (import "seal0" "remove_delegate_dependency" (func $remove_delegate_dependency (param i32))) + (import "seal0" "lock_delegate_dependency" (func $lock_delegate_dependency (param i32))) + (import "seal0" "unlock_delegate_dependency" (func $unlock_delegate_dependency (param i32))) (import "env" "memory" (memory 1 1)) (func (export "call") - (call $add_delegate_dependency (i32.const 0)) - (call $add_delegate_dependency (i32.const 32)) - (call $remove_delegate_dependency (i32.const 32)) + (call $lock_delegate_dependency (i32.const 0)) + (call $lock_delegate_dependency (i32.const 32)) + (call $unlock_delegate_dependency (i32.const 32)) ) (func (export "deploy")) @@ -3426,7 +3426,7 @@ mod tests { ) "#; let mut mock_ext = MockExt::default(); - assert_ok!(execute(&CODE_ADD_REMOVE_DELEGATE_DEPENDENCY, vec![], &mut mock_ext)); + assert_ok!(execute(&CODE_LOCK_UNLOCK_DELEGATE_DEPENDENCY, vec![], &mut mock_ext)); let delegate_dependencies: Vec<_> = mock_ext.delegate_dependencies.into_inner().into_iter().collect(); assert_eq!(delegate_dependencies.len(), 1); diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 
74b19910c371..f440c818166d 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -246,10 +246,10 @@ pub enum RuntimeCosts { AccountEntranceCount, /// Weight of calling `instantiation_nonce` InstantationNonce, - /// Weight of calling `add_delegate_dependency` - AddDelegateDependency, - /// Weight of calling `remove_delegate_dependency` - RemoveDelegateDependency, + /// Weight of calling `lock_delegate_dependency` + LockDelegateDependency, + /// Weight of calling `unlock_delegate_dependency` + UnlockDelegateDependency, } impl Token for RuntimeCosts { @@ -338,8 +338,8 @@ impl Token for RuntimeCosts { ReentrantCount => s.reentrance_count, AccountEntranceCount => s.account_reentrance_count, InstantationNonce => s.instantiation_nonce, - AddDelegateDependency => s.add_delegate_dependency, - RemoveDelegateDependency => s.remove_delegate_dependency, + LockDelegateDependency => s.lock_delegate_dependency, + UnlockDelegateDependency => s.unlock_delegate_dependency, } } } @@ -1216,7 +1216,6 @@ pub mod env { /// Make a call to another contract. /// See [`pallet_contracts_uapi::HostFn::call_v2`]. #[version(2)] - #[unstable] fn call( ctx: _, memory: _, @@ -1353,7 +1352,6 @@ pub mod env { /// Instantiate a contract with the specified code hash. /// See [`pallet_contracts_uapi::HostFn::instantiate_v2`]. #[version(2)] - #[unstable] fn instantiate( ctx: _, memory: _, @@ -2306,22 +2304,22 @@ pub mod env { } /// Adds a new delegate dependency to the contract. - /// See [`pallet_contracts_uapi::HostFn::add_delegate_dependency`]. + /// See [`pallet_contracts_uapi::HostFn::lock_delegate_dependency`]. #[unstable] - fn add_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { - ctx.charge_gas(RuntimeCosts::AddDelegateDependency)?; + fn lock_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { + ctx.charge_gas(RuntimeCosts::LockDelegateDependency)?; let code_hash = ctx.read_sandbox_memory_as(memory, code_hash_ptr)?; - ctx.ext.add_delegate_dependency(code_hash)?; + ctx.ext.lock_delegate_dependency(code_hash)?; Ok(()) } /// Removes the delegate dependency from the contract. - /// see [`pallet_contracts_uapi::HostFn::remove_delegate_dependency`]. + /// see [`pallet_contracts_uapi::HostFn::unlock_delegate_dependency`]. 
#[unstable] - fn remove_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { - ctx.charge_gas(RuntimeCosts::RemoveDelegateDependency)?; + fn unlock_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { + ctx.charge_gas(RuntimeCosts::UnlockDelegateDependency)?; let code_hash = ctx.read_sandbox_memory_as(memory, code_hash_ptr)?; - ctx.ext.remove_delegate_dependency(&code_hash)?; + ctx.ext.unlock_delegate_dependency(&code_hash)?; Ok(()) } } diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index fa9df922a7cb..962591290b31 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -124,8 +124,8 @@ pub trait WeightInfo { fn seal_ecdsa_recover(r: u32, ) -> Weight; fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight; fn seal_set_code_hash(r: u32, ) -> Weight; - fn add_delegate_dependency(r: u32, ) -> Weight; - fn remove_delegate_dependency(r: u32, ) -> Weight; + fn lock_delegate_dependency(r: u32, ) -> Weight; + fn unlock_delegate_dependency(r: u32, ) -> Weight; fn seal_reentrance_count(r: u32, ) -> Weight; fn seal_account_reentrance_count(r: u32, ) -> Weight; fn seal_instantiation_nonce(r: u32, ) -> Weight; @@ -1891,7 +1891,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 32]`. - fn add_delegate_dependency(r: u32, ) -> Weight { + fn lock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `928 + r * (131 ±0)` // Estimated: `6878 + r * (2606 ±0)` @@ -1920,7 +1920,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 32]`. - fn remove_delegate_dependency(r: u32, ) -> Weight { + fn unlock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `969 + r * (183 ±0)` // Estimated: `129453 + r * (2568 ±0)` @@ -3787,7 +3787,7 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 32]`. - fn add_delegate_dependency(r: u32, ) -> Weight { + fn lock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `928 + r * (131 ±0)` // Estimated: `6878 + r * (2606 ±0)` @@ -3816,7 +3816,7 @@ impl WeightInfo for () { /// Storage: `System::EventTopics` (r:2 w:2) /// Proof: `System::EventTopics` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `r` is `[0, 32]`. - fn remove_delegate_dependency(r: u32, ) -> Weight { + fn unlock_delegate_dependency(r: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `969 + r * (183 ±0)` // Estimated: `129453 + r * (2568 ±0)` diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs index 7894dc3a210a..c25be4479cef 100644 --- a/substrate/frame/contracts/uapi/src/host.rs +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -93,7 +93,7 @@ pub trait HostFn { /// - `output`: A reference to the output data buffer to write the address. fn address(output: &mut &mut [u8]); - /// Adds a new delegate dependency to the contract. + /// Lock a new delegate dependency to the contract. 
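	/// Locking protects the dependency's code from being removed from the
	/// chain while the lock is held; in return a storage deposit is charged,
	/// sized by the pallet's `CodeHashLockupDepositPercent` of the code
	/// deposit.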
/// /// Traps if the maximum number of delegate_dependencies is reached or if /// the delegate dependency already exists. @@ -102,10 +102,7 @@ pub trait HostFn { /// /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps /// otherwise. - #[deprecated( - note = "Unstable function. Behaviour can change without further notice. Use only for testing." - )] - fn add_delegate_dependency(code_hash: &[u8]); + fn lock_delegate_dependency(code_hash: &[u8]); /// Stores the *free* balance of the current account into the supplied buffer. /// @@ -142,6 +139,7 @@ pub trait HostFn { /// /// Equivalent to the newer [`Self::call_v2`] version but works with /// *ref_time* Weight only + #[deprecated(note = "Deprecated, use newer version instead")] fn call_v1( flags: CallFlags, callee: &[u8], @@ -178,9 +176,6 @@ pub trait HostFn { /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] /// - [NotCallable][`crate::ReturnErrorCode::NotCallable] - #[deprecated( - note = "Unstable function. Behaviour can change without further notice. Use only for testing." - )] fn call_v2( flags: CallFlags, callee: &[u8], @@ -480,6 +475,7 @@ pub trait HostFn { /// /// Equivalent to the newer [`Self::instantiate_v2`] version but works /// with *ref_time* Weight only. + #[deprecated(note = "Deprecated, use newer version instead")] fn instantiate_v1( code_hash: &[u8], gas: u64, @@ -524,9 +520,6 @@ pub trait HostFn { /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] - #[deprecated( - note = "Unstable function. Behaviour can change without further notice. Use only for testing." - )] fn instantiate_v2( code_hash: &[u8], ref_time_limit: u64, @@ -602,10 +595,7 @@ pub trait HostFn { /// /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps /// otherwise. - #[deprecated( - note = "Unstable function. Behaviour can change without further notice. Use only for testing." - )] - fn remove_delegate_dependency(code_hash: &[u8]); + fn unlock_delegate_dependency(code_hash: &[u8]); /// Cease contract execution and save a data buffer as a result of the execution. 
/// diff --git a/substrate/frame/contracts/uapi/src/host/riscv32.rs b/substrate/frame/contracts/uapi/src/host/riscv32.rs index dc1c48308df8..dbd5abc42409 100644 --- a/substrate/frame/contracts/uapi/src/host/riscv32.rs +++ b/substrate/frame/contracts/uapi/src/host/riscv32.rs @@ -281,11 +281,11 @@ impl HostFn for HostFnImpl { todo!() } - fn add_delegate_dependency(code_hash: &[u8]) { + fn lock_delegate_dependency(code_hash: &[u8]) { todo!() } - fn remove_delegate_dependency(code_hash: &[u8]) { + fn unlock_delegate_dependency(code_hash: &[u8]) { todo!() } diff --git a/substrate/frame/contracts/uapi/src/host/wasm32.rs b/substrate/frame/contracts/uapi/src/host/wasm32.rs index 3df3abeb7dcd..9651aa73d6f9 100644 --- a/substrate/frame/contracts/uapi/src/host/wasm32.rs +++ b/substrate/frame/contracts/uapi/src/host/wasm32.rs @@ -23,7 +23,7 @@ mod sys { extern "C" { pub fn account_reentrance_count(account_ptr: *const u8) -> u32; - pub fn add_delegate_dependency(code_hash_ptr: *const u8); + pub fn lock_delegate_dependency(code_hash_ptr: *const u8); pub fn address(output_ptr: *mut u8, output_len_ptr: *mut u32); @@ -125,7 +125,7 @@ mod sys { pub fn reentrance_count() -> u32; - pub fn remove_delegate_dependency(code_hash_ptr: *const u8); + pub fn unlock_delegate_dependency(code_hash_ptr: *const u8); pub fn seal_return(flags: u32, data_ptr: *const u8, data_len: u32) -> !; @@ -803,12 +803,12 @@ impl HostFn for HostFnImpl { unsafe { sys::account_reentrance_count(account.as_ptr()) } } - fn add_delegate_dependency(code_hash: &[u8]) { - unsafe { sys::add_delegate_dependency(code_hash.as_ptr()) } + fn lock_delegate_dependency(code_hash: &[u8]) { + unsafe { sys::lock_delegate_dependency(code_hash.as_ptr()) } } - fn remove_delegate_dependency(code_hash: &[u8]) { - unsafe { sys::remove_delegate_dependency(code_hash.as_ptr()) } + fn unlock_delegate_dependency(code_hash: &[u8]) { + unsafe { sys::unlock_delegate_dependency(code_hash.as_ptr()) } } fn instantiation_nonce() -> u64 { From ff40310d872547087dfdb5d60fc2e99fa9d47b16 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 20 Feb 2024 16:26:11 +0100 Subject: [PATCH 06/51] Externalize received fees checks from `receive_reserve_asset_deposited_from_different_consensus_works` (#3409) Backport of patch-fix: https://github.com/paritytech/polkadot-sdk/pull/3404 --- Cargo.lock | 1 - .../assets/asset-hub-rococo/tests/tests.rs | 176 +++++++++++++++++- .../assets/asset-hub-westend/tests/tests.rs | 169 ++++++++++++++++- .../runtimes/assets/test-utils/Cargo.toml | 2 - .../test-utils/src/test_cases_over_bridge.rs | 134 +++---------- 5 files changed, 346 insertions(+), 136 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c26c4081bbd9..2f8adf0e5f44 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1050,7 +1050,6 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", - "pallet-asset-conversion", "pallet-assets", "pallet-balances", "pallet-collator-selection", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index cff2290222c3..38c118dbb4b2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -22,13 +22,13 @@ use asset_hub_rococo_runtime::{ xcm_config::{ bridging, AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, - LocationToAccountId, TokenLocation, 
TokenLocationV3, TrustBackedAssetsPalletLocation, - TrustBackedAssetsPalletLocationV3, XcmConfig, + LocationToAccountId, StakingPot, TokenLocation, TokenLocationV3, + TrustBackedAssetsPalletLocation, TrustBackedAssetsPalletLocationV3, XcmConfig, }, AllPalletsWithoutSystem, AssetConversion, AssetDeposit, Assets, Balances, CollatorSelection, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, - MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, - ToWestendXcmRouterInstance, TrustBackedAssetsInstance, XcmpQueue, + MetadataDepositPerByte, ParachainSystem, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, + SessionKeys, ToWestendXcmRouterInstance, TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{ test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, @@ -49,6 +49,7 @@ use frame_support::{ use parachains_common::{AccountId, AssetIdForTrustBackedAssets, AuraId, Balance}; use sp_consensus_aura::SlotDuration; use sp_runtime::traits::MaybeEquivalence; +use sp_std::ops::Mul; use std::convert::Into; use testnet_parachains_constants::rococo::{consensus::*, currency::UNITS, fee::WeightToFee}; use xcm::latest::prelude::{Assets as XcmAssets, *}; @@ -85,6 +86,52 @@ fn slot_durations() -> SlotDurations { } } +fn setup_pool_for_paying_fees_with_foreign_assets( + (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): ( + AccountId, + xcm::v3::Location, + Balance, + ), +) { + let existential_deposit = ExistentialDeposit::get(); + + // setup a pool to pay fees with `foreign_asset_id_location` tokens + let pool_owner: AccountId = [14u8; 32].into(); + let native_asset = xcm::v3::Location::parent(); + let pool_liquidity: Balance = + existential_deposit.max(foreign_asset_id_minimum_balance).mul(100_000); + + let _ = Balances::force_set_balance( + RuntimeOrigin::root(), + pool_owner.clone().into(), + (existential_deposit + pool_liquidity).mul(2).into(), + ); + + assert_ok!(ForeignAssets::mint( + RuntimeOrigin::signed(foreign_asset_owner), + foreign_asset_id_location.into(), + pool_owner.clone().into(), + (foreign_asset_id_minimum_balance + pool_liquidity).mul(2).into(), + )); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(pool_owner.clone()), + Box::new(native_asset.into()), + Box::new(foreign_asset_id_location.into()) + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(pool_owner.clone()), + Box::new(native_asset.into()), + Box::new(foreign_asset_id_location.into()), + pool_liquidity, + pool_liquidity, + 1, + 1, + pool_owner, + )); +} + #[test] fn test_buy_and_refund_weight_in_native() { ExtBuilder::::default() @@ -1056,7 +1103,8 @@ fn limited_reserve_transfer_assets_for_native_asset_over_bridge_works( mod asset_hub_rococo_tests { use super::*; - use asset_hub_rococo_runtime::{PolkadotXcm, RuntimeOrigin}; + use asset_hub_rococo_runtime::PolkadotXcm; + use xcm_executor::traits::ConvertLocation; fn bridging_to_asset_hub_westend() -> TestBridgingConfig { let _ = PolkadotXcm::force_xcm_version( @@ -1081,27 +1129,135 @@ mod asset_hub_rococo_tests { } #[test] - fn receive_reserve_asset_deposited_wnd_from_asset_hub_westend_works() { + fn receive_reserve_asset_deposited_wnd_from_asset_hub_westend_fees_paid_by_pool_swap_works() { const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; + let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT); + let staking_pot = StakingPot::get(); + + let foreign_asset_id_location = 
xcm::v3::Location::new( + 2, + [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Westend)], + ); + let foreign_asset_id_minimum_balance = 1_000_000_000; + // sovereign account as foreign asset owner (can be whoever for this scenario) + let foreign_asset_owner = + LocationToAccountId::convert_location(&Location::parent()).unwrap(); + let foreign_asset_create_params = + (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance); + + asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ForeignAssetsInstance, + >( + collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), + ExistentialDeposit::get(), + AccountId::from([73; 32]), + block_author_account, + // receiving WNDs + foreign_asset_create_params.clone(), + 1000000000000, + || { + // setup pool for paying fees to touch `SwapFirstAssetTrader` + setup_pool_for_paying_fees_with_foreign_assets(foreign_asset_create_params); + // staking pot account for collecting local native fees from `BuyExecution` + let _ = Balances::force_set_balance(RuntimeOrigin::root(), StakingPot::get().into(), ExistentialDeposit::get()); + // prepare bridge configuration + bridging_to_asset_hub_westend() + }, + ( + [PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX)].into(), + GlobalConsensus(Westend), + [Parachain(1000)].into() + ), + || { + // check staking pot for ED + assert_eq!(Balances::free_balance(&staking_pot), ExistentialDeposit::get()); + // check now foreign asset for staking pot + assert_eq!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &staking_pot + ), + 0 + ); + }, + || { + // `SwapFirstAssetTrader` - staking pot receives xcm fees in ROCs + assert!( + Balances::free_balance(&staking_pot) > ExistentialDeposit::get() + ); + // staking pot receives no foreign assets + assert_eq!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &staking_pot + ), + 0 + ); + } + ) + } + + #[test] + fn receive_reserve_asset_deposited_wnd_from_asset_hub_westend_fees_paid_by_sufficient_asset_works( + ) { + const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; + let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT); + let staking_pot = StakingPot::get(); + + let foreign_asset_id_location = xcm::v3::Location::new( + 2, + [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Westend)], + ); + let foreign_asset_id_minimum_balance = 1_000_000_000; + // sovereign account as foreign asset owner (can be whoever for this scenario) + let foreign_asset_owner = + LocationToAccountId::convert_location(&Location::parent()).unwrap(); + let foreign_asset_create_params = + (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance); + asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< Runtime, AllPalletsWithoutSystem, XcmConfig, - LocationToAccountId, ForeignAssetsInstance, >( collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), ExistentialDeposit::get(), AccountId::from([73; 32]), - AccountId::from(BLOCK_AUTHOR_ACCOUNT), + block_author_account.clone(), // receiving WNDs - (xcm::v3::Location::new(2, [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Westend)]), 1000000000000, 1_000_000_000), + foreign_asset_create_params, + 1000000000000, bridging_to_asset_hub_westend, ( 
[PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX)].into(), GlobalConsensus(Westend), [Parachain(1000)].into() - ) + ), + || { + // check block author before + assert_eq!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &block_author_account + ), + 0 + ); + }, + || { + // `TakeFirstAssetTrader` puts fees to the block author + assert!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &block_author_account + ) > 0 + ); + // `SwapFirstAssetTrader` did not work + assert_eq!(Balances::free_balance(&staking_pot), 0); + } ) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index 5f6e4c007cd9..aa8c3cf2f14d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -22,8 +22,8 @@ use asset_hub_westend_runtime::{ xcm_config::{ bridging, AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, ForeignCreatorsSovereignAccountOf, - LocationToAccountId, TrustBackedAssetsPalletLocation, TrustBackedAssetsPalletLocationV3, - WestendLocation, WestendLocationV3, XcmConfig, + LocationToAccountId, StakingPot, TrustBackedAssetsPalletLocation, + TrustBackedAssetsPalletLocationV3, WestendLocation, WestendLocationV3, XcmConfig, }, AllPalletsWithoutSystem, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, @@ -50,11 +50,11 @@ use frame_support::{ use parachains_common::{AccountId, AssetIdForTrustBackedAssets, AuraId, Balance}; use sp_consensus_aura::SlotDuration; use sp_runtime::traits::MaybeEquivalence; -use std::convert::Into; +use std::{convert::Into, ops::Mul}; use testnet_parachains_constants::westend::{consensus::*, currency::UNITS, fee::WeightToFee}; use xcm::latest::prelude::{Assets as XcmAssets, *}; use xcm_builder::V4V3LocationConverter; -use xcm_executor::traits::{JustTry, WeightTrader}; +use xcm_executor::traits::{ConvertLocation, JustTry, WeightTrader}; const ALICE: [u8; 32] = [1u8; 32]; const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; @@ -86,6 +86,52 @@ fn slot_durations() -> SlotDurations { } } +fn setup_pool_for_paying_fees_with_foreign_assets( + (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): ( + AccountId, + xcm::v3::Location, + Balance, + ), +) { + let existential_deposit = ExistentialDeposit::get(); + + // setup a pool to pay fees with `foreign_asset_id_location` tokens + let pool_owner: AccountId = [14u8; 32].into(); + let native_asset = xcm::v3::Location::parent(); + let pool_liquidity: Balance = + existential_deposit.max(foreign_asset_id_minimum_balance).mul(100_000); + + let _ = Balances::force_set_balance( + RuntimeOrigin::root(), + pool_owner.clone().into(), + (existential_deposit + pool_liquidity).mul(2).into(), + ); + + assert_ok!(ForeignAssets::mint( + RuntimeOrigin::signed(foreign_asset_owner), + foreign_asset_id_location.into(), + pool_owner.clone().into(), + (foreign_asset_id_minimum_balance + pool_liquidity).mul(2).into(), + )); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(pool_owner.clone()), + Box::new(native_asset.into()), + Box::new(foreign_asset_id_location.into()) + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(pool_owner.clone()), + Box::new(native_asset.into()), + 
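            // Editorial note, not part of the original patch: in
            // `pallet_asset_conversion::add_liquidity` the two boxed asset
            // ids select the pool pair and are followed by the desired
            // amounts, the minimum accepted amounts, and the account that
            // receives the liquidity tokens.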
Box::new(foreign_asset_id_location.into()), + pool_liquidity, + pool_liquidity, + 1, + 1, + pool_owner, + )); +} + #[test] fn test_buy_and_refund_weight_in_native() { ExtBuilder::::default() @@ -1071,30 +1117,133 @@ fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_rococo_works() Some(xcm_config::TreasuryAccount::get()), ) } + #[test] -fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_works() { +fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_pool_swap_works() { const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; + let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT); + let staking_pot = StakingPot::get(); + + let foreign_asset_id_location = + xcm::v3::Location::new(2, [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Rococo)]); + let foreign_asset_id_minimum_balance = 1_000_000_000; + // sovereign account as foreign asset owner (can be whoever for this scenario) + let foreign_asset_owner = LocationToAccountId::convert_location(&Location::parent()).unwrap(); + let foreign_asset_create_params = + (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance); + asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< Runtime, AllPalletsWithoutSystem, XcmConfig, - LocationToAccountId, ForeignAssetsInstance, >( collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), ExistentialDeposit::get(), AccountId::from([73; 32]), - AccountId::from(BLOCK_AUTHOR_ACCOUNT), + block_author_account.clone(), // receiving ROCs - (xcm::v3::Location::new(2, [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Rococo)]), 1000000000000, 1_000_000_000), - bridging_to_asset_hub_rococo, + foreign_asset_create_params.clone(), + 1000000000000, + || { + // setup pool for paying fees to touch `SwapFirstAssetTrader` + setup_pool_for_paying_fees_with_foreign_assets(foreign_asset_create_params); + // staking pot account for collecting local native fees from `BuyExecution` + let _ = Balances::force_set_balance(RuntimeOrigin::root(), StakingPot::get().into(), ExistentialDeposit::get()); + // prepare bridge configuration + bridging_to_asset_hub_rococo() + }, ( [PalletInstance(bp_bridge_hub_westend::WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX)].into(), GlobalConsensus(Rococo), [Parachain(1000)].into() - ) + ), + || { + // check staking pot for ED + assert_eq!(Balances::free_balance(&staking_pot), ExistentialDeposit::get()); + // check now foreign asset for staking pot + assert_eq!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &staking_pot + ), + 0 + ); + }, + || { + // `SwapFirstAssetTrader` - staking pot receives xcm fees in ROCs + assert!( + Balances::free_balance(&staking_pot) > ExistentialDeposit::get() + ); + // staking pot receives no foreign assets + assert_eq!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &staking_pot + ), + 0 + ); + } ) } + +#[test] +fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_sufficient_asset_works() { + const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; + let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT); + let staking_pot = StakingPot::get(); + + let foreign_asset_id_location = + xcm::v3::Location::new(2, [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Rococo)]); + let foreign_asset_id_minimum_balance = 1_000_000_000; + // sovereign account as foreign asset owner (can be whoever for this scenario) + let foreign_asset_owner = 
LocationToAccountId::convert_location(&Location::parent()).unwrap(); + let foreign_asset_create_params = + (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance); + + asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ForeignAssetsInstance, + >( + collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), + ExistentialDeposit::get(), + AccountId::from([73; 32]), + block_author_account.clone(), + // receiving ROCs + foreign_asset_create_params, + 1000000000000, + bridging_to_asset_hub_rococo, + ( + [PalletInstance(bp_bridge_hub_westend::WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX)].into(), + GlobalConsensus(Rococo), + [Parachain(1000)].into() + ), + || { + // check block author before + assert_eq!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &block_author_account + ), + 0 + ); + }, + || { + // `TakeFirstAssetTrader` puts fees to the block author + assert!( + ForeignAssets::balance( + foreign_asset_id_location.into(), + &block_author_account + ) > 0 + ); + // `SwapFirstAssetTrader` did not work + assert_eq!(Balances::free_balance(&staking_pot), 0); + } + ) +} + #[test] fn report_bridge_status_from_xcm_bridge_router_for_rococo_works() { asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index f1b3953ce473..89c4925cb1ac 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -16,7 +16,6 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = frame-support = { path = "../../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../../substrate/frame/system", default-features = false } pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } @@ -56,7 +55,6 @@ std = [ "cumulus-primitives-core/std", "frame-support/std", "frame-system/std", - "pallet-asset-conversion/std", "pallet-assets/std", "pallet-balances/std", "pallet-collator-selection/std", diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 4a0f31c449ea..66ed3417951a 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -30,7 +30,6 @@ use parachains_runtimes_test_utils::{ SlotDurations, ValidatorIdOf, XcmReceivedFrom, }; use sp_runtime::{traits::StaticLookup, Saturating}; -use sp_std::ops::Mul; use xcm::{latest::prelude::*, VersionedAssets}; use xcm_builder::{CreateMatcher, MatchXcm}; use xcm_executor::{traits::ConvertLocation, XcmExecutor}; @@ -323,20 +322,22 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< Runtime, AllPalletsWithoutSystem, XcmConfig, - 
LocationToAccountId, ForeignAssetsPalletInstance, >( collator_session_keys: CollatorSessionKeys, existential_deposit: BalanceOf, target_account: AccountIdOf, block_author_account: AccountIdOf, - ( - foreign_asset_id_location, - transfered_foreign_asset_id_amount, - foreign_asset_id_minimum_balance, - ): (xcm::v3::Location, u128, u128), - prepare_configuration: fn() -> TestBridgingConfig, + (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): ( + AccountIdOf, + xcm::v3::Location, + u128, + ), + foreign_asset_id_amount_to_transfer: u128, + prepare_configuration: impl FnOnce() -> TestBridgingConfig, (bridge_instance, universal_origin, descend_origin): (Junctions, Junction, Junctions), /* bridge adds origin manipulation on the way */ + additional_checks_before: impl FnOnce(), + additional_checks_after: impl FnOnce(), ) where Runtime: frame_system::Config + pallet_balances::Config @@ -346,15 +347,13 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config + cumulus_pallet_xcmp_queue::Config - + pallet_assets::Config - + pallet_asset_conversion::Config, + + pallet_assets::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]> + From<[u8; 32]>, ValidatorIdOf: From>, BalanceOf: From + Into, XcmConfig: xcm_executor::Config, - LocationToAccountId: ConvertLocation>, >::AssetId: From + Into, >::AssetIdParameter: @@ -365,9 +364,6 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< + Into, <::Lookup as StaticLookup>::Source: From<::AccountId>, - ::AssetKind: - From + Into, - ::Balance: From, ForeignAssetsPalletInstance: 'static, { ExtBuilder::::default() @@ -382,88 +378,31 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< block_author_account.clone().into(), ); - // prepare bridge config - let TestBridgingConfig { local_bridge_hub_location, .. 
} = prepare_configuration(); - // drip 'ED' user target account let _ = >::deposit_creating( &target_account, existential_deposit, ); - // sovereign account as foreign asset owner (can be whoever for this scenario, doesnt - // matter) - let sovereign_account_as_owner_of_foreign_asset = - LocationToAccountId::convert_location(&Location::parent()).unwrap(); - - // staking pot account for collecting local native fees from `BuyExecution` - let staking_pot = >::account_id(); - let _ = >::deposit_creating( - &staking_pot, - existential_deposit, - ); - // create foreign asset for wrapped/derivated representation assert_ok!( >::force_create( RuntimeHelper::::root_origin(), foreign_asset_id_location.into(), - sovereign_account_as_owner_of_foreign_asset.clone().into(), + foreign_asset_owner.into(), true, // is_sufficient=true foreign_asset_id_minimum_balance.into() ) ); - // setup a pool to pay fees with `foreign_asset_id_location` tokens - let pool_owner: AccountIdOf = [1u8; 32].into(); - let native_asset = xcm::v3::Location::parent(); - let pool_liquidity: u128 = - existential_deposit.into().max(foreign_asset_id_minimum_balance).mul(100_000); - - let _ = >::deposit_creating( - &pool_owner, - (existential_deposit.into() + pool_liquidity).mul(2).into(), - ); - - assert_ok!(>::mint( - RuntimeHelper::::origin_of( - sovereign_account_as_owner_of_foreign_asset - ), - foreign_asset_id_location.into(), - pool_owner.clone().into(), - (foreign_asset_id_minimum_balance + pool_liquidity).mul(2).into(), - )); - - assert_ok!(>::create_pool( - RuntimeHelper::::origin_of(pool_owner.clone()), - Box::new(native_asset.into()), - Box::new(foreign_asset_id_location.into()) - )); - - assert_ok!(>::add_liquidity( - RuntimeHelper::::origin_of(pool_owner.clone()), - Box::new(native_asset.into()), - Box::new(foreign_asset_id_location.into()), - pool_liquidity.into(), - pool_liquidity.into(), - 1.into(), - 1.into(), - pool_owner, - )); + // prepare bridge config + let TestBridgingConfig { local_bridge_hub_location, .. 
} = prepare_configuration(); // Balances before assert_eq!( >::free_balance(&target_account), existential_deposit.clone() ); - assert_eq!( - >::free_balance(&block_author_account), - 0.into() - ); - assert_eq!( - >::free_balance(&staking_pot), - existential_deposit.clone() - ); // ForeignAssets balances before assert_eq!( >::balance( foreign_asset_id_location.into(), &target_account ), 0.into() ); - assert_eq!( - >::balance( - foreign_asset_id_location.into(), - &block_author_account - ), - 0.into() - ); - assert_eq!( - >::balance( - foreign_asset_id_location.into(), - &staking_pot - ), - 0.into() - ); + + // additional check before + additional_checks_before(); let foreign_asset_id_location_latest: Location = foreign_asset_id_location.try_into().unwrap(); let expected_assets = Assets::from(vec![Asset { id: AssetId(foreign_asset_id_location_latest.clone()), - fun: Fungible(transfered_foreign_asset_id_amount), + fun: Fungible(foreign_asset_id_amount_to_transfer), }]); let expected_beneficiary = Location::new( 0, @@ -510,7 +438,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< BuyExecution { fees: Asset { id: AssetId(foreign_asset_id_location_latest.clone()), - fun: Fungible(transfered_foreign_asset_id_amount), + fun: Fungible(foreign_asset_id_amount_to_transfer), }, weight_limit: Unlimited, }, @@ -544,19 +472,10 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< assert_ok!(outcome.ensure_complete()); // Balances after - // staking pot receives xcm fees in dot - assert!( - >::free_balance(&staking_pot) != - existential_deposit - ); assert_eq!( >::free_balance(&target_account), existential_deposit.clone() ); - assert_eq!( - >::free_balance(&block_author_account), - 0.into() - ); // ForeignAssets balances after assert!( >::balance( foreign_asset_id_location.into(), &target_account ) > 0.into() ); - assert_eq!( - >::balance( - foreign_asset_id_location.into(), - &staking_pot - ), - 0.into() - ); - assert_eq!( - >::balance( - foreign_asset_id_location.into(), - &block_author_account - ), - 0.into() - ); + + // additional check after + additional_checks_after(); }) } From fee810a5ea177f95ddf13ea51270b84f1e117bae Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Tue, 20 Feb 2024 17:16:21 +0100 Subject: [PATCH 07/51] rpc server: make possible to disable/enable batch requests (#3364) The rationale behind this is that it may be useful for some users to disable RPC batch requests entirely, or to limit them by length rather than by the total byte size of the batch. This PR adds two new CLI options: ``` --rpc-disable-batch-requests - disable batch requests on the server --rpc-max-batch-request-len - limit batches to LEN on the server.
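# Hypothetical example invocations (binary name and values are illustrative only,
# not part of this patch):
#   substrate --rpc-max-batch-request-len 16   # allow at most 16 calls per batch
#   substrate --rpc-disable-batch-requests     # reject batch requests entirely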
``` --- .gitlab/pipeline/check.yml | 1 + cumulus/client/cli/src/lib.rs | 10 ++++++- cumulus/test/service/src/lib.rs | 3 ++- polkadot/node/test/service/src/lib.rs | 5 ++-- prdoc/pr_3364.prdoc | 12 +++++++++ .../bin/node/cli/benches/block_production.rs | 3 ++- .../bin/node/cli/benches/transaction_pool.rs | 3 ++- substrate/client/cli/src/commands/run_cmd.rs | 26 ++++++++++++++++++- substrate/client/cli/src/config.rs | 11 ++++++-- substrate/client/cli/src/runner.rs | 1 + substrate/client/rpc-servers/src/lib.rs | 6 ++++- substrate/client/service/src/config.rs | 3 +++ substrate/client/service/src/lib.rs | 1 + substrate/client/service/test/src/lib.rs | 3 ++- 14 files changed, 77 insertions(+), 11 deletions(-) create mode 100644 prdoc/pr_3364.prdoc diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 1ed12e68c2ce..cdb5d1b05d09 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -133,6 +133,7 @@ check-runtime-migration-westend: WASM: "westend_runtime.compact.compressed.wasm" URI: "wss://westend-try-runtime-node.parity-chains.parity.io:443" SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings" + allow_failure: true check-runtime-migration-rococo: stage: check diff --git a/cumulus/client/cli/src/lib.rs b/cumulus/client/cli/src/lib.rs index 1807b8a1718e..a7b2eb19de88 100644 --- a/cumulus/client/cli/src/lib.rs +++ b/cumulus/client/cli/src/lib.rs @@ -30,7 +30,7 @@ use codec::Encode; use sc_chain_spec::ChainSpec; use sc_client_api::HeaderBackend; use sc_service::{ - config::{PrometheusConfig, TelemetryEndpoints}, + config::{PrometheusConfig, RpcBatchRequestConfig, TelemetryEndpoints}, BasePath, TransactionPoolOptions, }; use sp_core::hexdisplay::HexDisplay; @@ -443,6 +443,14 @@ impl sc_cli::CliConfiguration for NormalizedRunCmd { Ok(self.base.rpc_max_subscriptions_per_connection) } + fn rpc_buffer_capacity_per_connection(&self) -> sc_cli::Result { + Ok(self.base.rpc_message_buffer_capacity_per_connection) + } + + fn rpc_batch_config(&self) -> sc_cli::Result { + self.base.rpc_batch_config() + } + fn transaction_pool(&self, is_dev: bool) -> sc_cli::Result { self.base.transaction_pool(is_dev) } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 229bde4d19c5..1c2e1db97414 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -67,7 +67,7 @@ use sc_network::{ use sc_service::{ config::{ BlocksPruning, DatabaseSource, KeystoreConfig, MultiaddrWithPeerId, NetworkConfiguration, - OffchainWorkerConfig, PruningMode, WasmExecutionMethod, + OffchainWorkerConfig, PruningMode, RpcBatchRequestConfig, WasmExecutionMethod, }, BasePath, ChainSpec as ChainSpecService, Configuration, Error as ServiceError, PartialComponents, Role, RpcHandlers, TFullBackend, TFullClient, TaskManager, @@ -801,6 +801,7 @@ pub fn node_config( rpc_max_subs_per_conn: Default::default(), rpc_port: 9945, rpc_message_buffer_capacity: Default::default(), + rpc_batch_config: RpcBatchRequestConfig::Unlimited, rpc_rate_limit: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index a3af977ab3e4..ca904bae28db 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -44,8 +44,8 @@ use sc_network::{ }; use sc_service::{ config::{ - DatabaseSource, KeystoreConfig, MultiaddrWithPeerId, WasmExecutionMethod, - WasmtimeInstantiationStrategy, + DatabaseSource, KeystoreConfig, MultiaddrWithPeerId, RpcBatchRequestConfig, + 
WasmExecutionMethod, WasmtimeInstantiationStrategy, }, BasePath, BlocksPruning, Configuration, Role, RpcHandlers, TaskManager, }; @@ -186,6 +186,7 @@ pub fn node_config( rpc_max_subs_per_conn: Default::default(), rpc_port: 9944, rpc_message_buffer_capacity: Default::default(), + rpc_batch_config: RpcBatchRequestConfig::Unlimited, rpc_rate_limit: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/prdoc/pr_3364.prdoc b/prdoc/pr_3364.prdoc new file mode 100644 index 000000000000..1e7a6a5278d7 --- /dev/null +++ b/prdoc/pr_3364.prdoc @@ -0,0 +1,12 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: rpc server expose batch request configuration + +doc: + - audience: Node Operator + description: | + Add functionality to limit RPC batch requests by two new CLI options: + --rpc-disable-batch-request - disable batch requests on the server + --rpc-max-batch-request-len - limit batches to LEN on the server +crates: [ ] diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index d28cfbddfd22..23a62cc0bd24 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -28,7 +28,7 @@ use sc_consensus::{ use sc_service::{ config::{ BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, - PruningMode, WasmExecutionMethod, WasmtimeInstantiationStrategy, + PruningMode, RpcBatchRequestConfig, WasmExecutionMethod, WasmtimeInstantiationStrategy, }, BasePath, Configuration, Role, }; @@ -84,6 +84,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { rpc_max_subs_per_conn: Default::default(), rpc_port: 9944, rpc_message_buffer_capacity: Default::default(), + rpc_batch_config: RpcBatchRequestConfig::Unlimited, rpc_rate_limit: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs index 1e25b7ce6fd8..de4eef1944d4 100644 --- a/substrate/bin/node/cli/benches/transaction_pool.rs +++ b/substrate/bin/node/cli/benches/transaction_pool.rs @@ -26,7 +26,7 @@ use node_primitives::AccountId; use sc_service::{ config::{ BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, - PruningMode, TransactionPoolOptions, + PruningMode, RpcBatchRequestConfig, TransactionPoolOptions, }, BasePath, Configuration, Role, }; @@ -80,6 +80,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { rpc_max_subs_per_conn: Default::default(), rpc_port: 9944, rpc_message_buffer_capacity: Default::default(), + rpc_batch_config: RpcBatchRequestConfig::Unlimited, rpc_rate_limit: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/substrate/client/cli/src/commands/run_cmd.rs b/substrate/client/cli/src/commands/run_cmd.rs index c50198cf3cb8..221c32affd5a 100644 --- a/substrate/client/cli/src/commands/run_cmd.rs +++ b/substrate/client/cli/src/commands/run_cmd.rs @@ -30,7 +30,7 @@ use crate::{ use clap::Parser; use regex::Regex; use sc_service::{ - config::{BasePath, PrometheusConfig, TransactionPoolOptions}, + config::{BasePath, PrometheusConfig, RpcBatchRequestConfig, TransactionPoolOptions}, ChainSpec, Role, }; use sc_telemetry::TelemetryEndpoints; @@ -125,6 +125,14 @@ pub struct RunCmd { #[arg(long, default_value_t = 
RPC_DEFAULT_MESSAGE_CAPACITY_PER_CONN)] pub rpc_message_buffer_capacity_per_connection: u32, + /// Disable RPC batch requests + #[arg(long, alias = "rpc_no_batch_requests", conflicts_with_all = &["rpc_max_batch_request_len"])] + pub rpc_disable_batch_requests: bool, + + /// Limit the max length per RPC batch request + #[arg(long, conflicts_with_all = &["rpc_disable_batch_requests"], value_name = "LEN")] + pub rpc_max_batch_request_len: Option, + /// Specify browser *origins* allowed to access the HTTP & WS RPC servers. /// /// A comma-separated list of origins (protocol://domain or special `null` @@ -411,6 +419,22 @@ impl CliConfiguration for RunCmd { Ok(self.rpc_max_subscriptions_per_connection) } + fn rpc_buffer_capacity_per_connection(&self) -> Result { + Ok(self.rpc_message_buffer_capacity_per_connection) + } + + fn rpc_batch_config(&self) -> Result { + let cfg = if self.rpc_disable_batch_requests { + RpcBatchRequestConfig::Disabled + } else if let Some(l) = self.rpc_max_batch_request_len { + RpcBatchRequestConfig::Limit(l) + } else { + RpcBatchRequestConfig::Unlimited + }; + + Ok(cfg) + } + fn rpc_rate_limit(&self) -> Result> { Ok(self.rpc_rate_limit) } diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs index 78015cf8373d..5def9ce9b726 100644 --- a/substrate/client/cli/src/config.rs +++ b/substrate/client/cli/src/config.rs @@ -28,7 +28,8 @@ use sc_service::{ config::{ BasePath, Configuration, DatabaseSource, KeystoreConfig, NetworkConfiguration, NodeKeyConfig, OffchainWorkerConfig, OutputFormat, PrometheusConfig, PruningMode, Role, - RpcMethods, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, + RpcBatchRequestConfig, RpcMethods, TelemetryEndpoints, TransactionPoolOptions, + WasmExecutionMethod, }, BlocksPruning, ChainSpec, TracingReceiver, }; @@ -338,7 +339,12 @@ pub trait CliConfiguration: Sized { Ok(RPC_DEFAULT_MESSAGE_CAPACITY_PER_CONN) } - /// Rate limit calls per minute. + /// RPC server batch request configuration. + fn rpc_batch_config(&self) -> Result { + Ok(RpcBatchRequestConfig::Unlimited) + } + + /// RPC rate limit configuration. 
fn rpc_rate_limit(&self) -> Result> { Ok(None) } @@ -515,6 +521,7 @@ pub trait CliConfiguration: Sized { rpc_max_subs_per_conn: self.rpc_max_subscriptions_per_connection()?, rpc_port: DCV::rpc_listen_port(), rpc_message_buffer_capacity: self.rpc_buffer_capacity_per_connection()?, + rpc_batch_config: self.rpc_batch_config()?, rpc_rate_limit: self.rpc_rate_limit()?, prometheus_config: self .prometheus_config(DCV::prometheus_listen_port(), &chain_spec)?, diff --git a/substrate/client/cli/src/runner.rs b/substrate/client/cli/src/runner.rs index b4937db71e69..4201a0f4062f 100644 --- a/substrate/client/cli/src/runner.rs +++ b/substrate/client/cli/src/runner.rs @@ -271,6 +271,7 @@ mod tests { rpc_max_subs_per_conn: Default::default(), rpc_message_buffer_capacity: Default::default(), rpc_port: 9944, + rpc_batch_config: sc_service::config::RpcBatchRequestConfig::Unlimited, rpc_rate_limit: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index a22b7309ac19..e65d954bed52 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -47,7 +47,7 @@ pub use jsonrpsee::{ id_providers::{RandomIntegerIdProvider, RandomStringIdProvider}, traits::IdProvider, }, - server::middleware::rpc::RpcServiceBuilder, + server::{middleware::rpc::RpcServiceBuilder, BatchRequestConfig}, }; pub use middleware::{MetricsLayer, RateLimitLayer, RpcMetrics}; @@ -81,6 +81,8 @@ pub struct Config<'a, M: Send + Sync + 'static> { pub id_provider: Option>, /// Tokio runtime handle. pub tokio_handle: tokio::runtime::Handle, + /// Batch request config. + pub batch_config: BatchRequestConfig, /// Rate limit calls per minute. pub rate_limit: Option, } @@ -103,6 +105,7 @@ where { let Config { addrs, + batch_config, cors, max_payload_in_mb, max_payload_out_mb, @@ -139,6 +142,7 @@ where ) .set_http_middleware(http_middleware) .set_message_buffer_capacity(message_buffer_capacity) + .set_batch_request_config(batch_config) .custom_tokio_runtime(tokio_handle.clone()); if let Some(provider) = id_provider { diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs index 74c5dd775b0e..35262ff493b4 100644 --- a/substrate/client/service/src/config.rs +++ b/substrate/client/service/src/config.rs @@ -18,6 +18,7 @@ //! Service configuration. +pub use jsonrpsee::server::BatchRequestConfig as RpcBatchRequestConfig; use prometheus_endpoint::Registry; use sc_chain_spec::ChainSpec; pub use sc_client_db::{BlocksPruning, Database, DatabaseSource, PruningMode}; @@ -103,6 +104,8 @@ pub struct Configuration { pub rpc_port: u16, /// The number of messages the JSON-RPC server is allowed to keep in memory. pub rpc_message_buffer_capacity: u32, + /// JSON-RPC server batch config. + pub rpc_batch_config: RpcBatchRequestConfig, /// RPC rate limit per minute. pub rpc_rate_limit: Option, /// Prometheus endpoint configuration. `None` if disabled. 
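Taken together, the plumbing above is small: the two CLI flags collapse into a single jsonrpsee `BatchRequestConfig` value that the service layer hands to the server builder. A minimal standalone sketch of that mapping follows; the `batch_config` helper, the `main` wrapper, and the example values are illustrative assumptions rather than code from this patch, while the jsonrpsee types and methods are the ones used in the diffs above.

```
use jsonrpsee::server::{BatchRequestConfig, ServerBuilder};

// Illustrative helper mirroring the precedence implemented in `run_cmd.rs`:
// disabling batches wins over a length limit; with neither flag set,
// batch requests remain unlimited.
fn batch_config(disable: bool, max_len: Option<u32>) -> BatchRequestConfig {
    if disable {
        BatchRequestConfig::Disabled
    } else if let Some(len) = max_len {
        BatchRequestConfig::Limit(len)
    } else {
        BatchRequestConfig::Unlimited
    }
}

fn main() {
    // As if the node had been started with `--rpc-max-batch-request-len 16`.
    let cfg = batch_config(false, Some(16));
    // The service layer forwards the resulting config to the server builder.
    let _builder = ServerBuilder::default().set_batch_request_config(cfg);
}
```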
diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index d0adf4f7d5c5..9480d4a0b072 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -393,6 +393,7 @@ where let server_config = sc_rpc_server::Config { addrs: [addr, backup_addr], + batch_config: config.rpc_batch_config, max_connections: config.rpc_max_connections, max_payload_in_mb: config.rpc_max_request_size, max_payload_out_mb: config.rpc_max_response_size, diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index 6148bb05fcfb..349538965ee1 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -29,7 +29,7 @@ use sc_network::{ use sc_network_sync::SyncingService; use sc_service::{ client::Client, - config::{BasePath, DatabaseSource, KeystoreConfig}, + config::{BasePath, DatabaseSource, KeystoreConfig, RpcBatchRequestConfig}, BlocksPruning, ChainSpecExtension, Configuration, Error, GenericChainSpec, Role, RuntimeGenesis, SpawnTaskHandle, TaskManager, }; @@ -254,6 +254,7 @@ fn node_config< rpc_max_subs_per_conn: Default::default(), rpc_port: 9944, rpc_message_buffer_capacity: Default::default(), + rpc_batch_config: RpcBatchRequestConfig::Unlimited, rpc_rate_limit: None, prometheus_config: None, telemetry_endpoints: None, From b77b3a2de5be4d413dede8262a92f87c0d63959f Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 20 Feb 2024 22:00:44 +0100 Subject: [PATCH 08/51] Nits found during the bumping fellows repo (#3410) --- .../runtimes/assets/asset-hub-rococo/src/lib.rs | 6 ++++-- .../runtimes/assets/asset-hub-westend/src/lib.rs | 1 + cumulus/parachains/runtimes/assets/common/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 1 + .../runtimes/contracts/contracts-rococo/src/lib.rs | 1 + .../parachains/runtimes/people/people-rococo/src/lib.rs | 2 ++ .../parachains/runtimes/people/people-westend/src/lib.rs | 2 ++ .../parachains/runtimes/testing/penpal/src/xcm_config.rs | 2 +- polkadot/runtime/common/src/impls.rs | 8 ++++---- polkadot/xcm/src/lib.rs | 2 +- substrate/frame/asset-conversion/src/lib.rs | 2 +- 11 files changed, 19 insertions(+), 10 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index ed893091533a..7e550c329177 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -383,8 +383,8 @@ pub type ForeignAssetsInstance = pallet_assets::Instance2; impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; - type AssetId = xcm::v3::MultiLocation; - type AssetIdParameter = xcm::v3::MultiLocation; + type AssetId = xcm::v3::Location; + type AssetIdParameter = xcm::v3::Location; type Currency = Balances; type CreateOrigin = ForeignCreators< ( @@ -1043,6 +1043,7 @@ mod benches { [pallet_assets, Pool] [pallet_asset_conversion, AssetConversion] [pallet_balances, Balances] + [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_nft_fractionalization, NftFractionalization] [pallet_nfts, Nfts] @@ -1052,6 +1053,7 @@ mod benches { [pallet_utility, Utility] [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_xcm_bridge_hub_router, ToWestend] // XCM diff --git 
a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index bcd3a9aa896f..a3166106073e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1084,6 +1084,7 @@ mod benches { [pallet_utility, Utility] [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_xcm_bridge_hub_router, ToRococo] // XCM diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index a0c912b6f0fe..fa2752179eb6 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -371,7 +371,7 @@ mod tests { for (asset, expected_result) in test_data { assert_eq!( - >::matches_fungibles( + >::matches_fungibles( &asset.clone().try_into().unwrap() ), expected_result, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 8ffea24b3cc7..997320ffcf26 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -522,6 +522,7 @@ mod benches { [pallet_utility, Utility] [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM [pallet_xcm, PalletXcmExtrinsicsBenchmark::] diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 15a5d9b04a91..94cb0e502e4c 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -427,6 +427,7 @@ mod benches { [pallet_sudo, Sudo] [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] [pallet_contracts, Contracts] [pallet_xcm, PalletXcmExtrinsicsBenchmark::] ); diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 9e7dd6c884bd..570dc0fa12c3 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -448,6 +448,7 @@ mod benches { [frame_system, SystemBench::] [pallet_balances, Balances] [pallet_identity, Identity] + [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] @@ -455,6 +456,7 @@ mod benches { // Polkadot [polkadot_runtime_common::identity_migrator, IdentityMigrator] // Cumulus + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_collator_selection, CollatorSelection] // XCM diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 4ff743aefe1f..a47df66f5039 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -448,6 +448,7 @@ mod benches { [frame_system, SystemBench::] [pallet_balances, Balances] [pallet_identity, Identity] + 
[pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] [pallet_session, SessionBench::] [pallet_utility, Utility] @@ -455,6 +456,7 @@ mod benches { // Polkadot [polkadot_runtime_common::identity_migrator, IdentityMigrator] // Cumulus + [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_collator_selection, CollatorSelection] // XCM diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index c0bcd7879626..ef4f466d4842 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -134,7 +134,7 @@ pub type ForeignFungiblesTransactor = FungiblesAdapter< ForeignAssets, // Use this currency when it is a fungible asset matching the given location or name: ForeignAssetsConvertedConcreteId, - // Convert an XCM MultiLocation into a local account id: + // Convert an XCM Location into a local account id: LocationToAccountId, // Our chain's account ID type (we can't get away without mentioning it explicitly): AccountId, diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index 1ddad37831b6..4ba85a3c8353 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -107,7 +107,7 @@ pub fn era_payout( )] pub enum VersionedLocatableAsset { #[codec(index = 3)] - V3 { location: xcm::v3::MultiLocation, asset_id: xcm::v3::AssetId }, + V3 { location: xcm::v3::Location, asset_id: xcm::v3::AssetId }, #[codec(index = 4)] V4 { location: xcm::v4::Location, asset_id: xcm::v4::AssetId }, } @@ -140,7 +140,7 @@ impl TryConvert<&VersionedLocation, xcm::latest::Location> for VersionedLocation ) -> Result { let latest = match location.clone() { VersionedLocation::V2(l) => { - let v3: xcm::v3::MultiLocation = l.try_into().map_err(|_| location)?; + let v3: xcm::v3::Location = l.try_into().map_err(|_| location)?; v3.try_into().map_err(|_| location)? }, VersionedLocation::V3(l) => l.try_into().map_err(|_| location)?, @@ -191,11 +191,11 @@ pub mod benchmarks { { fn create_asset_kind(seed: u32) -> VersionedLocatableAsset { VersionedLocatableAsset::V3 { - location: xcm::v3::MultiLocation::new( + location: xcm::v3::Location::new( Parents::get(), [xcm::v3::Junction::Parachain(ParaId::get())], ), - asset_id: xcm::v3::MultiLocation::new( + asset_id: xcm::v3::Location::new( 0, [ xcm::v3::Junction::PalletInstance(seed.try_into().unwrap()), diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index 561d5175b419..c90ad25c0d83 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -494,7 +494,7 @@ pub trait WrapVersion { } /// Check and return the `Version` that should be used for the `Xcm` datum for the destination -/// `MultiLocation`, which will interpret it. +/// `Location`, which will interpret it. pub trait GetVersion { fn get_version_for(dest: &latest::Location) -> Option; } diff --git a/substrate/frame/asset-conversion/src/lib.rs b/substrate/frame/asset-conversion/src/lib.rs index f0695678fbdd..f13d40d3e7e2 100644 --- a/substrate/frame/asset-conversion/src/lib.rs +++ b/substrate/frame/asset-conversion/src/lib.rs @@ -40,7 +40,7 @@ //! non-native asset 1, you would pass in a path of `[DOT, 1]` or `[1, DOT]`. If you want to swap //! from non-native asset 1 to non-native asset 2, you would pass in a path of `[1, DOT, 2]`. //! -//! 
(For an example of configuring this pallet to use `MultiLocation` as an asset id, see the +//! (For an example of configuring this pallet to use `Location` as an asset id, see the //! cumulus repo). //! //! Here is an example `state_call` that asks for a quote of a pool of native versus asset 1: From d50cf1402d55c1a031283645ef16cc4a74163959 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Feb 2024 23:07:48 +0000 Subject: [PATCH 09/51] Bump the known_good_semver group with 5 updates (#3397) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps the known_good_semver group with 5 updates: | Package | From | To | | --- | --- | --- | | [serde](https://github.com/serde-rs/serde) | `1.0.196` | `1.0.197` | | [serde_json](https://github.com/serde-rs/json) | `1.0.113` | `1.0.114` | | [syn](https://github.com/dtolnay/syn) | `2.0.49` | `2.0.50` | | [serde_yaml](https://github.com/dtolnay/serde-yaml) | `0.9.31` | `0.9.32` | | [serde_derive](https://github.com/serde-rs/serde) | `1.0.196` | `1.0.197` | Updates `serde` from 1.0.196 to 1.0.197
Release notes

Sourced from serde's releases.

v1.0.197

  • Fix unused_imports warnings when compiled by rustc 1.78
  • Optimize code size of some Display impls (#2697, thanks @nyurik)
Commits
  • 5fa711d Release 1.0.197
  • f5d8ae4 Resolve prelude redundant import warnings
  • 1d54973 Merge pull request #2697 from nyurik/format-str
  • b8fafef A few minor write_str optimizations and inlining
  • c42ebb8 Update ui test suite to nightly-2024-02-12
  • 9e68062 Ignore incompatible_msrv clippy lint for conditionally compiled code
  • 846f865 Ignore dead_code warnings in test
  • See full diff in compare view

Updates `serde_json` from 1.0.113 to 1.0.114
Release notes

Sourced from serde_json's releases.

v1.0.114

  • Fix unused_imports warnings when compiled by rustc 1.78
Commits
  • e1b3a6d Release 1.0.114
  • 6fb7026 Work around prelude redundant import warnings
  • 34a04c5 Ignore incompatible_msrv clippy false positives in test
  • ca05f69 Remove unused Float::is_sign_negative trait method
  • See full diff in compare view

Updates `syn` from 2.0.49 to 2.0.50
Release notes

Sourced from syn's releases.

2.0.50

  • Fix unused_imports warnings when compiled by rustc 1.78
Commits
  • 0dae015 Release 2.0.50
  • 2b493c3 Update location of Pat variant aliases
  • 07a435d Fix doc links now that crate::* is no longer imported
  • 713f932 Eliminate glob imports
  • 6eb82a9 Eliminate glob imports from codegen
  • 896a730 Eliminate glob imports from examples
  • 866c80c Update test suite to nightly-2024-02-18
  • d4b499e Update test suite to nightly-2024-02-17
  • 38956e6 Update test suite to nightly-2024-02-16
  • See full diff in compare view

Updates `serde_yaml` from 0.9.31 to 0.9.32
Release notes

Sourced from serde_yaml's releases.

0.9.32

  • Fix unused_imports warnings when compiled by rustc 1.78

Updates `serde_derive` from 1.0.196 to 1.0.197
Release notes

Sourced from serde_derive's releases.

v1.0.197

  • Fix unused_imports warnings when compiled by rustc 1.78
  • Optimize code size of some Display impls (#2697, thanks @nyurik)
Commits
  • 5fa711d Release 1.0.197
  • f5d8ae4 Resolve prelude redundant import warnings
  • 1d54973 Merge pull request #2697 from nyurik/format-str
  • b8fafef A few minor write_str optimizations and inlining
  • c42ebb8 Update ui test suite to nightly-2024-02-12
  • 9e68062 Ignore incompatible_msrv clippy lint for conditionally compiled code
  • 846f865 Ignore dead_code warnings in test
  • See full diff in compare view

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 140 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 6 +-- 2 files changed, 73 insertions(+), 73 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f8adf0e5f44..9f2a1a068918 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -191,7 +191,7 @@ checksum = "c0391754c09fab4eae3404d19d0d297aa1c670c1775ab51d8a5312afeca23157" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -206,7 +206,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "syn-solidity", "tiny-keccak", ] @@ -333,7 +333,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1217,7 +1217,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1234,7 +1234,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -1416,7 +1416,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -2669,7 +2669,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -3900,7 +3900,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4406,7 +4406,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4446,7 +4446,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4463,7 +4463,7 @@ checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4671,7 +4671,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4732,7 +4732,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.49", + "syn 2.0.50", "termcolor", "toml 0.8.8", "walkdir", @@ -4957,7 +4957,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -4968,7 +4968,7 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -5158,7 +5158,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -5551,7 +5551,7 @@ dependencies = [ "quote", "scale-info", "sp-arithmetic", - "syn 2.0.49", + "syn 2.0.50", "trybuild", ] @@ -5706,7 +5706,7 @@ dependencies = [ "quote", "regex", "sp-crypto-hashing", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -5717,7 +5717,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -5726,7 +5726,7 @@ version = "11.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -5959,7 +5959,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.49", + "syn 2.0.50", ] [[package]] @@ -7900,7 +7900,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7914,7 +7914,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7925,7 +7925,7 @@ checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -7936,7 +7936,7 @@ checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -9693,7 +9693,7 @@ version = "18.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -10868,7 +10868,7 @@ dependencies = [ "proc-macro2", "quote", "sp-runtime", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -11948,7 +11948,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -11989,7 +11989,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -13829,7 +13829,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6380dbe1fb03ecc74ad55d841cfc75480222d153ba69ddcb00977866cbdabdb8" dependencies = [ "polkavm-derive-impl 0.5.0", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -13850,7 +13850,7 @@ dependencies = [ "polkavm-common 0.5.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -13862,7 +13862,7 @@ dependencies = [ "polkavm-common 0.8.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -13872,7 +13872,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15e85319a0d5129dc9f021c62607e0804f5fb777a05cdda44d750ac0732def66" dependencies = [ "polkavm-derive-impl 0.8.0", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -14077,7 +14077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -14168,7 +14168,7 @@ checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -14240,7 +14240,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -14340,7 +14340,7 @@ dependencies = [ "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -14683,7 +14683,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -15593,7 +15593,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -16863,7 +16863,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -17247,9 +17247,9 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -17274,13 +17274,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -17305,9 +17305,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", @@ -17337,9 +17337,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.31" +version = "0.9.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" +checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" dependencies = [ "indexmap 2.2.3", "itoa", @@ -17370,7 +17370,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -18209,7 +18209,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -18602,7 +18602,7 @@ version = "0.0.0" dependencies = [ "quote", "sp-crypto-hashing", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -18620,7 +18620,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf5 dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -18629,7 +18629,7 @@ version = "14.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -18903,7 +18903,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -18915,7 +18915,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -19187,7 +19187,7 @@ dependencies = [ "proc-macro2", "quote", "sp-version", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -19632,7 +19632,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -20030,9 +20030,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.49" +version = "2.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915aea9e586f80826ee59f8453c1101f9d1c4b3964cd2460185ee8e299ada496" +checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" dependencies = [ "proc-macro2", "quote", @@ -20048,7 +20048,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -20313,7 +20313,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -20474,7 +20474,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -20681,7 
+20681,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -20723,7 +20723,7 @@ dependencies = [ "proc-macro-crate 3.0.0", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -21288,7 +21288,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "wasm-bindgen-shared", ] @@ -21322,7 +21322,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -22337,7 +22337,7 @@ dependencies = [ "proc-macro2", "quote", "staging-xcm", - "syn 2.0.49", + "syn 2.0.50", "trybuild", ] @@ -22459,7 +22459,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] @@ -22479,7 +22479,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.49", + "syn 2.0.50", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7e6667559229..774ce1b52a39 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -538,12 +538,12 @@ polkavm-linker = "0.8.2" polkavm-derive = "0.8.0" log = { version = "0.4.20", default-features = false } quote = { version = "1.0.33" } -serde = { version = "1.0.196", default-features = false } +serde = { version = "1.0.197", default-features = false } serde-big-array = { version = "0.3.2" } serde_derive = { version = "1.0.117" } -serde_json = { version = "1.0.113", default-features = false } +serde_json = { version = "1.0.114", default-features = false } serde_yaml = { version = "0.9" } -syn = { version = "2.0.49" } +syn = { version = "2.0.50" } thiserror = { version = "1.0.48" } [profile.release] From 60563b893a448c34474c6ab3de8141b836e17f1b Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 21 Feb 2024 00:32:56 +0100 Subject: [PATCH 10/51] nit: add prdoc for 3370 (#3401) Did not realize that https://github.com/paritytech/polkadot-sdk/pull/3370 was indeed breaking. cc @muraca --------- Signed-off-by: Oliver Tale-Yazdi --- prdoc/pr_3370.prdoc | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 prdoc/pr_3370.prdoc diff --git a/prdoc/pr_3370.prdoc b/prdoc/pr_3370.prdoc new file mode 100644 index 000000000000..f40d3821b9fe --- /dev/null +++ b/prdoc/pr_3370.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Remove `key` getter from pallet-sudo" + +doc: + - audience: Runtime Dev + description: | + Removed the `key` getter function from the sudo pallet. There is no replacement for getting + the key currently. 
+ +crates: + - name: pallet-sudo From 579ef32ddd4ec55314d6f2ee427afbd300541e27 Mon Sep 17 00:00:00 2001 From: girazoki Date: Wed, 21 Feb 2024 00:34:09 +0100 Subject: [PATCH 11/51] remove bound on accountID in AssetFeeAsExistentialDepositMultiplier (#3303) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I don't think this bound is required by anything else, and removing it allows this struct to be used with runtimes that have 20-byte accounts. --------- Co-authored-by: Bastian Köcher --- cumulus/parachains/common/src/xcm_config.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/cumulus/parachains/common/src/xcm_config.rs b/cumulus/parachains/common/src/xcm_config.rs index 15b090923d50..a9756af7aed2 100644 --- a/cumulus/parachains/common/src/xcm_config.rs +++ b/cumulus/parachains/common/src/xcm_config.rs @@ -45,8 +45,6 @@ where >::AssetId, >::Balance, >, - AccountIdOf: - From + Into, { fn charge_weight_in_fungibles( asset_id: as Inspect< From f3a6b6dceabccbc4b92471cf12340b8c390ff14a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 21 Feb 2024 18:14:48 +0800 Subject: [PATCH 12/51] contracts: Fix double charge of gas for host functions (#3361) This PR fixes a bug in the sync mechanism between wasmi and pallet-contracts. This bug leads to essentially double-charging all the gas that was used during the execution of the host function. When the `call` host function is used for recursion, this leads to a quadratic amount of gas consumption with regard to the nesting depth. We also took the chance to refactor the code in question and improve the Rust docs. The bug was caused by not updating `GasMeter::executor_consumed` (previously `engine_consumed`) when leaving the host function. This led to the value being stale (too low) when entering another host function. --------- Co-authored-by: PG Herveou --- prdoc/pr_3361.prdoc | 10 +++ .../contracts/fixtures/contracts/recurse.rs | 53 ++++++++++++ .../frame/contracts/proc-macro/src/lib.rs | 37 ++++---- substrate/frame/contracts/src/gas.rs | 86 +++++++++++++------ substrate/frame/contracts/src/tests.rs | 54 +++++++++++- substrate/frame/contracts/src/wasm/mod.rs | 2 +- 6 files changed, 192 insertions(+), 50 deletions(-) create mode 100644 prdoc/pr_3361.prdoc create mode 100644 substrate/frame/contracts/fixtures/contracts/recurse.rs diff --git a/prdoc/pr_3361.prdoc b/prdoc/pr_3361.prdoc new file mode 100644 index 000000000000..65baa9e94a0e --- /dev/null +++ b/prdoc/pr_3361.prdoc @@ -0,0 +1,10 @@ +title: Fix double charge of host function weight + +doc: + - audience: Runtime Dev + description: | + Fixed a double charge which can lead to quadratic gas consumption of + the `call` and `instantiate` host functions. + +crates: + - name: pallet-contracts diff --git a/substrate/frame/contracts/fixtures/contracts/recurse.rs b/substrate/frame/contracts/fixtures/contracts/recurse.rs new file mode 100644 index 000000000000..b1ded608c2fc --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/recurse.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This fixture calls itself as many times as passed as argument. + +#![no_std] +#![no_main] + +use common::{input, output}; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!(calls_left: u32, ); + + // own address + output!(addr, [0u8; 32], api::address,); + + if calls_left == 0 { + return + } + + api::call_v2( + uapi::CallFlags::ALLOW_REENTRY, + addr, + 0u64, // How much ref_time to devote for the execution. 0 = all. + 0u64, // How much deposit_limit to devote for the execution. 0 = all. + None, // No deposit limit. + &0u64.to_le_bytes(), // Value transferred to the contract. + &(calls_left - 1).to_le_bytes(), + None, + ) + .unwrap(); +} diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs index 403db15ac2cb..de961776c322 100644 --- a/substrate/frame/contracts/proc-macro/src/lib.rs +++ b/substrate/frame/contracts/proc-macro/src/lib.rs @@ -638,37 +638,34 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) }; let sync_gas_before = if expand_blocks { quote! { - // Gas left in the gas meter right before switching to engine execution. - let __gas_before__ = { - let engine_consumed_total = + // Write gas from wasmi into pallet-contracts before entering the host function. + let __gas_left_before__ = { + let executor_total = __caller__.fuel_consumed().expect("Fuel metering is enabled; qed"); - let gas_meter = __caller__.data_mut().ext().gas_meter_mut(); - gas_meter - .charge_fuel(engine_consumed_total) + __caller__ + .data_mut() + .ext() + .gas_meter_mut() + .sync_from_executor(executor_total) .map_err(TrapReason::from) .map_err(#into_host)? - .ref_time() }; } } else { quote! { } }; - // Gas left in the gas meter right after returning from engine execution. + // Write gas from pallet-contracts into wasmi after leaving the host function. let sync_gas_after = if expand_blocks { quote! { - let mut gas_after = __caller__.data_mut().ext().gas_meter().gas_left().ref_time(); - let mut host_consumed = __gas_before__.saturating_sub(gas_after); - // Possible undercharge of at max 1 fuel here, if host consumed less than `instruction_weights.base` - // Not a problem though, as soon as host accounts its spent gas properly. - let fuel_consumed = host_consumed - .checked_div(__caller__.data_mut().ext().schedule().instruction_weights.base as u64) - .ok_or(Error::::InvalidSchedule) - .map_err(TrapReason::from) - .map_err(#into_host)?; + let fuel_consumed = __caller__ + .data_mut() + .ext() + .gas_meter_mut() + .sync_to_executor(__gas_left_before__) + .map_err(TrapReason::from)?; __caller__ - .consume_fuel(fuel_consumed) - .map_err(|_| TrapReason::from(Error::::OutOfGas)) - .map_err(#into_host)?; + .consume_fuel(fuel_consumed.into()) + .map_err(|_| TrapReason::from(Error::::OutOfGas))?; } } else { quote! 
{ }

diff --git a/substrate/frame/contracts/src/gas.rs b/substrate/frame/contracts/src/gas.rs
index 9271b615d002..b9d91f38f16f 100644
--- a/substrate/frame/contracts/src/gas.rs
+++ b/substrate/frame/contracts/src/gas.rs
@@ -23,7 +23,7 @@ use frame_support::{
	DefaultNoBound,
 };
 use sp_core::Get;
-use sp_runtime::{traits::Zero, DispatchError};
+use sp_runtime::{traits::Zero, DispatchError, Saturating};

 #[cfg(test)]
 use std::{any::Any, fmt::Debug};
@@ -37,6 +37,24 @@ impl ChargedAmount {
	}
 }

+/// Used to capture the gas left before entering a host function.
+///
+/// Has to be consumed in order to sync back the gas after leaving the host function.
+#[must_use]
+pub struct RefTimeLeft(u64);
+
+/// Resource that needs to be synced to the executor.
+///
+/// Wrapped to make sure that the resource will be synced back to the executor.
+#[must_use]
+pub struct Syncable(u64);
+
+impl From for u64 {
+	fn from(from: Syncable) -> u64 {
+		from.0
+	}
+}
+
 #[cfg(not(test))]
 pub trait TestAuxiliaries {}
 #[cfg(not(test))]
@@ -84,8 +102,13 @@ pub struct GasMeter {
	gas_left: Weight,
	/// Due to `adjust_gas` and `nested` the `gas_left` can temporarily dip below its final value.
	gas_left_lowest: Weight,
-	/// Amount of fuel consumed by the engine from the last host function call.
-	engine_consumed: u64,
+	/// The amount of resources that was consumed by the execution engine.
+	///
+	/// This should be equivalent to `self.gas_consumed().ref_time()` but expressed in whatever
+	/// unit the execution engine uses to track resource consumption. We have to track it
+	/// separately in order to avoid the loss of precision that happens when converting from
+	/// ref_time to the execution engine unit.
+	executor_consumed: u64,
	_phantom: PhantomData,
	#[cfg(test)]
	tokens: Vec,
@@ -97,7 +120,7 @@ impl GasMeter {
			gas_limit,
			gas_left: gas_limit,
			gas_left_lowest: gas_limit,
-			engine_consumed: Default::default(),
+			executor_consumed: 0,
			_phantom: PhantomData,
			#[cfg(test)]
			tokens: Vec::new(),
@@ -172,32 +195,41 @@ impl GasMeter {
		self.gas_left = self.gas_left.saturating_add(adjustment).min(self.gas_limit);
	}

-	/// This method is used for gas syncs with the engine.
+	/// Hand over the gas metering responsibility from the executor to this meter.
	///
-	/// Updates internal `engine_comsumed` tracker of engine fuel consumption.
+	/// Needs to be called when entering a host function to update this meter with the
+	/// gas that was tracked by the executor. It tracks the latest seen total value
+	/// in order to compute the delta that needs to be charged.
+	pub fn sync_from_executor(
+		&mut self,
+		executor_total: u64,
+	) -> Result {
+		let chargable_reftime = executor_total
+			.saturating_sub(self.executor_consumed)
+			.saturating_mul(u64::from(T::Schedule::get().instruction_weights.base));
+		self.executor_consumed = executor_total;
+		self.gas_left
+			.checked_reduce(Weight::from_parts(chargable_reftime, 0))
+			.ok_or_else(|| Error::::OutOfGas)?;
+		Ok(RefTimeLeft(self.gas_left.ref_time()))
+	}
+
+	/// Hand over the gas metering responsibility from this meter to the executor.
	///
-	/// Charges self with the `ref_time` Weight corresponding to wasmi fuel consumed on the engine
-	/// side since last sync. Passed value is scaled by multiplying it by the weight of a basic
-	/// operation, as such an operation in wasmi engine costs 1.
+	/// Needs to be called when leaving a host function in order to calculate how much
+	/// gas needs to be charged from the **executor**.
It updates the last seen executor + /// total value so that it is correct when `sync_from_executor` is called the next time. /// - /// Returns the updated `gas_left` `Weight` value from the meter. - /// Normally this would never fail, as engine should fail first when out of gas. - pub fn charge_fuel(&mut self, wasmi_fuel_total: u64) -> Result { - // Take the part consumed since the last update. - let wasmi_fuel = wasmi_fuel_total.saturating_sub(self.engine_consumed); - if !wasmi_fuel.is_zero() { - self.engine_consumed = wasmi_fuel_total; - let reftime_consumed = - wasmi_fuel.saturating_mul(T::Schedule::get().instruction_weights.base as u64); - let ref_time_left = self - .gas_left - .ref_time() - .checked_sub(reftime_consumed) - .ok_or_else(|| Error::::OutOfGas)?; - - *(self.gas_left.ref_time_mut()) = ref_time_left; - } - Ok(self.gas_left) + /// It is important that this does **not** actually sync with the executor. That has + /// to be done by the caller. + pub fn sync_to_executor(&mut self, before: RefTimeLeft) -> Result { + let chargable_executor_resource = before + .0 + .saturating_sub(self.gas_left().ref_time()) + .checked_div(u64::from(T::Schedule::get().instruction_weights.base)) + .ok_or(Error::::InvalidSchedule)?; + self.executor_consumed.saturating_accrue(chargable_executor_resource); + Ok(Syncable(chargable_executor_resource)) } /// Returns the amount of gas that is required to run the same call. diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index cd4e43ed4c27..b39fc79b62a0 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -35,8 +35,8 @@ use crate::{ tests::test_utils::{get_contract, get_contract_checked}, wasm::{Determinism, ReturnErrorCode as RuntimeReturnCode}, weights::WeightInfo, - BalanceOf, Code, CodeHash, CodeInfoOf, CollectEvents, Config, ContractInfo, ContractInfoOf, - DebugInfo, DefaultAddressGenerator, DeletionQueueCounter, Error, HoldReason, + Array, BalanceOf, Code, CodeHash, CodeInfoOf, CollectEvents, Config, ContractInfo, + ContractInfoOf, DebugInfo, DefaultAddressGenerator, DeletionQueueCounter, Error, HoldReason, MigrationInProgress, Origin, Pallet, PristineCode, Schedule, }; use assert_matches::assert_matches; @@ -5977,3 +5977,53 @@ fn balance_api_returns_free_balance() { ); }); } + +#[test] +fn gas_consumed_is_linear_for_nested_calls() { + let (code, _code_hash) = compile_module::("recurse").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + let addr = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(code), + vec![], + vec![], + DebugInfo::Skip, + CollectEvents::Skip, + ) + .result + .unwrap() + .account_id; + + let max_call_depth = ::CallStack::size() as u32; + let [gas_0, gas_1, gas_2, gas_max] = { + [0u32, 1u32, 2u32, max_call_depth] + .iter() + .map(|i| { + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + i.encode(), + DebugInfo::Skip, + CollectEvents::Skip, + Determinism::Enforced, + ); + assert_ok!(result.result); + result.gas_consumed + }) + .collect::>() + .try_into() + .unwrap() + }; + + let gas_per_recursion = gas_2.checked_sub(&gas_1).unwrap(); + assert_eq!(gas_max, gas_0 + gas_per_recursion * max_call_depth as u64); + }); +} diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index 5386f4d0ffdf..c96c28565095 100644 --- 
a/substrate/frame/contracts/src/wasm/mod.rs
+++ b/substrate/frame/contracts/src/wasm/mod.rs
@@ -386,7 +386,7 @@ impl Executable for WasmBlob {
			let engine_consumed_total =
				store.fuel_consumed().expect("Fuel metering is enabled; qed");
			let gas_meter = store.data_mut().ext().gas_meter_mut();
-			gas_meter.charge_fuel(engine_consumed_total)?;
+			let _ = gas_meter.sync_from_executor(engine_consumed_total)?;
			store.into_data().to_execution_result(result)
		};

From cbeccad65fa3e6f9fb795fe996d522365a72c69b Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Wed, 21 Feb 2024 12:24:14 +0200
Subject: [PATCH 13/51] sc-consensus-beefy: reduce log levels (#3418)

Fixes https://github.com/paritytech/polkadot-sdk/issues/3407

---
 .../client/consensus/beefy/src/aux_schema.rs  |  4 ++--
 substrate/client/consensus/beefy/src/lib.rs   |  8 +++++--
 .../client/consensus/beefy/src/worker.rs      | 21 ++++++++++---------
 3 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/substrate/client/consensus/beefy/src/aux_schema.rs b/substrate/client/consensus/beefy/src/aux_schema.rs
index 944a00f8372f..534f668ae69c 100644
--- a/substrate/client/consensus/beefy/src/aux_schema.rs
+++ b/substrate/client/consensus/beefy/src/aux_schema.rs
@@ -20,7 +20,7 @@ use crate::{error::Error, worker::PersistedState, LOG_TARGET};

 use codec::{Decode, Encode};
-use log::{info, trace};
+use log::{debug, trace};
 use sc_client_api::{backend::AuxStore, Backend};
 use sp_runtime::traits::Block as BlockT;
@@ -30,7 +30,7 @@ const WORKER_STATE_KEY: &[u8] = b"beefy_voter_state";
 const CURRENT_VERSION: u32 = 4;

 pub(crate) fn write_current_version(backend: &BE) -> Result<(), Error> {
-	info!(target: LOG_TARGET, "🥩 write aux schema version {:?}", CURRENT_VERSION);
+	debug!(target: LOG_TARGET, "🥩 write aux schema version {:?}", CURRENT_VERSION);
	AuxStore::insert_aux(backend, &[(VERSION_KEY, CURRENT_VERSION.encode().as_slice())], &[])
		.map_err(|e| Error::Backend(e.to_string()))
 }
diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs
index 1f10d8099d83..11a105561e47 100644
--- a/substrate/client/consensus/beefy/src/lib.rs
+++ b/substrate/client/consensus/beefy/src/lib.rs
@@ -460,10 +460,14 @@ where
	R::Api: BeefyApi,
 {
	let blockchain = backend.blockchain();
-
	// Walk up the chain looking for the validator set active at 'at_header'. Process both state and
	// header digests.
- debug!(target: LOG_TARGET, "🥩 Trying to find validator set active at header: {:?}", at_header); + debug!( + target: LOG_TARGET, + "🥩 Trying to find validator set active at header(number {:?}, hash {:?})", + at_header.number(), + at_header.hash() + ); let mut header = at_header.clone(); loop { debug!(target: LOG_TARGET, "🥩 Looking for auth set change at block number: {:?}", *header.number()); diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 19a24b9bc1a5..d7a229003cc4 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -387,7 +387,7 @@ where .flatten() .map(|justifs| justifs.get(BEEFY_ENGINE_ID).is_some()) { - info!( + debug!( target: LOG_TARGET, "🥩 Initialize BEEFY voter at last BEEFY finalized block: {:?}.", *header.number() @@ -439,7 +439,7 @@ where } if let Some(active) = find_authorities_change::(&header) { - info!( + debug!( target: LOG_TARGET, "🥩 Marking block {:?} as BEEFY Mandatory.", *header.number() @@ -471,7 +471,7 @@ where state.set_best_grandpa(best_grandpa.clone()); // Overwrite persisted data with newly provided `min_block_delta`. state.set_min_block_delta(min_block_delta); - info!(target: LOG_TARGET, "🥩 Loading BEEFY voter state from db: {:?}.", state); + debug!(target: LOG_TARGET, "🥩 Loading BEEFY voter state from db: {:?}.", state); // Make sure that all the headers that we need have been synced. let mut new_sessions = vec![]; @@ -489,7 +489,7 @@ where // Make sure we didn't miss any sessions during node restart. for (validator_set, new_session_start) in new_sessions.drain(..).rev() { - info!( + debug!( target: LOG_TARGET, "🥩 Handling missed BEEFY session after node restart: {:?}.", new_session_start @@ -630,13 +630,14 @@ where &mut self, notification: &FinalityNotification, ) -> Result<(), Error> { + let header = ¬ification.header; debug!( target: LOG_TARGET, - "🥩 Finality notification: header {:?} tree_route {:?}", - notification.header, + "🥩 Finality notification: header(number {:?}, hash {:?}) tree_route {:?}", + header.number(), + header.hash(), notification.tree_route, ); - let header = ¬ification.header; self.base .runtime @@ -757,7 +758,7 @@ where match rounds.add_vote(vote) { VoteImportResult::RoundConcluded(signed_commitment) => { let finality_proof = VersionedFinalityProof::V1(signed_commitment); - info!( + debug!( target: LOG_TARGET, "🥩 Round #{} concluded, finality_proof: {:?}.", block_number, finality_proof ); @@ -1165,7 +1166,7 @@ where return Ok(()) } else if let Some(local_id) = self.base.key_store.authority_id(validators) { if offender_id == local_id { - debug!(target: LOG_TARGET, "🥩 Skip equivocation report for own equivocation"); + warn!(target: LOG_TARGET, "🥩 Skip equivocation report for own equivocation"); return Ok(()) } } @@ -1234,7 +1235,7 @@ where // if the mandatory block (session_start) does not have a beefy justification yet, // we vote on it let target = if best_beefy < session_start { - debug!(target: LOG_TARGET, "🥩 vote target - mandatory block: #{:?}", session_start,); + debug!(target: LOG_TARGET, "🥩 vote target - mandatory block: #{:?}", session_start); session_start } else { let diff = best_grandpa.saturating_sub(best_beefy) + 1u32.into(); From bf7c49b33cee9a00d7e94d0f64b1fdb4bf3f9241 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Wed, 21 Feb 2024 20:48:18 +0700 Subject: [PATCH 14/51] Fix failing release notifications GHA (#3416) This PR should fix the issue with the failing GHA which sends notifications to the 
different matrix channels when a new release is published.

---
 .../workflows/release-99_notif-published.yml  | 19 ++++---------------
 1 file changed, 4 insertions(+), 15 deletions(-)

diff --git a/.github/workflows/release-99_notif-published.yml b/.github/workflows/release-99_notif-published.yml
index b35120ca4e12..397795fafa4e 100644
--- a/.github/workflows/release-99_notif-published.yml
+++ b/.github/workflows/release-99_notif-published.yml
@@ -8,13 +8,11 @@ on:
 jobs:
   ping_matrix:
     runs-on: ubuntu-latest
+    environment: release
     strategy:
       matrix:
         channel:
           # Internal
-          - name: 'RelEng: Cumulus Release Coordination'
-            room: '!NAEMyPAHWOiOQHsvus:parity.io'
-            pre-releases: true
           - name: "RelEng: Polkadot Release Coordination"
             room: '!cqAmzdIcbOFwrdrubV:parity.io'
             pre-release: true
@@ -31,18 +29,9 @@ jobs:
             pre-release: true

           # Public
-          # - name: '#KusamaValidatorLounge:polkadot.builders'
-          #   room: '!LhjZccBOqFNYKLdmbb:polkadot.builders'
-          #   pre-releases: false
-          # - name: '#kusama-announcements:matrix.parity.io'
-          #   room: '!FMwxpQnYhRCNDRsYGI:matrix.parity.io'
-          #   pre-release: false
-          # - name: '#polkadotvalidatorlounge:web3.foundation'
-          #   room: '!NZrbtteFeqYKCUGQtr:matrix.parity.io'
-          #   pre-release: false
-          # - name: '#polkadot-announcements:matrix.parity.io'
-          #   room: '!UqHPWiCBGZWxrmYBkF:matrix.parity.io'
-          #   pre-release: false
+          - name: '#polkadotvalidatorlounge:web3.foundation'
+            room: '!NZrbtteFeqYKCUGQtr:matrix.parity.io'
+            pre-releases: false

     steps:
       - name: Matrix notification to ${{ matrix.channel.name }}

From 5a06771eccb27e58f09189ea34ce20d947b1db4f Mon Sep 17 00:00:00 2001
From: Clara van Staden
Date: Wed, 21 Feb 2024 16:48:40 +0200
Subject: [PATCH 15/51] Snowbridge - Test pallet order (#3381)

- Adds a test to check the correct digest for Snowbridge outbound messages.
  For the correct digest to be in the block, the MessageQueue pallet must be
  configured after the EthereumOutboundQueue pallet (i.e. listed after it in
  the construct_runtime macro). The added test fails if the
  EthereumOutboundQueue is configured after the MessageQueue pallet.
- Adds a helper method `run_to_block_with_finalize` to simulate block
  finalization. The existing `run_to_block` method does not finalize the block
  and so cannot successfully test this condition (see the sketch below).
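
For illustration only (this sketch is not part of the diff; `Runtime` and
`AllPalletsWithoutSystem` stand for the concrete types of the runtime under
test), the resulting test pattern boils down to:

```rust
use sp_runtime::{DigestItem, SaturatedConversion};

// Sketch: assumes test externalities are already set up for `Runtime`.
let next_block: u32 =
	frame_system::Pallet::<Runtime>::block_number().saturated_into::<u32>() + 1;

// Advance one block *with* finalization, so that every pallet's `on_finalize`
// hook runs and the outbound queue can commit its digest item to the header.
let included_head =
	RuntimeHelper::<Runtime, AllPalletsWithoutSystem>::run_to_block_with_finalize(
		next_block,
	);

// The outbound queue commits its messages as a `DigestItem::Other`; if the
// MessageQueue pallet had been configured first, this item would be missing.
assert!(included_head
	.digest()
	.logs()
	.iter()
	.any(|item| matches!(item, DigestItem::Other(_))));
```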
Closes: https://github.com/paritytech/polkadot-sdk/issues/3208 --------- Co-authored-by: claravanstaden --- Cargo.lock | 3 + bridges/snowbridge/README.md | 24 ++-- bridges/snowbridge/pallets/system/src/lib.rs | 4 +- .../snowbridge/runtime/test-common/src/lib.rs | 112 +++++++++++++++-- .../snowbridge/scripts/contribute-upstream.sh | 80 ++++++++++++ .../scripts/verify-pallets-build.sh | 116 ------------------ .../runtimes/assets/test-utils/Cargo.toml | 2 + .../assets/test-utils/src/test_cases.rs | 18 ++- .../test-utils/src/test_cases_over_bridge.rs | 9 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../bridge-hub-rococo/tests/snowbridge.rs | 32 ++++- .../bridge-hubs/test-utils/Cargo.toml | 2 + .../test-utils/src/test_cases/helpers.rs | 4 +- .../parachains/runtimes/test-utils/Cargo.toml | 2 + .../parachains/runtimes/test-utils/src/lib.rs | 71 ++++++++++- .../runtimes/test-utils/src/test_cases.rs | 6 +- 16 files changed, 332 insertions(+), 155 deletions(-) create mode 100755 bridges/snowbridge/scripts/contribute-upstream.sh delete mode 100755 bridges/snowbridge/scripts/verify-pallets-build.sh diff --git a/Cargo.lock b/Cargo.lock index 9f2a1a068918..93ee9cc99c44 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1054,6 +1054,7 @@ dependencies = [ "pallet-balances", "pallet-collator-selection", "pallet-session", + "pallet-timestamp", "pallet-xcm", "pallet-xcm-bridge-hub-router", "parachains-common", @@ -2099,6 +2100,7 @@ dependencies = [ "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", + "pallet-timestamp", "pallet-utility", "parachains-common", "parachains-runtimes-test-utils", @@ -11430,6 +11432,7 @@ dependencies = [ "pallet-balances", "pallet-collator-selection", "pallet-session", + "pallet-timestamp", "pallet-xcm", "parity-scale-codec", "polkadot-parachain-primitives", diff --git a/bridges/snowbridge/README.md b/bridges/snowbridge/README.md index 49b9c2eaf553..6561df401120 100644 --- a/bridges/snowbridge/README.md +++ b/bridges/snowbridge/README.md @@ -1,32 +1,40 @@ -# Snowbridge -[![codecov](https://codecov.io/gh/Snowfork/snowbridge/branch/main/graph/badge.svg?token=9hvgSws4rN)](https://codecov.io/gh/Snowfork/snowbridge) +# Snowbridge · +[![codecov](https://codecov.io/gh/Snowfork/polkadot-sdk/branch/snowbridge/graph/badge.svg?token=9hvgSws4rN)](https://codecov.io/gh/Snowfork/polkadot-sdk) ![GitHub](https://img.shields.io/github/license/Snowfork/snowbridge) Snowbridge is a trustless bridge between Polkadot and Ethereum. For documentation, visit https://docs.snowbridge.network. ## Components +The Snowbridge project lives in two repositories: + +- [Snowfork/Polkadot-sdk](https://github.com/Snowfork/polkadot-sdk): The Snowbridge parachain and pallets live in +a fork of the Polkadot SDK. Changes are eventually contributed back to +[paritytech/Polkadot-sdk](https://github.com/paritytech/polkadot-sdk) +- [Snowfork/snowbridge](https://github.com/Snowfork/snowbridge): The rest of the Snowbridge components, like contracts, +off-chain relayer, end-to-end tests and test-net setup code. + ### Parachain -Polkadot parachain and our pallets. See [parachain/README.md](https://github.com/Snowfork/snowbridge/blob/main/parachain/README.md). +Polkadot parachain and our pallets. See [README.md](https://github.com/Snowfork/polkadot-sdk/blob/snowbridge/bridges/snowbridge/README.md). ### Contracts -Ethereum contracts and unit tests. See [contracts/README.md](https://github.com/Snowfork/snowbridge/blob/main/contracts/README.md) +Ethereum contracts and unit tests. 
See [Snowfork/snowbridge/contracts/README.md](https://github.com/Snowfork/snowbridge/blob/main/contracts/README.md) ### Relayer Off-chain relayer services for relaying messages between Polkadot and Ethereum. See -[relayer/README.md](https://github.com/Snowfork/snowbridge/blob/main/relayer/README.md) +[Snowfork/snowbridge/relayer/README.md](https://github.com/Snowfork/snowbridge/blob/main/relayer/README.md) ### Local Testnet Scripts to provision a local testnet, running the above services to bridge between local deployments of Polkadot and -Ethereum. See [web/packages/test/README.md](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/README.md). +Ethereum. See [Snowfork/snowbridge/web/packages/test/README.md](https://github.com/Snowfork/snowbridge/blob/main/web/packages/test/README.md). ### Smoke Tests -Integration tests for our local testnet. See [smoketest/README.md](https://github.com/Snowfork/snowbridge/blob/main/smoketest/README.md). +Integration tests for our local testnet. See [Snowfork/snowbridge/smoketest/README.md](https://github.com/Snowfork/snowbridge/blob/main/smoketest/README.md). ## Development @@ -83,7 +91,7 @@ direnv allow ### Upgrading the Rust toolchain -Sometimes we would like to upgrade rust toolchain. First update `parachain/rust-toolchain.toml` as required and then +Sometimes we would like to upgrade rust toolchain. First update `rust-toolchain.toml` as required and then update `flake.lock` running ```sh nix flake lock --update-input rust-overlay diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index b7f38fb753d3..6e5ceb5e9b1d 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -37,8 +37,6 @@ //! `force_update_channel` and extrinsics to manage agents and channels for system parachains. 
#![cfg_attr(not(feature = "std"), no_std)] -pub use pallet::*; - #[cfg(test)] mod mock; @@ -79,6 +77,8 @@ use xcm_executor::traits::ConvertLocation; #[cfg(feature = "runtime-benchmarks")] use frame_support::traits::OriginTrait; +pub use pallet::*; + pub type BalanceOf = <::Token as Inspect<::AccountId>>::Balance; pub type AccountIdOf = ::AccountId; diff --git a/bridges/snowbridge/runtime/test-common/src/lib.rs b/bridges/snowbridge/runtime/test-common/src/lib.rs index c9bbce98e575..29b0e738c182 100644 --- a/bridges/snowbridge/runtime/test-common/src/lib.rs +++ b/bridges/snowbridge/runtime/test-common/src/lib.rs @@ -13,9 +13,9 @@ use parachains_runtimes_test_utils::{ }; use snowbridge_core::{ChannelId, ParaId}; use snowbridge_pallet_ethereum_client_fixtures::*; -use sp_core::H160; +use sp_core::{H160, U256}; use sp_keyring::AccountKeyring::*; -use sp_runtime::{traits::Header, AccountId32, SaturatedConversion, Saturating}; +use sp_runtime::{traits::Header, AccountId32, DigestItem, SaturatedConversion, Saturating}; use xcm::{ latest::prelude::*, v3::Error::{self, Barrier}, @@ -53,7 +53,8 @@ where + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + snowbridge_pallet_outbound_queue::Config, + + snowbridge_pallet_outbound_queue::Config + + pallet_timestamp::Config, XcmConfig: xcm_executor::Config, { let assethub_parachain_location = Location::new(1, Parachain(assethub_parachain_id)); @@ -125,7 +126,8 @@ pub fn send_transfer_token_message_success( + pallet_message_queue::Config + cumulus_pallet_parachain_system::Config + snowbridge_pallet_outbound_queue::Config - + snowbridge_pallet_system::Config, + + snowbridge_pallet_system::Config + + pallet_timestamp::Config, XcmConfig: xcm_executor::Config, ValidatorIdOf: From>, ::AccountId: From + AsRef<[u8]>, @@ -193,12 +195,100 @@ pub fn send_transfer_token_message_success( let digest = included_head.digest(); - //let digest = frame_system::Pallet::::digest(); let digest_items = digest.logs(); assert!(digest_items.len() == 1 && digest_items[0].as_other().is_some()); }); } +pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works< + Runtime, + XcmConfig, + AllPalletsWithoutSystem, +>( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + assethub_parachain_id: u32, + weth_contract_address: H160, + destination_address: H160, + fee_amount: u128, + snowbridge_pallet_outbound_queue: Box< + dyn Fn(Vec) -> Option>, + >, +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + pallet_message_queue::Config + + cumulus_pallet_parachain_system::Config + + snowbridge_pallet_outbound_queue::Config + + snowbridge_pallet_system::Config + + pallet_timestamp::Config, + XcmConfig: xcm_executor::Config, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + ValidatorIdOf: From>, + ::AccountId: From + AsRef<[u8]>, +{ + ExtBuilder::::default() + .with_collators(collator_session_key.collators()) + .with_session_keys(collator_session_key.session_keys()) + .with_para_id(runtime_para_id.into()) + .with_tracing() + .build() + .execute_with(|| { + >::initialize( + runtime_para_id.into(), + assethub_parachain_id.into(), + ) + .unwrap(); + + // fund asset hub sovereign account enough so it can pay fees + initial_fund::(assethub_parachain_id, 5_000_000_000_000); + + let outcome = send_transfer_token_message::( + assethub_parachain_id, + weth_contract_address, + 
destination_address, + fee_amount, + ); + + assert_ok!(outcome.ensure_complete()); + + // check events + let mut events = >::events() + .into_iter() + .filter_map(|e| snowbridge_pallet_outbound_queue(e.event.encode())); + assert!(events.any(|e| matches!( + e, + snowbridge_pallet_outbound_queue::Event::MessageQueued { .. } + ))); + + let next_block_number: U256 = >::block_number() + .saturating_add(BlockNumberFor::::from(1u32)) + .into(); + + let included_head = + RuntimeHelper::::run_to_block_with_finalize( + next_block_number.as_u32(), + ); + let digest = included_head.digest(); + let digest_items = digest.logs(); + + let mut found_outbound_digest = false; + for digest_item in digest_items { + match digest_item { + DigestItem::Other(_) => found_outbound_digest = true, + _ => {}, + } + } + + assert_eq!(found_outbound_digest, true); + }); +} + pub fn send_unpaid_transfer_token_message( collator_session_key: CollatorSessionKeys, runtime_para_id: u32, @@ -213,7 +303,8 @@ pub fn send_unpaid_transfer_token_message( + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + snowbridge_pallet_outbound_queue::Config, + + snowbridge_pallet_outbound_queue::Config + + pallet_timestamp::Config, XcmConfig: xcm_executor::Config, ValidatorIdOf: From>, { @@ -301,7 +392,8 @@ pub fn send_transfer_token_message_failure( + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config + snowbridge_pallet_outbound_queue::Config - + snowbridge_pallet_system::Config, + + snowbridge_pallet_system::Config + + pallet_timestamp::Config, XcmConfig: xcm_executor::Config, ValidatorIdOf: From>, { @@ -349,7 +441,8 @@ pub fn ethereum_extrinsic( + cumulus_pallet_parachain_system::Config + snowbridge_pallet_outbound_queue::Config + snowbridge_pallet_system::Config - + snowbridge_pallet_ethereum_client::Config, + + snowbridge_pallet_ethereum_client::Config + + pallet_timestamp::Config, ValidatorIdOf: From>, ::RuntimeCall: From>, @@ -430,7 +523,8 @@ pub fn ethereum_to_polkadot_message_extrinsics_work( + cumulus_pallet_parachain_system::Config + snowbridge_pallet_outbound_queue::Config + snowbridge_pallet_system::Config - + snowbridge_pallet_ethereum_client::Config, + + snowbridge_pallet_ethereum_client::Config + + pallet_timestamp::Config, ValidatorIdOf: From>, ::RuntimeCall: From>, diff --git a/bridges/snowbridge/scripts/contribute-upstream.sh b/bridges/snowbridge/scripts/contribute-upstream.sh new file mode 100755 index 000000000000..8aa2d2a7035e --- /dev/null +++ b/bridges/snowbridge/scripts/contribute-upstream.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +# A script to cleanup the Snowfork fork of the polkadot-sdk to contribute it upstream back to parity/polkadot-sdk +# ./bridges/snowbridge/scripts/contribute-upstream.sh + +# show CLI help +function show_help() { + set +x + echo " " + echo Error: $1 + echo "Usage:" + echo " ./bridges/snowbridge/scripts/contribute-upstream.sh Exit with code 0 if pallets code is well decoupled from the other code in the repo" + exit 1 +} + +if [[ -z "$1" ]]; then + echo "Please provide a branch name you would like your upstream branch to be named" + exit 1 +fi + +branch_name=$1 + +set -eux + +# let's avoid any restrictions on where this script can be called for - snowbridge repo may be +# plugged into any other repo folder. So the script (and other stuff that needs to be removed) +# may be located either in call dir, or one of it subdirs. 
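+# For example (illustrative): with this file at bridges/snowbridge/scripts/,
+# the assignment below resolves SNOWBRIDGE_FOLDER to the bridges/snowbridge/
+# directory, no matter which directory the script is invoked from.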
+SNOWBRIDGE_FOLDER="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../" + +# Get the current Git branch name +current_branch=$(git rev-parse --abbrev-ref HEAD) + +if [ "$current_branch" = "$branch_name" ] || git branch | grep -q "$branch_name"; then + echo "Already on requested branch or branch exists, not creating." +else + git branch "$branch_name" +fi + +git checkout "$branch_name" + +# remove everything we think is not required for our needs +rm -rf rust-toolchain.toml +rm -rf $SNOWBRIDGE_FOLDER/.cargo +rm -rf $SNOWBRIDGE_FOLDER/.github +rm -rf $SNOWBRIDGE_FOLDER/SECURITY.md +rm -rf $SNOWBRIDGE_FOLDER/.gitignore +rm -rf $SNOWBRIDGE_FOLDER/templates +rm -rf $SNOWBRIDGE_FOLDER/pallets/ethereum-client/fuzz + +pushd $SNOWBRIDGE_FOLDER + +# let's test if everything we need compiles +cargo check -p snowbridge-pallet-ethereum-client +cargo check -p snowbridge-pallet-ethereum-client --features runtime-benchmarks +cargo check -p snowbridge-pallet-ethereum-client --features try-runtime +cargo check -p snowbridge-pallet-inbound-queue +cargo check -p snowbridge-pallet-inbound-queue --features runtime-benchmarks +cargo check -p snowbridge-pallet-inbound-queue --features try-runtime +cargo check -p snowbridge-pallet-outbound-queue +cargo check -p snowbridge-pallet-outbound-queue --features runtime-benchmarks +cargo check -p snowbridge-pallet-outbound-queue --features try-runtime +cargo check -p snowbridge-pallet-system +cargo check -p snowbridge-pallet-system --features runtime-benchmarks +cargo check -p snowbridge-pallet-system --features try-runtime + +# we're removing lock file after all checks are done. Otherwise we may use different +# Substrate/Polkadot/Cumulus commits and our checks will fail +rm -f $SNOWBRIDGE_FOLDER/Cargo.toml +rm -f $SNOWBRIDGE_FOLDER/Cargo.lock + +popd + +# Replace Parity's CI files, that we have overwritten in our fork, to run our own CI +rm -rf .github +git remote -v | grep -w parity || git remote add parity https://github.com/paritytech/polkadot-sdk +git fetch parity master +git checkout parity/master -- .github +git add -- .github + +echo "OK" diff --git a/bridges/snowbridge/scripts/verify-pallets-build.sh b/bridges/snowbridge/scripts/verify-pallets-build.sh deleted file mode 100755 index a62f48c84d4f..000000000000 --- a/bridges/snowbridge/scripts/verify-pallets-build.sh +++ /dev/null @@ -1,116 +0,0 @@ -#!/bin/bash - -# A script to remove everything from snowbridge repository/subtree, except: -# -# - parachain -# - readme -# - license - -set -eu - -# show CLI help -function show_help() { - set +x - echo " " - echo Error: $1 - echo "Usage:" - echo " ./scripts/verify-pallets-build.sh Exit with code 0 if pallets code is well decoupled from the other code in the repo" - echo "Options:" - echo " --no-revert Leaves only runtime code on exit" - echo " --ignore-git-state Ignores git actual state" - exit 1 -} - -# parse CLI args -NO_REVERT= -IGNORE_GIT_STATE= -for i in "$@" -do - case $i in - --no-revert) - NO_REVERT=true - shift - ;; - --ignore-git-state) - IGNORE_GIT_STATE=true - shift - ;; - *) - show_help "Unknown option: $i" - ;; - esac -done - -# the script is able to work only on clean git copy, unless we want to ignore this check -[[ ! -z "${IGNORE_GIT_STATE}" ]] || [[ -z "$(git status --porcelain)" ]] || { echo >&2 "The git copy must be clean"; exit 1; } - -# let's avoid any restrictions on where this script can be called for - snowbridge repo may be -# plugged into any other repo folder. 
So the script (and other stuff that needs to be removed) -# may be located either in call dir, or one of it subdirs. -SNOWBRIDGE_FOLDER="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/../.." - -# remove everything we think is not required for our needs -rm -rf $SNOWBRIDGE_FOLDER/.cargo -rm -rf $SNOWBRIDGE_FOLDER/.github -rm -rf $SNOWBRIDGE_FOLDER/contracts -rm -rf $SNOWBRIDGE_FOLDER/codecov.yml -rm -rf $SNOWBRIDGE_FOLDER/docs -rm -rf $SNOWBRIDGE_FOLDER/hooks -rm -rf $SNOWBRIDGE_FOLDER/relayer -rm -rf $SNOWBRIDGE_FOLDER/scripts -rm -rf $SNOWBRIDGE_FOLDER/SECURITY.md -rm -rf $SNOWBRIDGE_FOLDER/smoketest -rm -rf $SNOWBRIDGE_FOLDER/web -rm -rf $SNOWBRIDGE_FOLDER/.envrc-example -rm -rf $SNOWBRIDGE_FOLDER/.gitbook.yaml -rm -rf $SNOWBRIDGE_FOLDER/.gitignore -rm -rf $SNOWBRIDGE_FOLDER/.gitmodules -rm -rf $SNOWBRIDGE_FOLDER/_typos.toml -rm -rf $SNOWBRIDGE_FOLDER/_codecov.yml -rm -rf $SNOWBRIDGE_FOLDER/flake.lock -rm -rf $SNOWBRIDGE_FOLDER/flake.nix -rm -rf $SNOWBRIDGE_FOLDER/go.work -rm -rf $SNOWBRIDGE_FOLDER/go.work.sum -rm -rf $SNOWBRIDGE_FOLDER/polkadot-sdk -rm -rf $SNOWBRIDGE_FOLDER/rust-toolchain.toml -rm -rf $SNOWBRIDGE_FOLDER/parachain/rustfmt.toml -rm -rf $SNOWBRIDGE_FOLDER/parachain/.gitignore -rm -rf $SNOWBRIDGE_FOLDER/parachain/templates -rm -rf $SNOWBRIDGE_FOLDER/parachain/.cargo -rm -rf $SNOWBRIDGE_FOLDER/parachain/.config -rm -rf $SNOWBRIDGE_FOLDER/parachain/pallets/ethereum-client/fuzz - -cd bridges/snowbridge/parachain - -# fix polkadot-sdk paths in Cargo.toml files -find "." -name 'Cargo.toml' | while read -r file; do - replace=$(printf '../../' ) - if [[ "$(uname)" = "Darwin" ]] || [[ "$(uname)" = *BSD ]]; then - sed -i '' "s|polkadot-sdk/|$replace|g" "$file" - else - sed -i "s|polkadot-sdk/|$replace|g" "$file" - fi -done - -# let's test if everything we need compiles -cargo check -p snowbridge-pallet-ethereum-client -cargo check -p snowbridge-pallet-ethereum-client --features runtime-benchmarks -cargo check -p snowbridge-pallet-ethereum-client --features try-runtime -cargo check -p snowbridge-pallet-inbound-queue -cargo check -p snowbridge-pallet-inbound-queue --features runtime-benchmarks -cargo check -p snowbridge-pallet-inbound-queue --features try-runtime -cargo check -p snowbridge-pallet-outbound-queue -cargo check -p snowbridge-pallet-outbound-queue --features runtime-benchmarks -cargo check -p snowbridge-pallet-outbound-queue --features try-runtime -cargo check -p snowbridge-pallet-system -cargo check -p snowbridge-pallet-system --features runtime-benchmarks -cargo check -p snowbridge-pallet-system --features try-runtime - -cd - - -# we're removing lock file after all checks are done. 
Otherwise we may use different -# Substrate/Polkadot/Cumulus commits and our checks will fail -rm -f $SNOWBRIDGE_FOLDER/parachain/Cargo.toml -rm -f $SNOWBRIDGE_FOLDER/parachain/Cargo.lock - -echo "OK" diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index 89c4925cb1ac..883c93c97b4d 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -17,6 +17,7 @@ frame-support = { path = "../../../../../substrate/frame/support", default-featu frame-system = { path = "../../../../../substrate/frame/system", default-features = false } pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } @@ -59,6 +60,7 @@ std = [ "pallet-balances/std", "pallet-collator-selection/std", "pallet-session/std", + "pallet-timestamp/std", "pallet-xcm-bridge-hub-router/std", "pallet-xcm/std", "parachain-info/std", diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 4007d926983e..53e10956bd0d 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -70,7 +70,8 @@ pub fn teleports_for_native_asset_works< + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + cumulus_pallet_xcmp_queue::Config, + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]>, @@ -350,7 +351,8 @@ pub fn teleports_for_foreign_assets_works< + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config + cumulus_pallet_xcmp_queue::Config - + pallet_assets::Config, + + pallet_assets::Config + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]>, @@ -701,7 +703,8 @@ pub fn asset_transactor_transfer_with_local_consensus_currency_works: Into<[u8; 32]>, ValidatorIdOf: From>, BalanceOf: From, @@ -826,7 +829,8 @@ pub fn asset_transactor_transfer_with_pallet_assets_instance_works< + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + pallet_assets::Config, + + pallet_assets::Config + + pallet_timestamp::Config, AccountIdOf: Into<[u8; 32]>, ValidatorIdOf: From>, BalanceOf: From, @@ -1093,7 +1097,8 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + pallet_assets::Config, + + pallet_assets::Config + + pallet_timestamp::Config, AccountIdOf: Into<[u8; 32]>, ValidatorIdOf: From>, BalanceOf: From, @@ -1422,7 +1427,8 @@ pub fn reserve_transfer_native_asset_to_non_teleport_para_works< + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + cumulus_pallet_xcmp_queue::Config, + + 
cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]>, diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 66ed3417951a..1cce3b647cf0 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -70,7 +70,8 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config - + cumulus_pallet_xcmp_queue::Config, + + cumulus_pallet_xcmp_queue::Config + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]>, @@ -347,7 +348,8 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config + cumulus_pallet_xcmp_queue::Config - + pallet_assets::Config, + + pallet_assets::Config + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]> + From<[u8; 32]>, @@ -510,7 +512,8 @@ pub fn report_bridge_status_from_xcm_bridge_router_works< + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config + cumulus_pallet_xcmp_queue::Config - + pallet_xcm_bridge_hub_router::Config, + + pallet_xcm_bridge_hub_router::Config + + pallet_timestamp::Config, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: Into<[u8; 32]>, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index d1eac089f670..ae50d2a93cb9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -730,7 +730,7 @@ construct_runtime!( // Message Queue. Importantly, is registered last so that messages are processed after // the `on_initialize` hooks of bridging pallets. 
- MessageQueue: pallet_message_queue = 250, + MessageQueue: pallet_message_queue = 175, } ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index b2e5cc9ef06f..b9f43624b652 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -20,9 +20,9 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages, bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, - xcm_config::XcmConfig, BridgeRejectObsoleteHeadersAndMessages, Executive, - MessageQueueServiceWeight, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, - UncheckedExtrinsic, + xcm_config::XcmConfig, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, + Executive, MessageQueueServiceWeight, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, + SignedExtra, UncheckedExtrinsic, }; use codec::{Decode, Encode}; use cumulus_primitives_core::XcmError::{FailedToTransactAsset, NotHoldingFees}; @@ -135,6 +135,32 @@ fn ethereum_to_polkadot_message_extrinsics_work() { ); } +/// Tests that the digest items are as expected when a Ethereum Outbound message is received. +/// If the MessageQueue pallet is configured before (i.e. the MessageQueue pallet is listed before +/// the EthereumOutboundQueue in the construct_runtime macro) the EthereumOutboundQueue, this test +/// will fail. +#[test] +pub fn ethereum_outbound_queue_processes_messages_before_message_queue_works() { + snowbridge_runtime_test_common::ethereum_outbound_queue_processes_messages_before_message_queue_works::< + Runtime, + XcmConfig, + AllPalletsWithoutSystem, + >( + collator_session_keys(), + 1013, + 1000, + H160::random(), + H160::random(), + DefaultBridgeHubEthereumBaseFee::get(), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::EthereumOutboundQueue(event)) => Some(event), + _ => None, + } + }), + ) +} + fn construct_extrinsic( sender: sp_keyring::AccountKeyring, call: RuntimeCall, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index d34b5cd0eed1..5f2a6e050d83 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -25,6 +25,7 @@ sp-std = { path = "../../../../../substrate/primitives/std", default-features = sp-tracing = { path = "../../../../../substrate/primitives/tracing" } pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } # Cumulus asset-test-utils = { path = "../../assets/test-utils" } @@ -73,6 +74,7 @@ std = [ "pallet-bridge-messages/std", "pallet-bridge-parachains/std", "pallet-bridge-relayers/std", + "pallet-timestamp/std", "pallet-utility/std", "parachains-common/std", "parachains-runtimes-test-utils/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs index 4f634c184aa8..2b48f2e3d515 100644 --- 
a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -197,7 +197,9 @@ where pub(crate) fn initialize_bridge_grandpa_pallet( init_data: bp_header_chain::InitializationData>, ) where - Runtime: BridgeGrandpaConfig, + Runtime: BridgeGrandpaConfig + + cumulus_pallet_parachain_system::Config + + pallet_timestamp::Config, { pallet_bridge_grandpa::Pallet::::initialize( RuntimeHelper::::root_origin(), diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index a61e05de13fa..eda88beb7dab 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -17,6 +17,7 @@ frame-support = { path = "../../../../substrate/frame/support", default-features frame-system = { path = "../../../../substrate/frame/system", default-features = false } pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } pallet-session = { path = "../../../../substrate/frame/session", default-features = false } +pallet-timestamp = { path = "../../../../substrate/frame/timestamp", default-features = false } sp-consensus-aura = { path = "../../../../substrate/primitives/consensus/aura", default-features = false } sp-io = { path = "../../../../substrate/primitives/io", default-features = false } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } @@ -59,6 +60,7 @@ std = [ "pallet-balances/std", "pallet-collator-selection/std", "pallet-session/std", + "pallet-timestamp/std", "pallet-xcm/std", "parachain-info/std", "polkadot-parachain-primitives/std", diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index b4eb57fcb66f..e62daa16a125 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -34,7 +34,7 @@ use polkadot_parachain_primitives::primitives::{ }; use sp_consensus_aura::{SlotDuration, AURA_ENGINE_ID}; use sp_core::{Encode, U256}; -use sp_runtime::{traits::Header, BuildStorage, Digest, DigestItem}; +use sp_runtime::{traits::Header, BuildStorage, Digest, DigestItem, SaturatedConversion}; use xcm::{ latest::{Asset, Location, XcmContext, XcmHash}, prelude::*, @@ -129,6 +129,7 @@ pub trait BasicParachainRuntime: + parachain_info::Config + pallet_collator_selection::Config + cumulus_pallet_parachain_system::Config + + pallet_timestamp::Config { } @@ -140,7 +141,8 @@ where + pallet_xcm::Config + parachain_info::Config + pallet_collator_selection::Config - + cumulus_pallet_parachain_system::Config, + + cumulus_pallet_parachain_system::Config + + pallet_timestamp::Config, ValidatorIdOf: From>, { } @@ -259,8 +261,10 @@ pub struct RuntimeHelper( ); /// Utility function that advances the chain to the desired block number. /// If an author is provided, that author information is injected to all the blocks in the meantime. 
-impl - RuntimeHelper +impl< + Runtime: frame_system::Config + cumulus_pallet_parachain_system::Config + pallet_timestamp::Config, + AllPalletsWithoutSystem, + > RuntimeHelper where AccountIdOf: Into<<::RuntimeOrigin as OriginTrait>::AccountId>, @@ -296,6 +300,65 @@ where last_header.expect("run_to_block empty block range") } + pub fn run_to_block_with_finalize(n: u32) -> HeaderFor { + let mut last_header = None; + loop { + let block_number = frame_system::Pallet::::block_number(); + if block_number >= n.into() { + break + } + // Set the new block number and author + let header = frame_system::Pallet::::finalize(); + + let pre_digest = Digest { + logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, block_number.encode())], + }; + frame_system::Pallet::::reset_events(); + + let next_block_number = block_number + 1u32.into(); + frame_system::Pallet::::initialize( + &next_block_number, + &header.hash(), + &pre_digest, + ); + AllPalletsWithoutSystem::on_initialize(next_block_number); + + let parent_head = HeadData(header.encode()); + let sproof_builder = RelayStateSproofBuilder { + para_id: ::SelfParaId::get(), + included_para_head: parent_head.clone().into(), + ..Default::default() + }; + + let (relay_parent_storage_root, relay_chain_state) = + sproof_builder.into_state_root_and_proof(); + let inherent_data = ParachainInherentData { + validation_data: PersistedValidationData { + parent_head, + relay_parent_number: (block_number.saturated_into::() * 2 + 1).into(), + relay_parent_storage_root, + max_pov_size: 100_000_000, + }, + relay_chain_state, + downward_messages: Default::default(), + horizontal_messages: Default::default(), + }; + + let _ = cumulus_pallet_parachain_system::Pallet::::set_validation_data( + Runtime::RuntimeOrigin::none(), + inherent_data, + ); + let _ = pallet_timestamp::Pallet::::set( + Runtime::RuntimeOrigin::none(), + 300_u32.into(), + ); + AllPalletsWithoutSystem::on_finalize(next_block_number); + let header = frame_system::Pallet::::finalize(); + last_header = Some(header); + } + last_header.expect("run_to_block empty block range") + } + pub fn root_origin() -> ::RuntimeOrigin { ::RuntimeOrigin::root() } diff --git a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs index f78bf9877ec2..1c58df189b67 100644 --- a/cumulus/parachains/runtimes/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/test-utils/src/test_cases.rs @@ -37,7 +37,8 @@ pub fn change_storage_constant_by_governance_works: From>, StorageConstant: Get, StorageConstantType: Encode + PartialEq + std::fmt::Debug, @@ -107,7 +108,8 @@ pub fn set_storage_keys_by_governance_works( + pallet_xcm::Config + parachain_info::Config + pallet_collator_selection::Config - + cumulus_pallet_parachain_system::Config, + + cumulus_pallet_parachain_system::Config + + pallet_timestamp::Config, ValidatorIdOf: From>, { let mut runtime = ExtBuilder::::default() From 165d075a5f680791c8e0b128ea60edd590a44f1f Mon Sep 17 00:00:00 2001 From: Dastan <88332432+dastansam@users.noreply.github.com> Date: Wed, 21 Feb 2024 15:56:09 +0100 Subject: [PATCH 16/51] benchmarking-cli: add --list-pallets and --all options (#3395) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit closes #2844 - adds `list-pallets` option which prints all unique available pallets for benchmarking ```bash ./target/release/node benchmark pallet --list=pallets ``` - adds `all` option which runs benchmarks for all available pallets and extrinsics (equivalent to 
`--pallet * --extrinsic *`) ```bash ./target/release/node benchmark pallet --all ``` - use the `list=pallets` syntax in `run_all_benchmarks.sh` script cc ggwpez --------- Co-authored-by: Bastian Köcher --- prdoc/pr_3395.prdoc | 14 +++++ substrate/scripts/run_all_benchmarks.sh | 11 ++-- .../benchmarking-cli/src/pallet/command.rs | 54 ++++++++++++++----- .../frame/benchmarking-cli/src/pallet/mod.rs | 32 ++++++++--- 4 files changed, 86 insertions(+), 25 deletions(-) create mode 100644 prdoc/pr_3395.prdoc diff --git a/prdoc/pr_3395.prdoc b/prdoc/pr_3395.prdoc new file mode 100644 index 000000000000..a10fb84fd7f5 --- /dev/null +++ b/prdoc/pr_3395.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "`benchmarking-cli` `pallet` subcommand: refactor `--list` and add `--all` option" + +doc: + - audience: Node Dev + description: | + `pallet` subcommand's `--list` now accepts two values: "all" and "pallets". The former will list all available benchmarks, the latter will list only pallets. + Also adds `--all` to run all the available benchmarks and `--no-csv-header` to omit the csv-style header in the output. + NOTE: changes are backward compatible. + +crates: + - name: frame-benchmarking-cli diff --git a/substrate/scripts/run_all_benchmarks.sh b/substrate/scripts/run_all_benchmarks.sh index 83848100a7e5..6dd7cede319f 100755 --- a/substrate/scripts/run_all_benchmarks.sh +++ b/substrate/scripts/run_all_benchmarks.sh @@ -59,11 +59,12 @@ done if [ "$skip_build" != true ] then echo "[+] Compiling Substrate benchmarks..." - cargo build --profile=production --locked --features=runtime-benchmarks --bin substrate + cargo build --profile=production --locked --features=runtime-benchmarks --bin substrate-node fi # The executable to use. -SUBSTRATE=./target/production/substrate +# Parent directory because of the monorepo structure. +SUBSTRATE=../target/production/substrate-node # Manually exclude some pallets. EXCLUDED_PALLETS=( @@ -80,11 +81,7 @@ EXCLUDED_PALLETS=( # Load all pallet names in an array. ALL_PALLETS=($( - $SUBSTRATE benchmark pallet --list --chain=dev |\ - tail -n+2 |\ - cut -d',' -f1 |\ - sort |\ - uniq + $SUBSTRATE benchmark pallet --list=pallets --no-csv-header --chain=dev )) # Filter out the excluded pallets by concatenating the arrays and discarding duplicates. diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index dce49db15f7d..d5dc6226ab9f 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::{writer, PalletCmd}; +use super::{writer, ListOutput, PalletCmd}; use codec::{Decode, Encode}; use frame_benchmarking::{ Analysis, BenchmarkBatch, BenchmarkBatchSplitResults, BenchmarkList, BenchmarkParameter, @@ -39,7 +39,13 @@ use sp_externalities::Extensions; use sp_keystore::{testing::MemoryKeystore, KeystoreExt}; use sp_runtime::traits::Hash; use sp_state_machine::StateMachine; -use std::{collections::HashMap, fmt::Debug, fs, str::FromStr, time}; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + fmt::Debug, + fs, + str::FromStr, + time, +}; /// Logging target const LOG_TARGET: &'static str = "frame::benchmark::pallet"; @@ -191,6 +197,7 @@ impl PalletCmd { let spec = config.chain_spec; let pallet = self.pallet.clone().unwrap_or_default(); let pallet = pallet.as_bytes(); + let extrinsic = self.extrinsic.clone().unwrap_or_default(); let extrinsic_split: Vec<&str> = extrinsic.split(',').collect(); let extrinsics: Vec<_> = extrinsic_split.iter().map(|x| x.trim().as_bytes()).collect(); @@ -309,9 +316,8 @@ impl PalletCmd { return Err("No benchmarks found which match your input.".into()) } - if self.list { - // List benchmarks instead of running them - list_benchmark(benchmarks_to_run); + if let Some(list_output) = self.list { + list_benchmark(benchmarks_to_run, list_output, self.no_csv_header); return Ok(()) } @@ -755,19 +761,43 @@ impl CliConfiguration for PalletCmd { /// List the benchmarks available in the runtime, in a CSV friendly format. fn list_benchmark( - mut benchmarks_to_run: Vec<( + benchmarks_to_run: Vec<( Vec, Vec, Vec<(BenchmarkParameter, u32, u32)>, Vec<(String, String)>, )>, + list_output: ListOutput, + no_csv_header: bool, ) { - // Sort and de-dub by pallet and function name. - benchmarks_to_run.sort_by(|(pa, sa, _, _), (pb, sb, _, _)| (pa, sa).cmp(&(pb, sb))); - benchmarks_to_run.dedup_by(|(pa, sa, _, _), (pb, sb, _, _)| (pa, sa) == (pb, sb)); + let mut benchmarks = BTreeMap::new(); - println!("pallet, benchmark"); - for (pallet, extrinsic, _, _) in benchmarks_to_run { - println!("{}, {}", String::from_utf8_lossy(&pallet), String::from_utf8_lossy(&extrinsic)); + // Sort and de-dub by pallet and function name. + benchmarks_to_run.iter().for_each(|(pallet, extrinsic, _, _)| { + benchmarks + .entry(String::from_utf8_lossy(pallet).to_string()) + .or_insert_with(BTreeSet::new) + .insert(String::from_utf8_lossy(extrinsic).to_string()); + }); + + match list_output { + ListOutput::All => { + if !no_csv_header { + println!("pallet,extrinsic"); + } + for (pallet, extrinsics) in benchmarks { + for extrinsic in extrinsics { + println!("{pallet},{extrinsic}"); + } + } + }, + ListOutput::Pallets => { + if !no_csv_header { + println!("pallet"); + }; + for pallet in benchmarks.keys() { + println!("{pallet}"); + } + }, } } diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs index c69ce1765fc9..6dc56c0724ea 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -19,6 +19,7 @@ mod command; mod writer; use crate::shared::HostInfoParams; +use clap::ValueEnum; use sc_cli::{ WasmExecutionMethod, WasmtimeInstantiationStrategy, DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, DEFAULT_WASM_EXECUTION_METHOD, @@ -31,17 +32,32 @@ fn parse_pallet_name(pallet: &str) -> std::result::Result { Ok(pallet.replace("-", "_")) } +/// List options for available benchmarks. 
+#[derive(Debug, Clone, Copy, ValueEnum)] +pub enum ListOutput { + /// List all available pallets and extrinsics. + All, + /// List all available pallets only. + Pallets, +} + /// Benchmark the extrinsic weight of FRAME Pallets. #[derive(Debug, clap::Parser)] pub struct PalletCmd { /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). - #[arg(short, long, value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input"])] + #[arg(short, long, value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))] pub pallet: Option, /// Select an extrinsic inside the pallet to benchmark, or `*` for all. - #[arg(short, long, required_unless_present_any = ["list", "json_input"])] + #[arg(short, long, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))] pub extrinsic: Option, + /// Run benchmarks for all pallets and extrinsics. + /// + /// This is equivalent to running `--pallet * --extrinsic *`. + #[arg(long)] + pub all: bool, + /// Select how many samples we should take across the variable components. #[arg(short, long, default_value_t = 50)] pub steps: u32, @@ -158,11 +174,15 @@ pub struct PalletCmd { #[arg(long = "db-cache", value_name = "MiB", default_value_t = 1024)] pub database_cache_size: u32, - /// List the benchmarks that match your query rather than running them. + /// List and print available benchmarks in a csv-friendly format. /// - /// When nothing is provided, we list all benchmarks. - #[arg(long)] - pub list: bool, + /// NOTE: `num_args` and `require_equals` are required to allow `--list` + #[arg(long, value_enum, ignore_case = true, num_args = 0..=1, require_equals = true, default_missing_value("All"))] + pub list: Option, + + /// Don't include csv header when listing benchmarks. + #[arg(long, requires("list"))] + pub no_csv_header: bool, /// If enabled, the storage info is not displayed in the output next to the analysis. 
///

From 1b624c50728b7f38b31075c3dd8f9a55f0ceee55 Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Wed, 21 Feb 2024 18:31:49 +0200
Subject: [PATCH 17/51] rpc-v2/tx: Remove the broadcast event from
 `transaction_submitAndWatch` (#3321)

This PR backports the changes from the rpc-v2 spec:
https://github.com/paritytech/json-rpc-interface-spec/pull/134

The `Broadcasted` event has been removed:
- it is hard to enforce a `Dropped { broadcasted: bool }` event in cases of a load-balancer being placed in front of an RPC server
- when the server exits, it is impossible to guarantee this field if the server did not previously send a `Broadcasted` event
- the number of peers reported by this event does not guarantee that the peers are unique; the same peer can disconnect and reconnect, inflating this metric
- the number of peers that receive this transaction offers no guarantee about the transaction being included in the chain at a later time

cc @paritytech/subxt-team

---------

Signed-off-by: Alexandru Vasile
Co-authored-by: James Wilson
---
 .../rpc-spec-v2/src/transaction/event.rs      | 59 ++---------
 .../client/rpc-spec-v2/src/transaction/mod.rs |  5 +-
 .../src/transaction/transaction.rs            | 97 ++++++-------------
 3 files changed, 38 insertions(+), 123 deletions(-)

diff --git a/substrate/client/rpc-spec-v2/src/transaction/event.rs b/substrate/client/rpc-spec-v2/src/transaction/event.rs
index 8b80fcda17be..882ac8490b07 100644
--- a/substrate/client/rpc-spec-v2/src/transaction/event.rs
+++ b/substrate/client/rpc-spec-v2/src/transaction/event.rs
@@ -20,23 +20,6 @@
 use serde::{Deserialize, Serialize};
 
-/// The transaction was broadcasted to a number of peers.
-///
-/// # Note
-///
-/// The RPC does not guarantee that the peers have received the
-/// transaction.
-///
-/// When the number of peers is zero, the event guarantees that
-/// shutting down the local node will lead to the transaction
-/// not being included in the chain.
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-pub struct TransactionBroadcasted {
-	/// The number of peers the transaction was broadcasted to.
-	pub num_peers: usize,
-}
-
 /// The transaction was included in a block of the chain.
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
@@ -59,9 +42,6 @@ pub struct TransactionError {
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct TransactionDropped {
-	/// True if the transaction was broadcasted to other peers and
-	/// may still be included in the block.
-	pub broadcasted: bool,
 	/// Reason of the event.
 	pub error: String,
 }
@@ -70,20 +50,17 @@ pub struct TransactionDropped {
 ///
 /// The status events can be grouped based on their kinds as:
 ///
-/// 1. Runtime validated the transaction:
+/// 1. Runtime validated the transaction and it entered the pool:
 ///    - `Validated`
 ///
-/// 2. Inside the `Ready` queue:
-///    - `Broadcast`
-///
-/// 3. Leaving the pool:
+/// 2. Leaving the pool:
 ///    - `BestChainBlockIncluded`
 ///    - `Invalid`
 ///
-/// 4. Block finalized:
+/// 3. Block finalized:
 ///    - `Finalized`
 ///
-/// 5. At any time:
+/// 4. At any time:
 ///    - `Dropped`
 ///    - `Error`
 ///
@@ -101,8 +78,6 @@ pub struct TransactionDropped {
 pub enum TransactionEvent<Hash> {
	/// The transaction was validated by the runtime.
	Validated,
-	/// The transaction was broadcasted to a number of peers.
- Broadcasted(TransactionBroadcasted), /// The transaction was included in a best block of the chain. /// /// # Note @@ -159,7 +134,6 @@ enum TransactionEventBlockIR { #[serde(tag = "event")] enum TransactionEventNonBlockIR { Validated, - Broadcasted(TransactionBroadcasted), Error(TransactionError), Invalid(TransactionError), Dropped(TransactionDropped), @@ -186,8 +160,6 @@ impl From> for TransactionEventIR { match value { TransactionEvent::Validated => TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Validated), - TransactionEvent::Broadcasted(event) => - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Broadcasted(event)), TransactionEvent::BestChainBlockIncluded(event) => TransactionEventIR::Block(TransactionEventBlockIR::BestChainBlockIncluded(event)), TransactionEvent::Finalized(event) => @@ -207,8 +179,6 @@ impl From> for TransactionEvent { match value { TransactionEventIR::NonBlock(status) => match status { TransactionEventNonBlockIR::Validated => TransactionEvent::Validated, - TransactionEventNonBlockIR::Broadcasted(event) => - TransactionEvent::Broadcasted(event), TransactionEventNonBlockIR::Error(event) => TransactionEvent::Error(event), TransactionEventNonBlockIR::Invalid(event) => TransactionEvent::Invalid(event), TransactionEventNonBlockIR::Dropped(event) => TransactionEvent::Dropped(event), @@ -239,19 +209,6 @@ mod tests { assert_eq!(event_dec, event); } - #[test] - fn broadcasted_event() { - let event: TransactionEvent<()> = - TransactionEvent::Broadcasted(TransactionBroadcasted { num_peers: 2 }); - let ser = serde_json::to_string(&event).unwrap(); - - let exp = r#"{"event":"broadcasted","numPeers":2}"#; - assert_eq!(ser, exp); - - let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); - assert_eq!(event_dec, event); - } - #[test] fn best_chain_event() { let event: TransactionEvent<()> = TransactionEvent::BestChainBlockIncluded(None); @@ -320,13 +277,11 @@ mod tests { #[test] fn dropped_event() { - let event: TransactionEvent<()> = TransactionEvent::Dropped(TransactionDropped { - broadcasted: true, - error: "abc".to_string(), - }); + let event: TransactionEvent<()> = + TransactionEvent::Dropped(TransactionDropped { error: "abc".to_string() }); let ser = serde_json::to_string(&event).unwrap(); - let exp = r#"{"event":"dropped","broadcasted":true,"error":"abc"}"#; + let exp = r#"{"event":"dropped","error":"abc"}"#; assert_eq!(ser, exp); let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); diff --git a/substrate/client/rpc-spec-v2/src/transaction/mod.rs b/substrate/client/rpc-spec-v2/src/transaction/mod.rs index 74268a5372a3..514ccf047dc2 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/mod.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/mod.rs @@ -35,9 +35,6 @@ pub mod transaction; pub mod transaction_broadcast; pub use api::{TransactionApiServer, TransactionBroadcastApiServer}; -pub use event::{ - TransactionBlock, TransactionBroadcasted, TransactionDropped, TransactionError, - TransactionEvent, -}; +pub use event::{TransactionBlock, TransactionDropped, TransactionError, TransactionEvent}; pub use transaction::Transaction; pub use transaction_broadcast::TransactionBroadcast; diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs index 17889b3bad2a..d44006392dca 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs @@ -22,10 +22,7 @@ use 
crate::{ transaction::{ api::TransactionApiServer, error::Error, - event::{ - TransactionBlock, TransactionBroadcasted, TransactionDropped, TransactionError, - TransactionEvent, - }, + event::{TransactionBlock, TransactionDropped, TransactionError, TransactionEvent}, }, SubscriptionTaskExecutor, }; @@ -113,9 +110,7 @@ where match submit.await { Ok(stream) => { - let mut state = TransactionState::new(); - let stream = - stream.filter_map(move |event| async move { state.handle_event(event) }); + let stream = stream.filter_map(move |event| async move { handle_event(event) }); pipe_from_stream(pending, stream.boxed()).await; }, Err(err) => { @@ -131,66 +126,34 @@ where } } -/// The transaction's state that needs to be preserved between -/// multiple events generated by the transaction-pool. -/// -/// # Note -/// -/// In the future, the RPC server can submit only the last event when multiple -/// identical events happen in a row. -#[derive(Clone, Copy)] -struct TransactionState { - /// True if the transaction was previously broadcasted. - broadcasted: bool, -} - -impl TransactionState { - /// Construct a new [`TransactionState`]. - pub fn new() -> Self { - TransactionState { broadcasted: false } - } - - /// Handle events generated by the transaction-pool and convert them - /// to the new API expected state. - #[inline] - pub fn handle_event( - &mut self, - event: TransactionStatus, - ) -> Option> { - match event { - TransactionStatus::Ready | TransactionStatus::Future => - Some(TransactionEvent::::Validated), - TransactionStatus::Broadcast(peers) => { - // Set the broadcasted flag once if we submitted the transaction to - // at least one peer. - self.broadcasted = self.broadcasted || !peers.is_empty(); - - Some(TransactionEvent::Broadcasted(TransactionBroadcasted { - num_peers: peers.len(), - })) - }, - TransactionStatus::InBlock((hash, index)) => - Some(TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { - hash, - index, - }))), - TransactionStatus::Retracted(_) => Some(TransactionEvent::BestChainBlockIncluded(None)), - TransactionStatus::FinalityTimeout(_) => - Some(TransactionEvent::Dropped(TransactionDropped { - broadcasted: self.broadcasted, - error: "Maximum number of finality watchers has been reached".into(), - })), - TransactionStatus::Finalized((hash, index)) => - Some(TransactionEvent::Finalized(TransactionBlock { hash, index })), - TransactionStatus::Usurped(_) => Some(TransactionEvent::Invalid(TransactionError { - error: "Extrinsic was rendered invalid by another extrinsic".into(), - })), - TransactionStatus::Dropped => Some(TransactionEvent::Invalid(TransactionError { - error: "Extrinsic dropped from the pool due to exceeding limits".into(), - })), - TransactionStatus::Invalid => Some(TransactionEvent::Invalid(TransactionError { - error: "Extrinsic marked as invalid".into(), +/// Handle events generated by the transaction-pool and convert them +/// to the new API expected state. 
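+///
+/// Events with no equivalent in the new API map to `None`; currently that is only
+/// `TransactionStatus::Broadcast`, which was removed from the spec.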
+#[inline] +pub fn handle_event( + event: TransactionStatus, +) -> Option> { + match event { + TransactionStatus::Ready | TransactionStatus::Future => + Some(TransactionEvent::::Validated), + TransactionStatus::InBlock((hash, index)) => + Some(TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { hash, index }))), + TransactionStatus::Retracted(_) => Some(TransactionEvent::BestChainBlockIncluded(None)), + TransactionStatus::FinalityTimeout(_) => + Some(TransactionEvent::Dropped(TransactionDropped { + error: "Maximum number of finality watchers has been reached".into(), })), - } + TransactionStatus::Finalized((hash, index)) => + Some(TransactionEvent::Finalized(TransactionBlock { hash, index })), + TransactionStatus::Usurped(_) => Some(TransactionEvent::Invalid(TransactionError { + error: "Extrinsic was rendered invalid by another extrinsic".into(), + })), + TransactionStatus::Dropped => Some(TransactionEvent::Invalid(TransactionError { + error: "Extrinsic dropped from the pool due to exceeding limits".into(), + })), + TransactionStatus::Invalid => Some(TransactionEvent::Invalid(TransactionError { + error: "Extrinsic marked as invalid".into(), + })), + // These are the events that are not supported by the new API. + TransactionStatus::Broadcast(_) => None, } } From 318fed32f8ff42e451ac9f750c25c0f2f85320ad Mon Sep 17 00:00:00 2001 From: tmpolaczyk <44604217+tmpolaczyk@users.noreply.github.com> Date: Wed, 21 Feb 2024 17:38:06 +0100 Subject: [PATCH 18/51] ProposerFactory impl Clone (#3389) In Tanssi, we need a way to stop the collator code and then start it again. This is to support rotating the same collator between different runtimes. Currently, this works very well, except for the proposer metrics, because they only get registered the first time they are started. Afterwards, we see this warning log: > Failed to register proposer prometheus metrics: Duplicate metrics collector registration attempted ~~So this PR adds a method to set metrics, to allow us to register metrics manually before creating the `ProposerFactory`, and then clone the same metrics every time we need to start the collator.~~ Implemented Clone instead --- .../basic-authorship/src/basic_authorship.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index c07f3e639c3e..fdc3d4918de3 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -87,6 +87,22 @@ pub struct ProposerFactory { _phantom: PhantomData, } +impl Clone for ProposerFactory { + fn clone(&self) -> Self { + Self { + spawn_handle: self.spawn_handle.clone(), + client: self.client.clone(), + transaction_pool: self.transaction_pool.clone(), + metrics: self.metrics.clone(), + default_block_size_limit: self.default_block_size_limit, + soft_deadline_percent: self.soft_deadline_percent, + telemetry: self.telemetry.clone(), + include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation, + _phantom: self._phantom, + } + } +} + impl ProposerFactory { /// Create a new proposer factory. 
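	/// Cloning the factory (now possible via the `Clone` impl above) lets a
	/// restarted collator reuse the already-registered Prometheus metrics.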
///

From cd91c6b782b60906597ed28c4e4a4fdddd8428c9 Mon Sep 17 00:00:00 2001
From: Matteo Muraca <56828990+muraca@users.noreply.github.com>
Date: Thu, 22 Feb 2024 00:02:31 +0100
Subject: [PATCH 19/51] removed `pallet::getter` from example pallets (#3371)

part of #3326

@ggwpez @kianenigma @shawntabrizi

---------

Signed-off-by: Matteo Muraca
Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
---
 .../pallets/template/src/lib.rs               |  1 -
 .../pallets/template/src/tests.rs             |  4 ++--
 prdoc/pr_3371.prdoc                           | 19 +++++++++++++++++++
 .../node-template/pallets/template/src/lib.rs |  4 +---
 .../pallets/template/src/tests.rs             |  4 ++--
 .../frame/examples/basic/src/benchmarking.rs  |  2 +-
 substrate/frame/examples/basic/src/lib.rs     | 14 +++-----------
 substrate/frame/examples/basic/src/tests.rs   | 12 ++++++------
 .../examples/kitchensink/src/benchmarking.rs  |  2 +-
 .../frame/examples/kitchensink/src/lib.rs     |  1 -
 .../frame/examples/offchain-worker/src/lib.rs | 12 +++++-------
 substrate/frame/examples/split/src/lib.rs     |  2 +-
 12 files changed, 41 insertions(+), 36 deletions(-)
 create mode 100644 prdoc/pr_3371.prdoc

diff --git a/cumulus/parachain-template/pallets/template/src/lib.rs b/cumulus/parachain-template/pallets/template/src/lib.rs
index 5f3252bfc3a7..24226d6cf406 100644
--- a/cumulus/parachain-template/pallets/template/src/lib.rs
+++ b/cumulus/parachain-template/pallets/template/src/lib.rs
@@ -32,7 +32,6 @@ pub mod pallet {
 	// The pallet's runtime storage items.
 	// https://docs.substrate.io/v3/runtime/storage
 	#[pallet::storage]
-	#[pallet::getter(fn something)]
 	// Learn more about declaring storage items:
 	// https://docs.substrate.io/v3/runtime/storage#declaring-storage-items
 	pub type Something<T> = StorageValue<_, u32>;
diff --git a/cumulus/parachain-template/pallets/template/src/tests.rs b/cumulus/parachain-template/pallets/template/src/tests.rs
index 527aec8ed00c..9ad3076be2cc 100644
--- a/cumulus/parachain-template/pallets/template/src/tests.rs
+++ b/cumulus/parachain-template/pallets/template/src/tests.rs
@@ -1,4 +1,4 @@
-use crate::{mock::*, Error};
+use crate::{mock::*, Error, Something};
 use frame_support::{assert_noop, assert_ok};
 
 #[test]
@@ -7,7 +7,7 @@ fn it_works_for_default_value() {
 		// Dispatch a signed extrinsic.
 		assert_ok!(TemplateModule::do_something(RuntimeOrigin::signed(1), 42));
 		// Read pallet storage and assert an expected result.
-		assert_eq!(TemplateModule::something(), Some(42));
+		assert_eq!(Something::<Test>::get(), Some(42));
 	});
 }
 
diff --git a/prdoc/pr_3371.prdoc b/prdoc/pr_3371.prdoc
new file mode 100644
index 000000000000..605d540772f0
--- /dev/null
+++ b/prdoc/pr_3371.prdoc
@@ -0,0 +1,19 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: removed `pallet::getter` from example pallets
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removes all the `pallet::getter` usages from the template pallets found in the Substrate and Cumulus template nodes, and from the Substrate example pallets.
+      The purpose is to discourage developers from using this macro, which is currently being removed and will soon be deprecated.
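+      Storage items remain readable through the storage types themselves, e.g. `Something::<T>::get()` rather than the removed `something()` getter.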
+ +crates: + - name: pallet-template + - name: pallet-parachain-template + - name: pallet-example-basic + - name: pallet-example-kitchensink + - name: pallet-example-offchain-worker + - name: pallet-example-split + diff --git a/substrate/bin/node-template/pallets/template/src/lib.rs b/substrate/bin/node-template/pallets/template/src/lib.rs index 4a2e53baa774..90dfe3701458 100644 --- a/substrate/bin/node-template/pallets/template/src/lib.rs +++ b/substrate/bin/node-template/pallets/template/src/lib.rs @@ -90,9 +90,7 @@ pub mod pallet { /// /// In this template, we are declaring a storage item called `Something` that stores a single /// `u32` value. Learn more about runtime storage here: - /// The [`getter`] macro generates a function to conveniently retrieve the value from storage. #[pallet::storage] - #[pallet::getter(fn something)] pub type Something = StorageValue<_, u32>; /// Events that functions in this pallet can emit. @@ -187,7 +185,7 @@ pub mod pallet { let _who = ensure_signed(origin)?; // Read a value from storage. - match Pallet::::something() { + match Something::::get() { // Return an error if the value has not been set. None => Err(Error::::NoneValue.into()), Some(old) => { diff --git a/substrate/bin/node-template/pallets/template/src/tests.rs b/substrate/bin/node-template/pallets/template/src/tests.rs index 7c2b853ee4dc..83e4bea7377b 100644 --- a/substrate/bin/node-template/pallets/template/src/tests.rs +++ b/substrate/bin/node-template/pallets/template/src/tests.rs @@ -1,4 +1,4 @@ -use crate::{mock::*, Error, Event}; +use crate::{mock::*, Error, Event, Something}; use frame_support::{assert_noop, assert_ok}; #[test] @@ -9,7 +9,7 @@ fn it_works_for_default_value() { // Dispatch a signed extrinsic. assert_ok!(TemplateModule::do_something(RuntimeOrigin::signed(1), 42)); // Read pallet storage and assert an expected result. - assert_eq!(TemplateModule::something(), Some(42)); + assert_eq!(Something::::get(), Some(42)); // Assert that the correct event was deposited System::assert_last_event(Event::SomethingStored { something: 42, who: 1 }.into()); }); diff --git a/substrate/frame/examples/basic/src/benchmarking.rs b/substrate/frame/examples/basic/src/benchmarking.rs index 4b2ebb41fbda..65ca3089aba4 100644 --- a/substrate/frame/examples/basic/src/benchmarking.rs +++ b/substrate/frame/examples/basic/src/benchmarking.rs @@ -48,7 +48,7 @@ mod benchmarks { set_dummy(RawOrigin::Root, value); // The execution phase is just running `set_dummy` extrinsic call // This is the optional benchmark verification phase, asserting certain states. - assert_eq!(Pallet::::dummy(), Some(value)) + assert_eq!(Dummy::::get(), Some(value)) } // An example method that returns a Result that can be called within a benchmark diff --git a/substrate/frame/examples/basic/src/lib.rs b/substrate/frame/examples/basic/src/lib.rs index dad4d01978f9..12cadc969fd7 100644 --- a/substrate/frame/examples/basic/src/lib.rs +++ b/substrate/frame/examples/basic/src/lib.rs @@ -286,9 +286,7 @@ pub mod pallet { let _sender = ensure_signed(origin)?; // Read the value of dummy from storage. - // let dummy = Self::dummy(); - // Will also work using the `::get` on the storage item type itself: - // let dummy = >::get(); + // let dummy = Dummy::::get(); // Calculate the new value. // let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); @@ -381,20 +379,14 @@ pub mod pallet { // - `Foo::put(1); Foo::get()` returns `1`; // - `Foo::kill(); Foo::get()` returns `0` (u32::default()). 
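	// With the getters removed, read these items through the storage types directly,
	// e.g. `Dummy::<T>::get()` and `Foo::<T>::get()`.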
#[pallet::storage] - // The getter attribute generate a function on `Pallet` placeholder: - // `fn getter_name() -> Type` for basic value items or - // `fn getter_name(key: KeyType) -> ValueType` for map items. - #[pallet::getter(fn dummy)] pub(super) type Dummy = StorageValue<_, T::Balance>; // A map that has enumerable entries. #[pallet::storage] - #[pallet::getter(fn bar)] pub(super) type Bar = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance>; // this one uses the query kind: `ValueQuery`, we'll demonstrate the usage of 'mutate' API. #[pallet::storage] - #[pallet::getter(fn foo)] pub(super) type Foo = StorageValue<_, T::Balance, ValueQuery>; #[pallet::storage] @@ -433,10 +425,10 @@ impl Pallet { fn accumulate_foo(origin: T::RuntimeOrigin, increase_by: T::Balance) -> DispatchResult { let _sender = ensure_signed(origin)?; - let prev = >::get(); + let prev = Foo::::get(); // Because Foo has 'default', the type of 'foo' in closure is the raw type instead of an // Option<> type. - let result = >::mutate(|foo| { + let result = Foo::::mutate(|foo| { *foo = foo.saturating_add(increase_by); *foo }); diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index 9434ace35ffe..207e46e428dd 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -119,25 +119,25 @@ fn it_works_for_optional_value() { // Check that GenesisBuilder works properly. let val1 = 42; let val2 = 27; - assert_eq!(Example::dummy(), Some(val1)); + assert_eq!(Dummy::::get(), Some(val1)); // Check that accumulate works when we have Some value in Dummy already. assert_ok!(Example::accumulate_dummy(RuntimeOrigin::signed(1), val2)); - assert_eq!(Example::dummy(), Some(val1 + val2)); + assert_eq!(Dummy::::get(), Some(val1 + val2)); // Check that accumulate works when we Dummy has None in it. >::on_initialize(2); assert_ok!(Example::accumulate_dummy(RuntimeOrigin::signed(1), val1)); - assert_eq!(Example::dummy(), Some(val1 + val2 + val1)); + assert_eq!(Dummy::::get(), Some(val1 + val2 + val1)); }); } #[test] fn it_works_for_default_value() { new_test_ext().execute_with(|| { - assert_eq!(Example::foo(), 24); + assert_eq!(Foo::::get(), 24); assert_ok!(Example::accumulate_foo(RuntimeOrigin::signed(1), 1)); - assert_eq!(Example::foo(), 25); + assert_eq!(Foo::::get(), 25); }); } @@ -146,7 +146,7 @@ fn set_dummy_works() { new_test_ext().execute_with(|| { let test_val = 133; assert_ok!(Example::set_dummy(RuntimeOrigin::root(), test_val.into())); - assert_eq!(Example::dummy(), Some(test_val)); + assert_eq!(Dummy::::get(), Some(test_val)); }); } diff --git a/substrate/frame/examples/kitchensink/src/benchmarking.rs b/substrate/frame/examples/kitchensink/src/benchmarking.rs index 24da581fc967..5f1d378e06fe 100644 --- a/substrate/frame/examples/kitchensink/src/benchmarking.rs +++ b/substrate/frame/examples/kitchensink/src/benchmarking.rs @@ -51,7 +51,7 @@ mod benchmarks { set_foo(RawOrigin::Root, value, 10u128); // The execution phase is just running `set_foo` extrinsic call // This is the optional benchmark verification phase, asserting certain states. 
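		// (With the getter removed, the verification below reads the `Foo` storage item directly.)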
- assert_eq!(Pallet::::foo(), Some(value)) + assert_eq!(Foo::::get(), Some(value)) } // This line generates test cases for benchmarking, and could be run by: diff --git a/substrate/frame/examples/kitchensink/src/lib.rs b/substrate/frame/examples/kitchensink/src/lib.rs index 18429bc967d7..b7425b0c0846 100644 --- a/substrate/frame/examples/kitchensink/src/lib.rs +++ b/substrate/frame/examples/kitchensink/src/lib.rs @@ -125,7 +125,6 @@ pub mod pallet { #[pallet::storage] #[pallet::unbounded] // optional #[pallet::storage_prefix = "OtherFoo"] // optional - #[pallet::getter(fn foo)] // optional pub type Foo = StorageValue; #[pallet::type_value] diff --git a/substrate/frame/examples/offchain-worker/src/lib.rs b/substrate/frame/examples/offchain-worker/src/lib.rs index 6c1fa6ea8ec4..d21c8b4cfd24 100644 --- a/substrate/frame/examples/offchain-worker/src/lib.rs +++ b/substrate/frame/examples/offchain-worker/src/lib.rs @@ -332,7 +332,6 @@ pub mod pallet { /// /// This is used to calculate average price, should have bounded size. #[pallet::storage] - #[pallet::getter(fn prices)] pub(super) type Prices = StorageValue<_, BoundedVec, ValueQuery>; /// Defines the block when next unsigned transaction will be accepted. @@ -341,7 +340,6 @@ pub mod pallet { /// we only allow one transaction every `T::UnsignedInterval` blocks. /// This storage entry defines when new transaction is going to be accepted. #[pallet::storage] - #[pallet::getter(fn next_unsigned_at)] pub(super) type NextUnsignedAt = StorageValue<_, BlockNumberFor, ValueQuery>; } @@ -479,7 +477,7 @@ impl Pallet { ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. - let next_unsigned_at = >::get(); + let next_unsigned_at = NextUnsignedAt::::get(); if next_unsigned_at > block_number { return Err("Too early to send unsigned transaction") } @@ -513,7 +511,7 @@ impl Pallet { ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. - let next_unsigned_at = >::get(); + let next_unsigned_at = NextUnsignedAt::::get(); if next_unsigned_at > block_number { return Err("Too early to send unsigned transaction") } @@ -543,7 +541,7 @@ impl Pallet { ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. - let next_unsigned_at = >::get(); + let next_unsigned_at = NextUnsignedAt::::get(); if next_unsigned_at > block_number { return Err("Too early to send unsigned transaction") } @@ -664,7 +662,7 @@ impl Pallet { /// Calculate current average price. fn average_price() -> Option { - let prices = >::get(); + let prices = Prices::::get(); if prices.is_empty() { None } else { @@ -677,7 +675,7 @@ impl Pallet { new_price: &u32, ) -> TransactionValidity { // Now let's check if the transaction has any chance to succeed. - let next_unsigned_at = >::get(); + let next_unsigned_at = NextUnsignedAt::::get(); if &next_unsigned_at > block_number { return InvalidTransaction::Stale.into() } diff --git a/substrate/frame/examples/split/src/lib.rs b/substrate/frame/examples/split/src/lib.rs index 74d2e0cc24b7..5245d90e390c 100644 --- a/substrate/frame/examples/split/src/lib.rs +++ b/substrate/frame/examples/split/src/lib.rs @@ -107,7 +107,7 @@ pub mod pallet { let _who = ensure_signed(origin)?; // Read a value from storage. - match >::get() { + match Something::::get() { // Return an error if the value has not been set. 
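			// (`Something::<T>::get()` returns `Option<u32>` in this example; `None` means the value was never set.)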
None => return Err(Error::::NoneValue.into()), Some(old) => { From e76b244853c52d523e1d5c7a28948573df60df46 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Thu, 22 Feb 2024 01:35:01 +0100 Subject: [PATCH 20/51] [FRAME] Test for sane genesis default (#3412) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/paritytech/polkadot-sdk/issues/2713 --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> --- cumulus/pallets/aura-ext/src/lib.rs | 6 --- .../chains/relays/rococo/src/genesis.rs | 2 +- .../chains/relays/westend/src/genesis.rs | 2 +- prdoc/pr_3412.prdoc | 17 ++++++++ .../cli/tests/res/default_genesis_config.json | 8 +++- substrate/bin/node/testing/src/genesis.rs | 39 ++----------------- substrate/client/chain-spec/src/chain_spec.rs | 10 +---- .../chain-spec/src/genesis_config_builder.rs | 2 +- .../client/consensus/babe/rpc/src/lib.rs | 2 +- substrate/frame/babe/src/lib.rs | 6 +-- substrate/frame/session/src/lib.rs | 18 +-------- .../src/construct_runtime/expand/config.rs | 11 ++++++ substrate/frame/support/src/lib.rs | 2 +- .../primitives/consensus/babe/src/lib.rs | 6 +++ .../test-utils/runtime/src/genesismap.rs | 1 - substrate/test-utils/runtime/src/lib.rs | 2 +- 16 files changed, 55 insertions(+), 79 deletions(-) create mode 100644 prdoc/pr_3412.prdoc diff --git a/cumulus/pallets/aura-ext/src/lib.rs b/cumulus/pallets/aura-ext/src/lib.rs index 34a41557152d..31b571816a0c 100644 --- a/cumulus/pallets/aura-ext/src/lib.rs +++ b/cumulus/pallets/aura-ext/src/lib.rs @@ -117,12 +117,6 @@ pub mod pallet { impl BuildGenesisConfig for GenesisConfig { fn build(&self) { let authorities = Aura::::authorities(); - - assert!( - !authorities.is_empty(), - "AuRa authorities empty, maybe wrong order in `construct_runtime!`?", - ); - Authorities::::put(authorities); } } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs index 7db9679f1c3e..55437645b052 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs @@ -78,7 +78,7 @@ pub fn genesis() -> Storage { }, babe: rococo_runtime::BabeConfig { authorities: Default::default(), - epoch_config: Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), + epoch_config: rococo_runtime::BABE_GENESIS_EPOCH_CONFIG, ..Default::default() }, sudo: rococo_runtime::SudoConfig { diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs index 578b307dd933..700b80e63f6c 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs @@ -94,7 +94,7 @@ pub fn genesis() -> Storage { }, babe: westend_runtime::BabeConfig { authorities: Default::default(), - epoch_config: Some(westend_runtime::BABE_GENESIS_EPOCH_CONFIG), + epoch_config: westend_runtime::BABE_GENESIS_EPOCH_CONFIG, ..Default::default() }, configuration: westend_runtime::ConfigurationConfig { config: get_host_config() }, diff --git a/prdoc/pr_3412.prdoc b/prdoc/pr_3412.prdoc new file mode 100644 index 000000000000..d68f05e45dcf --- /dev/null +++ b/prdoc/pr_3412.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot 
SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[FRAME] Add genesis test and remove some checks" + +doc: + - audience: Runtime Dev + description: | + The construct_runtime macro now generates a test to assert that all `GenesisConfig`s of all + pallets can be build within the runtime. This ensures that the `BuildGenesisConfig` runtime + API works. + Further, some checks from a few pallets were removed to make this pass. + +crates: + - name: pallet-babe + - name: pallet-aura-ext + - name: pallet-session diff --git a/substrate/bin/node/cli/tests/res/default_genesis_config.json b/substrate/bin/node/cli/tests/res/default_genesis_config.json index 1465a6497cad..e21fbb47da8c 100644 --- a/substrate/bin/node/cli/tests/res/default_genesis_config.json +++ b/substrate/bin/node/cli/tests/res/default_genesis_config.json @@ -2,7 +2,13 @@ "system": {}, "babe": { "authorities": [], - "epochConfig": null + "epochConfig": { + "allowed_slots": "PrimaryAndSecondaryVRFSlots", + "c": [ + 1, + 4 + ] + } }, "indices": { "indices": [] diff --git a/substrate/bin/node/testing/src/genesis.rs b/substrate/bin/node/testing/src/genesis.rs index 6ec21fbe0934..c79612d68444 100644 --- a/substrate/bin/node/testing/src/genesis.rs +++ b/substrate/bin/node/testing/src/genesis.rs @@ -20,9 +20,8 @@ use crate::keyring::*; use kitchensink_runtime::{ - constants::currency::*, AccountId, AssetsConfig, BabeConfig, BalancesConfig, GluttonConfig, - GrandpaConfig, IndicesConfig, RuntimeGenesisConfig, SessionConfig, SocietyConfig, StakerStatus, - StakingConfig, BABE_GENESIS_EPOCH_CONFIG, + constants::currency::*, AccountId, AssetsConfig, BalancesConfig, IndicesConfig, + RuntimeGenesisConfig, SessionConfig, SocietyConfig, StakerStatus, StakingConfig, }; use sp_keyring::Ed25519Keyring; use sp_runtime::Perbill; @@ -47,7 +46,6 @@ pub fn config_endowed(extra_endowed: Vec) -> RuntimeGenesisConfig { endowed.extend(extra_endowed.into_iter().map(|endowed| (endowed, 100 * DOLLARS))); RuntimeGenesisConfig { - system: Default::default(), indices: IndicesConfig { indices: vec![] }, balances: BalancesConfig { balances: endowed }, session: SessionConfig { @@ -69,39 +67,8 @@ pub fn config_endowed(extra_endowed: Vec) -> RuntimeGenesisConfig { invulnerables: vec![alice(), bob(), charlie()], ..Default::default() }, - babe: BabeConfig { - authorities: vec![], - epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() - }, - grandpa: GrandpaConfig { authorities: vec![], _config: Default::default() }, - beefy: Default::default(), - im_online: Default::default(), - authority_discovery: Default::default(), - democracy: Default::default(), - council: Default::default(), - technical_committee: Default::default(), - technical_membership: Default::default(), - elections: Default::default(), - sudo: Default::default(), - treasury: Default::default(), society: SocietyConfig { pot: 0 }, - vesting: Default::default(), assets: AssetsConfig { assets: vec![(9, alice(), true, 1)], ..Default::default() }, - pool_assets: Default::default(), - transaction_storage: Default::default(), - transaction_payment: Default::default(), - alliance: Default::default(), - alliance_motion: Default::default(), - nomination_pools: Default::default(), - safe_mode: Default::default(), - tx_pause: Default::default(), - glutton: GluttonConfig { - compute: Default::default(), - storage: Default::default(), - trash_data_count: Default::default(), - ..Default::default() - }, - mixnet: 
Default::default(), + ..Default::default() } } diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index fe8fcfda216e..78e81e10d2b6 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -1237,15 +1237,7 @@ mod tests { "TestName", "test", ChainType::Local, - move || substrate_test_runtime::RuntimeGenesisConfig { - babe: substrate_test_runtime::BabeConfig { - epoch_config: Some( - substrate_test_runtime::TEST_RUNTIME_BABE_EPOCH_CONFIGURATION, - ), - ..Default::default() - }, - ..Default::default() - }, + || Default::default(), Vec::new(), None, None, diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs index 8766dd5c5ad2..6b956316203c 100644 --- a/substrate/client/chain-spec/src/genesis_config_builder.rs +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -149,7 +149,7 @@ mod tests { ::new(substrate_test_runtime::wasm_binary_unwrap()) .get_default_config() .unwrap(); - let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":null},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let expected = r#"{"babe": {"authorities": [], "epochConfig": {"allowed_slots": "PrimaryAndSecondaryVRFSlots", "c": [1, 4]}}, "balances": {"balances": []}, "substrateTest": {"authorities": []}, "system": {}}"#; assert_eq!(from_str::(expected).unwrap(), config); } diff --git a/substrate/client/consensus/babe/rpc/src/lib.rs b/substrate/client/consensus/babe/rpc/src/lib.rs index 8b183e7dfdcd..a3e811baecff 100644 --- a/substrate/client/consensus/babe/rpc/src/lib.rs +++ b/substrate/client/consensus/babe/rpc/src/lib.rs @@ -258,7 +258,7 @@ mod tests { let request = r#"{"jsonrpc":"2.0","method":"babe_epochAuthorship","params": [],"id":1}"#; let (response, _) = api.raw_json_request(request, 1).await.unwrap(); - let expected = r#"{"jsonrpc":"2.0","result":{"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY":{"primary":[0],"secondary":[1,2,4],"secondary_vrf":[]}},"id":1}"#; + let expected = r#"{"jsonrpc":"2.0","result":{"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY":{"primary":[0],"secondary":[],"secondary_vrf":[1,2,4]}},"id":1}"#; assert_eq!(response, expected); } diff --git a/substrate/frame/babe/src/lib.rs b/substrate/frame/babe/src/lib.rs index a6e44390dbc5..5fb107dde3ba 100644 --- a/substrate/frame/babe/src/lib.rs +++ b/substrate/frame/babe/src/lib.rs @@ -323,7 +323,7 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - pub epoch_config: Option, + pub epoch_config: BabeEpochConfiguration, #[serde(skip)] pub _config: sp_std::marker::PhantomData, } @@ -333,9 +333,7 @@ pub mod pallet { fn build(&self) { SegmentIndex::::put(0); Pallet::::initialize_genesis_authorities(&self.authorities); - EpochConfig::::put( - self.epoch_config.clone().expect("epoch_config must not be None"), - ); + EpochConfig::::put(&self.epoch_config); } } diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index 178d43f596b2..fc35cd6ddd82 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -460,27 +460,13 @@ pub mod pallet { ); self.keys.iter().map(|x| x.1.clone()).collect() }); - assert!( - !initial_validators_0.is_empty(), - "Empty validator set for session 0 in genesis block!" 
- ); let initial_validators_1 = T::SessionManager::new_session_genesis(1) .unwrap_or_else(|| initial_validators_0.clone()); - assert!( - !initial_validators_1.is_empty(), - "Empty validator set for session 1 in genesis block!" - ); let queued_keys: Vec<_> = initial_validators_1 - .iter() - .cloned() - .map(|v| { - ( - v.clone(), - Pallet::::load_keys(&v).expect("Validator in session 1 missing keys!"), - ) - }) + .into_iter() + .filter_map(|v| Pallet::::load_keys(&v).map(|k| (v, k))) .collect(); // Tell everyone about the genesis session keys diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs index ffe55bceb80e..5613047359a7 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -99,6 +99,17 @@ pub fn expand_outer_config( ::on_genesis(); } } + + /// Test the `Default` derive impl of the `RuntimeGenesisConfig`. + #[cfg(test)] + #[test] + fn test_genesis_config_builds() { + #scrate::__private::sp_io::TestExternalities::default().execute_with(|| { + ::build( + &RuntimeGenesisConfig::default() + ); + }); + } } } diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 8935acf4e2bb..cd12da6de54e 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -780,7 +780,7 @@ macro_rules! assert_err_with_weight { $crate::assert_err!($call.map(|_| ()).map_err(|e| e.error), $err); assert_eq!(dispatch_err_with_post.post_info.actual_weight, $weight); } else { - panic!("expected Err(_), got Ok(_).") + ::core::panic!("expected Err(_), got Ok(_).") } }; } diff --git a/substrate/primitives/consensus/babe/src/lib.rs b/substrate/primitives/consensus/babe/src/lib.rs index d6b2cdd55e0d..ff0b4568226e 100644 --- a/substrate/primitives/consensus/babe/src/lib.rs +++ b/substrate/primitives/consensus/babe/src/lib.rs @@ -256,6 +256,12 @@ pub struct BabeEpochConfiguration { pub allowed_slots: AllowedSlots, } +impl Default for BabeEpochConfiguration { + fn default() -> Self { + Self { c: (1, 4), allowed_slots: AllowedSlots::PrimaryAndSecondaryVRFSlots } + } +} + /// Verifies the equivocation proof by making sure that: both headers have /// different hashes, are targetting the same slot, and have valid signatures by /// the same authority. diff --git a/substrate/test-utils/runtime/src/genesismap.rs b/substrate/test-utils/runtime/src/genesismap.rs index 5ed9c8a64588..9e972886b377 100644 --- a/substrate/test-utils/runtime/src/genesismap.rs +++ b/substrate/test-utils/runtime/src/genesismap.rs @@ -124,7 +124,6 @@ impl GenesisStorageBuilder { .into_iter() .map(|x| (x.into(), 1)) .collect(), - epoch_config: Some(crate::TEST_RUNTIME_BABE_EPOCH_CONFIGURATION), ..Default::default() }, substrate_test: substrate_test_pallet::GenesisConfig { diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 8bc6f72a82ec..db9ff187b707 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -1291,7 +1291,7 @@ mod tests { let r = Vec::::decode(&mut &r[..]).unwrap(); let json = String::from_utf8(r.into()).expect("returned value is json. 
qed."); - let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":null},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":{"c":[1,4],"allowed_slots":"PrimaryAndSecondaryVRFSlots"}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; assert_eq!(expected.to_string(), json); } From 60e537b95f2336dc598cb9ce49823316841ee3dd Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Thu, 22 Feb 2024 15:22:31 +0700 Subject: [PATCH 21/51] Elastic scaling: use an assumed `CoreIndex` in `candidate-backing` (#3229) First step in implementing https://github.com/paritytech/polkadot-sdk/issues/3144 ### Summary of changes - switch statement `Table` candidate mapping from `ParaId` to `CoreIndex` - introduce experimental `InjectCoreIndex` node feature. - determine and assume a `CoreIndex` for a candidate based on statement validator index. If the signature is valid it means validator controls the validator that index and we can easily map it to a validator group/core. - introduce a temporary provisioner fix until we fully enable elastic scaling in the subystem. The fix ensures we don't fetch the same backable candidate when calling `get_backable_candidate` for each core. TODO: - [x] fix backing tests - [x] fix statement table tests - [x] add new test --------- Signed-off-by: Andrei Sandu Signed-off-by: alindima Co-authored-by: alindima --- Cargo.lock | 3 + polkadot/node/core/backing/Cargo.toml | 1 + polkadot/node/core/backing/src/error.rs | 3 + polkadot/node/core/backing/src/lib.rs | 285 ++++++++++++++---- polkadot/node/core/backing/src/tests/mod.rs | 128 +++++++- .../src/tests/prospective_parachains.rs | 34 ++- polkadot/node/core/provisioner/src/lib.rs | 12 +- polkadot/primitives/Cargo.toml | 2 + polkadot/primitives/src/v6/mod.rs | 27 +- polkadot/primitives/src/vstaging/mod.rs | 6 +- polkadot/statement-table/Cargo.toml | 1 + polkadot/statement-table/src/generic.rs | 50 ++- polkadot/statement-table/src/lib.rs | 6 +- 13 files changed, 462 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 93ee9cc99c44..950b4e0889a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12490,6 +12490,7 @@ dependencies = [ "polkadot-primitives-test-helpers", "polkadot-statement-table", "sc-keystore", + "schnellru", "sp-application-crypto", "sp-core", "sp-keyring", @@ -13159,6 +13160,7 @@ version = "7.0.0" dependencies = [ "bitvec", "hex-literal", + "log", "parity-scale-codec", "polkadot-core-primitives", "polkadot-parachain-primitives", @@ -13556,6 +13558,7 @@ dependencies = [ "parity-scale-codec", "polkadot-primitives", "sp-core", + "tracing-gum", ] [[package]] diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index b0cf041e38da..f71b8df80dd2 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -22,6 +22,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } gum = { package = "tracing-gum", path = "../../gum" } thiserror = { workspace = true } fatality = "0.0.6" +schnellru = "0.2.1" [dev-dependencies] sp-core = { path = "../../../../substrate/primitives/core" } diff --git a/polkadot/node/core/backing/src/error.rs b/polkadot/node/core/backing/src/error.rs index 1b00a62510b7..64955a393962 100644 --- a/polkadot/node/core/backing/src/error.rs +++ b/polkadot/node/core/backing/src/error.rs @@ -48,6 +48,9 @@ pub enum Error { #[error("Candidate is not 
found")] CandidateNotFound, + #[error("CoreIndex cannot be determined for a candidate")] + CoreIndexUnavailable, + #[error("Signature is invalid")] InvalidSignature, diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 98bbd6232add..cc192607cea0 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -70,13 +70,14 @@ use std::{ sync::Arc, }; -use bitvec::vec::BitVec; +use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use futures::{ channel::{mpsc, oneshot}, future::BoxFuture, stream::FuturesOrdered, FutureExt, SinkExt, StreamExt, TryFutureExt, }; +use schnellru::{ByLength, LruMap}; use error::{Error, FatalResult}; use polkadot_node_primitives::{ @@ -104,10 +105,12 @@ use polkadot_node_subsystem_util::{ Validator, }; use polkadot_primitives::{ + vstaging::{node_features::FeatureIndex, NodeFeatures}, BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, - CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, Hash, Id as ParaId, - PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, ValidatorId, - ValidatorIndex, ValidatorSignature, ValidityAttestation, + CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, GroupIndex, GroupRotationInfo, + Hash, Id as ParaId, IndexedVec, PersistedValidationData, PvfExecKind, SessionIndex, + SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, + ValidityAttestation, }; use sp_keystore::KeystorePtr; use statement_table::{ @@ -118,7 +121,7 @@ use statement_table::{ }, Config as TableConfig, Context as TableContextTrait, Table, }; -use util::vstaging::get_disabled_validators_with_fallback; +use util::{runtime::request_node_features, vstaging::get_disabled_validators_with_fallback}; mod error; @@ -209,7 +212,9 @@ struct PerRelayParentState { /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, /// The `ParaId` assigned to the local validator at this relay parent. - assignment: Option, + assigned_para: Option, + /// The `CoreIndex` assigned to the local validator at this relay parent. + assigned_core: Option, /// The candidates that are backed by enough validators in their group, by hash. backed: HashSet, /// The table of candidates and statements under this relay-parent. @@ -224,6 +229,15 @@ struct PerRelayParentState { fallbacks: HashMap, /// The minimum backing votes threshold. minimum_backing_votes: u32, + /// If true, we're appending extra bits in the BackedCandidate validator indices bitfield, + /// which represent the assigned core index. True if ElasticScalingMVP is enabled. + inject_core_index: bool, + /// The core states for all cores. + cores: Vec, + /// The validator index -> group mapping at this relay parent. + validator_to_group: Arc>>, + /// The associated group rotation information. + group_rotation_info: GroupRotationInfo, } struct PerCandidateState { @@ -275,6 +289,9 @@ struct State { /// This is guaranteed to have an entry for each candidate with a relay parent in the implicit /// or explicit view for which a `Seconded` statement has been successfully imported. per_candidate: HashMap, + /// Cache the per-session Validator->Group mapping. + validator_to_group_cache: + LruMap>>>, /// A cloneable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result. 
background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, @@ -292,6 +309,7 @@ impl State { per_leaf: HashMap::default(), per_relay_parent: HashMap::default(), per_candidate: HashMap::new(), + validator_to_group_cache: LruMap::new(ByLength::new(2)), background_validation_tx, keystore, } @@ -379,10 +397,10 @@ struct AttestingData { backing: Vec, } -#[derive(Default)] +#[derive(Default, Debug)] struct TableContext { validator: Option, - groups: HashMap>, + groups: HashMap>, validators: Vec, disabled_validators: Vec, } @@ -404,7 +422,7 @@ impl TableContext { impl TableContextTrait for TableContext { type AuthorityId = ValidatorIndex; type Digest = CandidateHash; - type GroupId = ParaId; + type GroupId = CoreIndex; type Signature = ValidatorSignature; type Candidate = CommittedCandidateReceipt; @@ -412,15 +430,11 @@ impl TableContextTrait for TableContext { candidate.hash() } - fn candidate_group(candidate: &CommittedCandidateReceipt) -> ParaId { - candidate.descriptor().para_id + fn is_member_of(&self, authority: &ValidatorIndex, core: &CoreIndex) -> bool { + self.groups.get(core).map_or(false, |g| g.iter().any(|a| a == authority)) } - fn is_member_of(&self, authority: &ValidatorIndex, group: &ParaId) -> bool { - self.groups.get(group).map_or(false, |g| g.iter().any(|a| a == authority)) - } - - fn get_group_size(&self, group: &ParaId) -> Option { + fn get_group_size(&self, group: &CoreIndex) -> Option { self.groups.get(group).map(|g| g.len()) } } @@ -442,19 +456,20 @@ fn primitive_statement_to_table(s: &SignedFullStatementWithPVD) -> TableSignedSt fn table_attested_to_backed( attested: TableAttestedCandidate< - ParaId, + CoreIndex, CommittedCandidateReceipt, ValidatorIndex, ValidatorSignature, >, table_context: &TableContext, + inject_core_index: bool, ) -> Option { - let TableAttestedCandidate { candidate, validity_votes, group_id: para_id } = attested; + let TableAttestedCandidate { candidate, validity_votes, group_id: core_index } = attested; let (ids, validity_votes): (Vec<_>, Vec) = validity_votes.into_iter().map(|(id, vote)| (id, vote.into())).unzip(); - let group = table_context.groups.get(¶_id)?; + let group = table_context.groups.get(&core_index)?; let mut validator_indices = BitVec::with_capacity(group.len()); @@ -479,6 +494,12 @@ fn table_attested_to_backed( } vote_positions.sort_by_key(|(_orig, pos_in_group)| *pos_in_group); + if inject_core_index { + let core_index_to_inject: BitVec = + BitVec::from_vec(vec![core_index.0 as u8]); + validator_indices.extend(core_index_to_inject); + } + Some(BackedCandidate { candidate, validity_votes: vote_positions @@ -971,7 +992,14 @@ async fn handle_active_leaves_update( // construct a `PerRelayParent` from the runtime API // and insert it. - let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore, mode).await?; + let per = construct_per_relay_parent_state( + ctx, + maybe_new, + &state.keystore, + &mut state.validator_to_group_cache, + mode, + ) + .await?; if let Some(per) = per { state.per_relay_parent.insert(maybe_new, per); @@ -981,31 +1009,112 @@ async fn handle_active_leaves_update( Ok(()) } +macro_rules! try_runtime_api { + ($x: expr) => { + match $x { + Ok(x) => x, + Err(err) => { + // Only bubble up fatal errors. + error::log_error(Err(Into::::into(err).into()))?; + + // We can't do candidate validation work if we don't have the + // requisite runtime API data. But these errors should not take + // down the node. 
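+				// Returning `Ok(None)` skips building state for this relay parent
+				// rather than bringing down the subsystem.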
+ return Ok(None) + }, + } + }; +} + +fn core_index_from_statement( + validator_to_group: &IndexedVec>, + group_rotation_info: &GroupRotationInfo, + cores: &[CoreState], + statement: &SignedFullStatementWithPVD, +) -> Option { + let compact_statement = statement.as_unchecked(); + let candidate_hash = CandidateHash(*compact_statement.unchecked_payload().candidate_hash()); + + let n_cores = cores.len(); + + gum::trace!( + target:LOG_TARGET, + ?group_rotation_info, + ?statement, + ?validator_to_group, + n_cores = ?cores.len(), + ?candidate_hash, + "Extracting core index from statement" + ); + + let statement_validator_index = statement.validator_index(); + let Some(Some(group_index)) = validator_to_group.get(statement_validator_index) else { + gum::debug!( + target: LOG_TARGET, + ?group_rotation_info, + ?statement, + ?validator_to_group, + n_cores = ?cores.len() , + ?candidate_hash, + "Invalid validator index: {:?}", + statement_validator_index + ); + return None + }; + + // First check if the statement para id matches the core assignment. + let core_index = group_rotation_info.core_for_group(*group_index, n_cores); + + if core_index.0 as usize > n_cores { + gum::warn!(target: LOG_TARGET, ?candidate_hash, ?core_index, n_cores, "Invalid CoreIndex"); + return None + } + + if let StatementWithPVD::Seconded(candidate, _pvd) = statement.payload() { + let candidate_para_id = candidate.descriptor.para_id; + let assigned_para_id = match &cores[core_index.0 as usize] { + CoreState::Free => { + gum::debug!(target: LOG_TARGET, ?candidate_hash, "Invalid CoreIndex, core is not assigned to any para_id"); + return None + }, + CoreState::Occupied(occupied) => + if let Some(next) = &occupied.next_up_on_available { + next.para_id + } else { + return None + }, + CoreState::Scheduled(scheduled) => scheduled.para_id, + }; + + if assigned_para_id != candidate_para_id { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + ?core_index, + ?assigned_para_id, + ?candidate_para_id, + "Invalid CoreIndex, core is assigned to a different para_id" + ); + return None + } + return Some(core_index) + } else { + return Some(core_index) + } +} + /// Load the data necessary to do backing work on top of a relay-parent. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn construct_per_relay_parent_state( ctx: &mut Context, relay_parent: Hash, keystore: &KeystorePtr, + validator_to_group_cache: &mut LruMap< + SessionIndex, + Arc>>, + >, mode: ProspectiveParachainsMode, ) -> Result, Error> { - macro_rules! try_runtime_api { - ($x: expr) => { - match $x { - Ok(x) => x, - Err(err) => { - // Only bubble up fatal errors. - error::log_error(Err(Into::::into(err).into()))?; - - // We can't do candidate validation work if we don't have the - // requisite runtime API data. But these errors should not take - // down the node. - return Ok(None) - }, - } - }; - } - let parent = relay_parent; let (session_index, validators, groups, cores) = futures::try_join!( @@ -1020,6 +1129,16 @@ async fn construct_per_relay_parent_state( .map_err(Error::JoinMultiple)?; let session_index = try_runtime_api!(session_index); + + let inject_core_index = request_node_features(parent, session_index, ctx.sender()) + .await? 
+ .unwrap_or(NodeFeatures::EMPTY) + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|b| *b) + .unwrap_or(false); + + gum::debug!(target: LOG_TARGET, inject_core_index, ?parent, "New state"); + let validators: Vec<_> = try_runtime_api!(validators); let (validator_groups, group_rotation_info) = try_runtime_api!(groups); let cores = try_runtime_api!(cores); @@ -1055,18 +1174,24 @@ async fn construct_per_relay_parent_state( }, }; - let mut groups = HashMap::new(); let n_cores = cores.len(); - let mut assignment = None; - for (idx, core) in cores.into_iter().enumerate() { + let mut groups = HashMap::>::new(); + let mut assigned_core = None; + let mut assigned_para = None; + + for (idx, core) in cores.iter().enumerate() { let core_para_id = match core { CoreState::Scheduled(scheduled) => scheduled.para_id, CoreState::Occupied(occupied) => if mode.is_enabled() { // Async backing makes it legal to build on top of // occupied core. - occupied.candidate_descriptor.para_id + if let Some(next) = &occupied.next_up_on_available { + next.para_id + } else { + continue + } } else { continue }, @@ -1077,11 +1202,27 @@ async fn construct_per_relay_parent_state( let group_index = group_rotation_info.group_for_core(core_index, n_cores); if let Some(g) = validator_groups.get(group_index.0 as usize) { if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assignment = Some(core_para_id); + assigned_para = Some(core_para_id); + assigned_core = Some(core_index); } - groups.insert(core_para_id, g.clone()); + groups.insert(core_index, g.clone()); } } + gum::debug!(target: LOG_TARGET, ?groups, "TableContext"); + + let validator_to_group = validator_to_group_cache + .get_or_insert(session_index, || { + let mut vector = vec![None; validators.len()]; + + for (group_idx, validator_group) in validator_groups.iter().enumerate() { + for validator in validator_group { + vector[validator.0 as usize] = Some(GroupIndex(group_idx as u32)); + } + } + + Arc::new(IndexedVec::<_, _>::from(vector)) + }) + .expect("Just inserted"); let table_context = TableContext { validator, groups, validators, disabled_validators }; let table_config = TableConfig { @@ -1094,7 +1235,8 @@ async fn construct_per_relay_parent_state( Ok(Some(PerRelayParentState { prospective_parachains_mode: mode, parent, - assignment, + assigned_core, + assigned_para, backed: HashSet::new(), table: Table::new(table_config), table_context, @@ -1102,6 +1244,10 @@ async fn construct_per_relay_parent_state( awaiting_validation: HashSet::new(), fallbacks: HashMap::new(), minimum_backing_votes, + inject_core_index, + cores, + validator_to_group: validator_to_group.clone(), + group_rotation_info, })) } @@ -1519,15 +1665,16 @@ async fn import_statement( per_candidate: &mut HashMap, statement: &SignedFullStatementWithPVD, ) -> Result, Error> { + let candidate_hash = statement.payload().candidate_hash(); + gum::debug!( target: LOG_TARGET, statement = ?statement.payload().to_compact(), validator_index = statement.validator_index().0, + ?candidate_hash, "Importing statement", ); - let candidate_hash = statement.payload().candidate_hash(); - // If this is a new candidate (statement is 'seconded' and candidate is unknown), // we need to create an entry in the `PerCandidateState` map. 
// @@ -1593,7 +1740,15 @@ async fn import_statement( let stmt = primitive_statement_to_table(statement); - Ok(rp_state.table.import_statement(&rp_state.table_context, stmt)) + let core = core_index_from_statement( + &rp_state.validator_to_group, + &rp_state.group_rotation_info, + &rp_state.cores, + statement, + ) + .ok_or(Error::CoreIndexUnavailable)?; + + Ok(rp_state.table.import_statement(&rp_state.table_context, core, stmt)) } /// Handles a summary received from [`import_statement`] and dispatches `Backed` notifications and @@ -1615,7 +1770,11 @@ async fn post_import_statement_actions( // `HashSet::insert` returns true if the thing wasn't in there already. if rp_state.backed.insert(candidate_hash) { - if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) { + if let Some(backed) = table_attested_to_backed( + attested, + &rp_state.table_context, + rp_state.inject_core_index, + ) { let para_id = backed.candidate.descriptor.para_id; gum::debug!( target: LOG_TARGET, @@ -1654,8 +1813,14 @@ async fn post_import_statement_actions( ); ctx.send_unbounded_message(message); } + } else { + gum::debug!(target: LOG_TARGET, ?candidate_hash, "Cannot get BackedCandidate"); } + } else { + gum::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate already known"); } + } else { + gum::debug!(target: LOG_TARGET, "No attested candidate"); } issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table); @@ -1859,9 +2024,10 @@ async fn maybe_validate_and_import( let candidate_hash = summary.candidate; - if Some(summary.group_id) != rp_state.assignment { + if Some(summary.group_id) != rp_state.assigned_core { return Ok(()) } + let attesting = match statement.payload() { StatementWithPVD::Seconded(receipt, _) => { let attesting = AttestingData { @@ -2004,10 +2170,11 @@ async fn handle_second_message( } // Sanity check that candidate is from our assignment. - if Some(candidate.descriptor().para_id) != rp_state.assignment { + if Some(candidate.descriptor().para_id) != rp_state.assigned_para { gum::debug!( target: LOG_TARGET, - our_assignment = ?rp_state.assignment, + our_assignment_core = ?rp_state.assigned_core, + our_assignment_para = ?rp_state.assigned_para, collation = ?candidate.descriptor().para_id, "Subsystem asked to second for para outside of our assignment", ); @@ -2015,6 +2182,14 @@ async fn handle_second_message( return Ok(()) } + gum::debug!( + target: LOG_TARGET, + our_assignment_core = ?rp_state.assigned_core, + our_assignment_para = ?rp_state.assigned_para, + collation = ?candidate.descriptor().para_id, + "Current assignments vs collation", + ); + // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a // Seconded statement only if we have not signed a Valid statement for the requested candidate. 
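	// (Signing both a `Seconded` and a `Valid` statement for the same candidate would register as a
	// validity double-vote in the statement table.)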
// @@ -2087,7 +2262,13 @@ fn handle_get_backed_candidates_message( &rp_state.table_context, rp_state.minimum_backing_votes, ) - .and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context)) + .and_then(|attested| { + table_attested_to_backed( + attested, + &rp_state.table_context, + rp_state.inject_core_index, + ) + }) }) .collect(); diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 1957f4e19c54..7223f1e1dfb0 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -65,13 +65,14 @@ fn dummy_pvd() -> PersistedValidationData { } } -struct TestState { +pub(crate) struct TestState { chain_ids: Vec, keystore: KeystorePtr, validators: Vec, validator_public: Vec, validation_data: PersistedValidationData, validator_groups: (Vec>, GroupRotationInfo), + validator_to_group: IndexedVec>, availability_cores: Vec, head_data: HashMap, signing_context: SigningContext, @@ -114,6 +115,11 @@ impl Default for TestState { .into_iter() .map(|g| g.into_iter().map(ValidatorIndex).collect()) .collect(); + let validator_to_group: IndexedVec<_, _> = + vec![Some(0), Some(1), Some(0), Some(0), None, Some(0)] + .into_iter() + .map(|x| x.map(|x| GroupIndex(x))) + .collect(); let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 }; @@ -143,6 +149,7 @@ impl Default for TestState { validators, validator_public, validator_groups: (validator_groups, group_rotation_info), + validator_to_group, availability_cores, head_data, validation_data, @@ -285,6 +292,16 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS } ); + // Node features request from runtime: all features are disabled. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) + ) => { + tx.send(Ok(Default::default())).unwrap(); + } + ); + // Check if subsystem job issues a request for the minimum backing votes. 
assert_matches!( virtual_overseer.recv().await, @@ -639,6 +656,107 @@ fn backing_works() { }); } +#[test] +fn extract_core_index_from_statement_works() { + let test_state = TestState::default(); + + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); + + let pov_hash = pov_a.hash(); + + let mut candidate = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code_a.0.clone(), + ..Default::default() + } + .build(); + + let public2 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed_statement_1 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(2), + &public2.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let public1 = Keystore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[1].to_seed()), + ) + .expect("Insert key into keystore"); + + let signed_statement_2 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(1), + &public1.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + candidate.descriptor.para_id = test_state.chain_ids[1]; + + let signed_statement_3 = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate, pvd_a.clone()), + &test_state.signing_context, + ValidatorIndex(1), + &public1.into(), + ) + .ok() + .flatten() + .expect("should be signed"); + + let core_index_1 = core_index_from_statement( + &test_state.validator_to_group, + &test_state.validator_groups.1, + &test_state.availability_cores, + &signed_statement_1, + ) + .unwrap(); + + assert_eq!(core_index_1, CoreIndex(0)); + + let core_index_2 = core_index_from_statement( + &test_state.validator_to_group, + &test_state.validator_groups.1, + &test_state.availability_cores, + &signed_statement_2, + ); + + // Must be none, para_id in descriptor is different than para assigned to core + assert_eq!(core_index_2, None); + + let core_index_3 = core_index_from_statement( + &test_state.validator_to_group, + &test_state.validator_groups.1, + &test_state.availability_cores, + &signed_statement_3, + ) + .unwrap(); + + assert_eq!(core_index_3, CoreIndex(1)); +} + #[test] fn backing_works_while_validation_ongoing() { let test_state = TestState::default(); @@ -1422,7 +1540,7 @@ fn backing_works_after_failed_validation() { fn candidate_backing_reorders_votes() { use sp_core::Encode; - let para_id = ParaId::from(10); + let core_idx = CoreIndex(10); let validators = vec![ Sr25519Keyring::Alice, Sr25519Keyring::Bob, @@ -1436,7 +1554,7 @@ fn candidate_backing_reorders_votes() { let validator_groups = { let mut validator_groups = HashMap::new(); validator_groups - .insert(para_id, vec![0, 1, 2, 3, 4, 5].into_iter().map(ValidatorIndex).collect()); + .insert(core_idx, vec![0, 1, 2, 3, 4, 5].into_iter().map(ValidatorIndex).collect()); validator_groups }; @@ -1466,10 +1584,10 @@ fn candidate_backing_reorders_votes() { (ValidatorIndex(3), fake_attestation(3)), 
(ValidatorIndex(1), fake_attestation(1)), ], - group_id: para_id, + group_id: core_idx, }; - let backed = table_attested_to_backed(attested, &table_context).unwrap(); + let backed = table_attested_to_backed(attested, &table_context, false).unwrap(); let expected_bitvec = { let mut validator_indices = BitVec::::with_capacity(6); diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 578f21bef665..94310d2aa164 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -185,6 +185,16 @@ async fn activate_leaf( } ); + // Node features request from runtime: all features are disabled. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) + ) if parent == hash => { + tx.send(Ok(Default::default())).unwrap(); + } + ); + // Check if subsystem job issues a request for the minimum backing votes. assert_matches!( virtual_overseer.recv().await, @@ -305,10 +315,11 @@ async fn assert_hypothetical_frontier_requests( ) => { let idx = match expected_requests.iter().position(|r| r.0 == request) { Some(idx) => idx, - None => panic!( + None => + panic!( "unexpected hypothetical frontier request, no match found for {:?}", request - ), + ), }; let resp = std::mem::take(&mut expected_requests[idx].1); tx.send(resp).unwrap(); @@ -1268,6 +1279,7 @@ fn concurrent_dependent_candidates() { let statement_b = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement_a }).await; + // At this point the subsystem waits for response, the previous message is received, // send a second one without blocking. let _ = virtual_overseer @@ -1388,7 +1400,19 @@ fn concurrent_dependent_candidates() { assert_eq!(sess_idx, 1); tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::ValidatorGroups(tx), + )) => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _parent, + RuntimeApiRequest::AvailabilityCores(tx), + )) => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + }, _ => panic!("unexpected message received from overseer: {:?}", msg), } } @@ -1419,7 +1443,6 @@ fn seconding_sanity_check_occupy_same_depth() { let leaf_parent = get_parent_hash(leaf_hash); let activated = new_leaf(leaf_hash, LEAF_BLOCK_NUMBER); - let min_block_number = LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN; let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; @@ -1555,13 +1578,14 @@ fn occupied_core_assignment() { const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; let para_id = test_state.chain_ids[0]; + let previous_para_id = test_state.chain_ids[1]; // Set the core state to occupied. 
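	// (Under async backing, the upcoming assignment of an occupied core is its
	// `next_up_on_available` para; the occupied candidate itself belongs to `previous_para_id` below.)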
let mut candidate_descriptor = ::test_helpers::dummy_candidate_descriptor(Hash::zero()); - candidate_descriptor.para_id = para_id; + candidate_descriptor.para_id = previous_para_id; test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { group_responsible: Default::default(), - next_up_on_available: None, + next_up_on_available: Some(ScheduledCore { para_id, collator: None }), occupied_since: 100_u32, time_out_at: 200_u32, next_up_on_time_out: None, diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 51f768d782e0..d98f6ebfe428 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -681,10 +681,17 @@ async fn request_backable_candidates( CoreState::Free => continue, }; + // We should be calling this once per para rather than per core. + // TODO: Will be fixed in https://github.com/paritytech/polkadot-sdk/pull/3233. + // For now, at least make sure we don't supply the same candidate multiple times in case a + // para has multiple cores scheduled. let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; - match response { - Some((hash, relay_parent)) => selected_candidates.push((hash, relay_parent)), + Some((hash, relay_parent)) => { + if !selected_candidates.iter().any(|bc| &(hash, relay_parent) == bc) { + selected_candidates.push((hash, relay_parent)) + } + }, None => { gum::debug!( target: LOG_TARGET, @@ -726,6 +733,7 @@ async fn select_candidates( ) .await?, }; + gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backable candidates"); // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 58d3d1001610..e63fb621c788 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -14,6 +14,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "se hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } +log = { workspace = true, default-features = false } serde = { features = ["alloc", "derive"], workspace = true } application-crypto = { package = "sp-application-crypto", path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } @@ -38,6 +39,7 @@ std = [ "application-crypto/std", "bitvec/std", "inherents/std", + "log/std", "parity-scale-codec/std", "polkadot-core-primitives/std", "polkadot-parachain-primitives/std", diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index 4938d20d2d1b..538eb3855848 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -72,6 +72,7 @@ pub use metrics::{ /// The key type ID for a collator key. pub const COLLATOR_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"coll"); +const LOG_TARGET: &str = "runtime::primitives"; mod collator_app { use application_crypto::{app_crypto, sr25519}; @@ -746,17 +747,29 @@ impl BackedCandidate { /// /// Returns either an error, indicating that one of the signatures was invalid or that the index /// was out-of-bounds, or the number of signatures checked. 
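/// (The `Debug` bound added below is only needed for the new diagnostic log messages.)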
-pub fn check_candidate_backing<H: AsRef<[u8]> + Clone + Encode>(
+pub fn check_candidate_backing<H: AsRef<[u8]> + Clone + Encode + core::fmt::Debug>(
 backed: &BackedCandidate<H>,
 signing_context: &SigningContext<H>,
 group_len: usize,
 validator_lookup: impl Fn(usize) -> Option<ValidatorId>,
 ) -> Result<usize, ()> {
 if backed.validator_indices.len() != group_len {
+ log::debug!(
+ target: LOG_TARGET,
+ "Check candidate backing: indices mismatch: group_len = {}, indices_len = {}",
+ group_len,
+ backed.validator_indices.len(),
+ );
 return Err(())
 }

 if backed.validity_votes.len() > group_len {
+ log::debug!(
+ target: LOG_TARGET,
+ "Check candidate backing: Too many votes, expected: {}, found: {}",
+ group_len,
+ backed.validity_votes.len(),
+ );
 return Err(())
 }

@@ -778,11 +791,23 @@ pub fn check_candidate_backing<H: AsRef<[u8]> + Clone + Encode>(
 if sig.verify(&payload[..], &validator_id) {
 signed += 1;
 } else {
+ log::debug!(
+ target: LOG_TARGET,
+ "Check candidate backing: Invalid signature. validator_id = {:?}, validator_index = {}",
+ validator_id,
+ val_in_group_idx,
+ );
 return Err(())
 }
 }

 if signed != backed.validity_votes.len() {
+ log::error!(
+ target: LOG_TARGET,
+ "Check candidate backing: Too many signatures, expected = {}, found = {}",
+ backed.validity_votes.len(),
+ signed,
+ );
 return Err(())
 }

diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs
index 630bcf8679ad..39d9dfc02c5b 100644
--- a/polkadot/primitives/src/vstaging/mod.rs
+++ b/polkadot/primitives/src/vstaging/mod.rs
@@ -64,9 +64,13 @@ pub mod node_features {
 /// Tells if tranche0 assignments could be sent in a single certificate.
 /// Reserved for: ``
 EnableAssignmentsV2 = 0,
+ /// This feature enables the extension of `BackedCandidate::validator_indices` by 8 bits.
+ /// The value stored there represents the assumed core index where the candidates
+ /// are backed. This is needed for the elastic scaling MVP.
+ ElasticScalingMVP = 1,
 /// First unassigned feature bit.
 /// Every time a new feature flag is assigned it should take this value.
 /// and this should be incremented.
- FirstUnassigned = 1,
+ FirstUnassigned = 2,
 }
 }

diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml
index 6403b822ed9b..37b8a99d640a 100644
--- a/polkadot/statement-table/Cargo.toml
+++ b/polkadot/statement-table/Cargo.toml
@@ -13,3 +13,4 @@ workspace = true
 parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] }
 sp-core = { path = "../../substrate/primitives/core" }
 primitives = { package = "polkadot-primitives", path = "../primitives" }
+gum = { package = "tracing-gum", path = "../node/gum" }

diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs
index 22bffde5acc1..2ee6f6a4f781 100644
--- a/polkadot/statement-table/src/generic.rs
+++ b/polkadot/statement-table/src/generic.rs
@@ -36,6 +36,7 @@ use primitives::{
 };

 use parity_scale_codec::{Decode, Encode};
+const LOG_TARGET: &str = "parachain::statement-table";

 /// Context for the statement table.
 pub trait Context {
@@ -53,9 +54,6 @@ pub trait Context {
 /// get the digest of a candidate.
 fn candidate_digest(candidate: &Self::Candidate) -> Self::Digest;

- /// get the group of a candidate.
- fn candidate_group(candidate: &Self::Candidate) -> Self::GroupId;
-
 /// Whether an authority is a member of a group.
 /// Members are meant to submit candidates and vote on validity.
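 /// (The group to check against is now supplied by the caller of `import_statement`, since a
 /// candidate's group can no longer be derived from the candidate itself.)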
fn is_member_of(&self, authority: &Self::AuthorityId, group: &Self::GroupId) -> bool; @@ -342,13 +340,13 @@ impl Table { pub fn import_statement( &mut self, context: &Ctx, + group_id: Ctx::GroupId, statement: SignedStatement, ) -> Option> { let SignedStatement { statement, signature, sender: signer } = statement; - let res = match statement { Statement::Seconded(candidate) => - self.import_candidate(context, signer.clone(), candidate, signature), + self.import_candidate(context, signer.clone(), candidate, signature, group_id), Statement::Valid(digest) => self.validity_vote(context, signer.clone(), digest, ValidityVote::Valid(signature)), }; @@ -387,9 +385,10 @@ impl Table { authority: Ctx::AuthorityId, candidate: Ctx::Candidate, signature: Ctx::Signature, + group: Ctx::GroupId, ) -> ImportResult { - let group = Ctx::candidate_group(&candidate); if !context.is_member_of(&authority, &group) { + gum::debug!(target: LOG_TARGET, authority = ?authority, group = ?group, "New `Misbehavior::UnauthorizedStatement`, candidate backed by validator that doesn't belong to expected group" ); return Err(Misbehavior::UnauthorizedStatement(UnauthorizedStatement { statement: SignedStatement { signature, @@ -634,10 +633,6 @@ mod tests { Digest(candidate.1) } - fn candidate_group(candidate: &Candidate) -> GroupId { - GroupId(candidate.0) - } - fn is_member_of(&self, authority: &AuthorityId, group: &GroupId) -> bool { self.authorities.get(authority).map(|v| v == group).unwrap_or(false) } @@ -675,10 +670,10 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement_a); + table.import_statement(&context, GroupId(2), statement_a); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); - table.import_statement(&context, statement_b); + table.import_statement(&context, GroupId(2), statement_b); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], Misbehavior::MultipleCandidates(MultipleCandidates { @@ -711,10 +706,10 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement_a); + table.import_statement(&context, GroupId(2), statement_a); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); - table.import_statement(&context, statement_b); + table.import_statement(&context, GroupId(2), statement_b); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); } @@ -735,7 +730,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], @@ -769,7 +764,7 @@ mod tests { }; let candidate_a_digest = Digest(100); - table.import_statement(&context, candidate_a); + table.import_statement(&context, GroupId(2), candidate_a); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); @@ -779,7 +774,7 @@ mod tests { signature: Signature(2), sender: AuthorityId(2), }; - table.import_statement(&context, bad_validity_vote); + table.import_statement(&context, GroupId(3), bad_validity_vote); assert_eq!( table.detected_misbehavior[&AuthorityId(2)][0], @@ -811,7 +806,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let invalid_statement = SignedStatement { @@ -820,7 +815,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, 
invalid_statement); + table.import_statement(&context, GroupId(2), invalid_statement); assert!(table.detected_misbehavior.contains_key(&AuthorityId(1))); } @@ -842,7 +837,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let extra_vote = SignedStatement { @@ -851,7 +846,7 @@ mod tests { sender: AuthorityId(1), }; - table.import_statement(&context, extra_vote); + table.import_statement(&context, GroupId(2), extra_vote); assert_eq!( table.detected_misbehavior[&AuthorityId(1)][0], Misbehavior::ValidityDoubleVote(ValidityDoubleVote::IssuedAndValidity( @@ -910,7 +905,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); assert!(table.attested_candidate(&candidate_digest, &context, 2).is_none()); @@ -921,7 +916,7 @@ mod tests { sender: AuthorityId(2), }; - table.import_statement(&context, vote); + table.import_statement(&context, GroupId(2), vote); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); assert!(table.attested_candidate(&candidate_digest, &context, 2).is_some()); } @@ -944,7 +939,7 @@ mod tests { }; let summary = table - .import_statement(&context, statement) + .import_statement(&context, GroupId(2), statement) .expect("candidate import to give summary"); assert_eq!(summary.candidate, Digest(100)); @@ -971,7 +966,7 @@ mod tests { }; let candidate_digest = Digest(100); - table.import_statement(&context, statement); + table.import_statement(&context, GroupId(2), statement); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); let vote = SignedStatement { @@ -980,8 +975,9 @@ mod tests { sender: AuthorityId(2), }; - let summary = - table.import_statement(&context, vote).expect("candidate vote to give summary"); + let summary = table + .import_statement(&context, GroupId(2), vote) + .expect("candidate vote to give summary"); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); diff --git a/polkadot/statement-table/src/lib.rs b/polkadot/statement-table/src/lib.rs index d4629330ac01..3740d15cc4f3 100644 --- a/polkadot/statement-table/src/lib.rs +++ b/polkadot/statement-table/src/lib.rs @@ -35,8 +35,8 @@ pub use generic::{Config, Context, Table}; pub mod v2 { use crate::generic; use primitives::{ - CandidateHash, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, Id, - ValidatorIndex, ValidatorSignature, + CandidateHash, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, + CoreIndex, ValidatorIndex, ValidatorSignature, }; /// Statements about candidates on the network. @@ -59,7 +59,7 @@ pub mod v2 { >; /// A summary of import of a statement. 
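 /// (The summary's group id is now a `CoreIndex` rather than a `ParaId`, since with elastic
 /// scaling a para may be backed on several cores.)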
- pub type Summary = generic::Summary<CandidateHash, Id>;
+ pub type Summary = generic::Summary<CandidateHash, CoreIndex>;

 impl<'a> From<&'a Statement> for PrimitiveStatement {
 fn from(s: &'a Statement) -> PrimitiveStatement {

From 61ecef444fbd9e1eb4138123f35ecba32fe77565 Mon Sep 17 00:00:00 2001
From: Egor_P
Date: Thu, 22 Feb 2024 16:48:52 +0700
Subject: [PATCH 22/51] Add matrix public channels for release announcements
 (#3430)

This PR adds two more public Matrix channels where the release
announcements should go.

---
 .github/workflows/release-99_notif-published.yml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/.github/workflows/release-99_notif-published.yml b/.github/workflows/release-99_notif-published.yml
index 397795fafa4e..732db15d9c0c 100644
--- a/.github/workflows/release-99_notif-published.yml
+++ b/.github/workflows/release-99_notif-published.yml
@@ -32,6 +32,12 @@ jobs:
 - name: '#polkadotvalidatorlounge:web3.foundation'
 room: '!NZrbtteFeqYKCUGQtr:matrix.parity.io'
 pre-releases: false
+ - name: '#polkadot-announcements:parity.io'
+ room: '!UqHPWiCBGZWxrmYBkF:matrix.parity.io'
+ pre-releases: false
+ - name: '#kusama-announce:parity.io'
+ room: '!FMwxpQnYhRCNDRsYGI:matrix.parity.io'
+ pre-releases: false

 steps:
 - name: Matrix notification to ${{ matrix.channel.name }}

From 822082807fd6f146cd1c0561dc340dedab463c40 Mon Sep 17 00:00:00 2001
From: Koute
Date: Thu, 22 Feb 2024 19:03:12 +0900
Subject: [PATCH 23/51] Fix `wasm-builder` not exiting if compilation fails
 (#3439)

This PR fixes a subtle bug in `wasm-builder` first introduced in
https://github.com/paritytech/polkadot-sdk/pull/1851 (sorry, my bad! I
should have caught this during review) where the status code of the
`cargo` subprocess is not properly checked, which results in builds
silently succeeding when they shouldn't (that is: if we successfully
build a runtime blob, then modify the code so that it won't compile and
recompile it again, the build will succeed and silently use the *old*
blob).

cc @athei This is the bug you were seeing.

[edit]Also fixes a similar PolkaVM-specific bug where I accidentally
used the wrong comparison operator.[/edit]

---
 substrate/utils/wasm-builder/src/wasm_project.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs
index bc8c9b3b4b39..9ffb5c72fd95 100644
--- a/substrate/utils/wasm-builder/src/wasm_project.rs
+++ b/substrate/utils/wasm-builder/src/wasm_project.rs
@@ -855,7 +855,7 @@ fn build_bloaty_blob(
 println!("{} {}", colorize_info_message("Using rustc version:"), cargo_cmd.rustc_version());

 // Use `process::exit(1)` to have a clean error output.
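 // NOTE: `status()` returns `Err` only if the `cargo` process could not be spawned or waited
 // on; a failed compilation still yields `Ok(status)` with a non-zero exit code. The check
 // below must therefore treat anything other than `Ok(true)` as a build failure.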
- if build_cmd.status().map(|s| s.success()).is_err() {
+ if !matches!(build_cmd.status().map(|s| s.success()), Ok(true)) {
 process::exit(1);
 }

@@ -877,7 +877,7 @@ fn build_bloaty_blob(
 if polkavm_path
 .metadata()
 .map(|polkavm_metadata| {
- polkavm_metadata.modified().unwrap() >= elf_metadata.modified().unwrap()
+ polkavm_metadata.modified().unwrap() < elf_metadata.modified().unwrap()
 })
 .unwrap_or(true)
 {

From 31546c8d248573e9af33836883d414057a0cf4c9 Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Thu, 22 Feb 2024 13:44:41 +0200
Subject: [PATCH 24/51] sc-consensus-beefy: pump gossip engine while waiting
 for initialization conditions (#3435)

As part of BEEFY worker/voter initialization the task waits for certain
chain and backend conditions to be fulfilled:
- BEEFY consensus enabled on-chain & GRANDPA best finalized higher than
on-chain BEEFY genesis block,
- backend has synced headers for BEEFY mandatory blocks between best
BEEFY and best GRANDPA.

During this waiting time, any messages gossiped on the BEEFY topic for
the current chain get enqueued in the gossip engine, leading to RAM
bloat and warning/error log output when the wait time is non-negligible
(like during a clean sync).

This PR adds logic to pump the gossip engine while waiting on the other
initialization conditions, to make sure gossiped messages get consumed
(in practice, discarded until the worker is fully initialized).

Also raises the warning threshold for enqueued messages from 10k to
100k. This is in line with the other gossip protocols on the node.

Fixes https://github.com/paritytech/polkadot-sdk/issues/3390

---------

Signed-off-by: Adrian Catangiu

---
 prdoc/pr_3435.prdoc | 16 +
 .../beefy/src/communication/gossip.rs | 35 +-
 .../consensus/beefy/src/communication/mod.rs | 2 -
 .../client/consensus/beefy/src/import.rs | 2 +-
 substrate/client/consensus/beefy/src/lib.rs | 414 +++++++++++++----
 substrate/client/consensus/beefy/src/tests.rs | 116 +----
 .../client/consensus/beefy/src/worker.rs | 419 +++++------------
 .../consensus/beefy/src/commitment.rs | 21 +
 8 files changed, 520 insertions(+), 505 deletions(-)
 create mode 100644 prdoc/pr_3435.prdoc

diff --git a/prdoc/pr_3435.prdoc b/prdoc/pr_3435.prdoc
new file mode 100644
index 000000000000..7d7896bff7d4
--- /dev/null
+++ b/prdoc/pr_3435.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Fix BEEFY-related gossip messages error logs
+
+doc:
+ - audience: Node Operator
+ description: |
+ Added logic to pump the gossip engine while waiting for other
+ initialization conditions, to make sure gossiped messages get consumed
+ (in practice, discarded until the worker is fully initialized).
+ This fixes an issue where node operators saw error logs, and avoids
+ potential RAM bloat when BEEFY initialization takes a long time
+ (i.e. during a clean sync).
+crates:
+ - name: sc-consensus-beefy

diff --git a/substrate/client/consensus/beefy/src/communication/gossip.rs b/substrate/client/consensus/beefy/src/communication/gossip.rs
index 8a0b0a74308f..eb43c9173d75 100644
--- a/substrate/client/consensus/beefy/src/communication/gossip.rs
+++ b/substrate/client/consensus/beefy/src/communication/gossip.rs
@@ -56,6 +56,8 @@ pub(super) enum Action {
 Keep(H, ReputationChange),
 // discard, applying cost/benefit to originator.
 Discard(ReputationChange),
+ // ignore, no cost/benefit applied to originator.
+ DiscardNoReport,
 }

 /// An outcome of examining a message.
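 /// (A message that cannot yet be evaluated, e.g. before the gossip filter is initialized,
 /// maps to `Action::DiscardNoReport`, so the sending peer is neither punished nor rewarded.)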
@@ -68,7 +70,7 @@ enum Consider { /// Message is from the future. Reject. RejectFuture, /// Message cannot be evaluated. Reject. - RejectOutOfScope, + CannotEvaluate, } /// BEEFY gossip message type that gets encoded and sent on the network. @@ -168,18 +170,14 @@ impl Filter { .as_ref() .map(|f| // only from current set and only [filter.start, filter.end] - if set_id < f.validator_set.id() { + if set_id < f.validator_set.id() || round < f.start { Consider::RejectPast - } else if set_id > f.validator_set.id() { - Consider::RejectFuture - } else if round < f.start { - Consider::RejectPast - } else if round > f.end { + } else if set_id > f.validator_set.id() || round > f.end { Consider::RejectFuture } else { Consider::Accept }) - .unwrap_or(Consider::RejectOutOfScope) + .unwrap_or(Consider::CannotEvaluate) } /// Return true if `round` is >= than `max(session_start, best_beefy)`, @@ -199,7 +197,7 @@ impl Filter { Consider::Accept } ) - .unwrap_or(Consider::RejectOutOfScope) + .unwrap_or(Consider::CannotEvaluate) } /// Add new _known_ `round` to the set of seen valid justifications. @@ -244,7 +242,7 @@ where pub(crate) fn new( known_peers: Arc>>, ) -> (GossipValidator, TracingUnboundedReceiver) { - let (tx, rx) = tracing_unbounded("mpsc_beefy_gossip_validator", 10_000); + let (tx, rx) = tracing_unbounded("mpsc_beefy_gossip_validator", 100_000); let val = GossipValidator { votes_topic: votes_topic::(), justifs_topic: proofs_topic::(), @@ -289,7 +287,9 @@ where match filter.consider_vote(round, set_id) { Consider::RejectPast => return Action::Discard(cost::OUTDATED_MESSAGE), Consider::RejectFuture => return Action::Discard(cost::FUTURE_MESSAGE), - Consider::RejectOutOfScope => return Action::Discard(cost::OUT_OF_SCOPE_MESSAGE), + // When we can't evaluate, it's our fault (e.g. filter not initialized yet), we + // discard the vote without punishing or rewarding the sending peer. + Consider::CannotEvaluate => return Action::DiscardNoReport, Consider::Accept => {}, } @@ -330,7 +330,9 @@ where match guard.consider_finality_proof(round, set_id) { Consider::RejectPast => return Action::Discard(cost::OUTDATED_MESSAGE), Consider::RejectFuture => return Action::Discard(cost::FUTURE_MESSAGE), - Consider::RejectOutOfScope => return Action::Discard(cost::OUT_OF_SCOPE_MESSAGE), + // When we can't evaluate, it's our fault (e.g. filter not initialized yet), we + // discard the proof without punishing or rewarding the sending peer. + Consider::CannotEvaluate => return Action::DiscardNoReport, Consider::Accept => {}, } @@ -357,7 +359,9 @@ where Action::Keep(self.justifs_topic, benefit::VALIDATED_PROOF) } }) - .unwrap_or(Action::Discard(cost::OUT_OF_SCOPE_MESSAGE)) + // When we can't evaluate, it's our fault (e.g. filter not initialized yet), we + // discard the proof without punishing or rewarding the sending peer. 
+ .unwrap_or(Action::DiscardNoReport) }; if matches!(action, Action::Keep(_, _)) { self.gossip_filter.write().mark_round_as_proven(round); @@ -404,6 +408,7 @@ where self.report(*sender, cb); ValidationResult::Discard }, + Action::DiscardNoReport => ValidationResult::Discard, } } @@ -579,8 +584,8 @@ pub(crate) mod tests { // filter not initialized let res = gv.validate(&mut context, &sender, &encoded); assert!(matches!(res, ValidationResult::Discard)); - expected_report.cost_benefit = cost::OUT_OF_SCOPE_MESSAGE; - assert_eq!(report_stream.try_recv().unwrap(), expected_report); + // nothing reported + assert!(report_stream.try_recv().is_err()); gv.update_filter(GossipFilterCfg { start: 0, end: 10, validator_set: &validator_set }); // nothing in cache first time diff --git a/substrate/client/consensus/beefy/src/communication/mod.rs b/substrate/client/consensus/beefy/src/communication/mod.rs index 3827559057dd..6fda63688e69 100644 --- a/substrate/client/consensus/beefy/src/communication/mod.rs +++ b/substrate/client/consensus/beefy/src/communication/mod.rs @@ -90,8 +90,6 @@ mod cost { pub(super) const BAD_SIGNATURE: Rep = Rep::new(-100, "BEEFY: Bad signature"); // Message received with vote from voter not in validator set. pub(super) const UNKNOWN_VOTER: Rep = Rep::new(-150, "BEEFY: Unknown voter"); - // A message received that cannot be evaluated relative to our current state. - pub(super) const OUT_OF_SCOPE_MESSAGE: Rep = Rep::new(-500, "BEEFY: Out-of-scope message"); // Message containing invalid proof. pub(super) const INVALID_PROOF: Rep = Rep::new(-5000, "BEEFY: Invalid commit"); // Reputation cost per signature checked for invalid proof. diff --git a/substrate/client/consensus/beefy/src/import.rs b/substrate/client/consensus/beefy/src/import.rs index fc19ecc30142..ed8ed68c4e8d 100644 --- a/substrate/client/consensus/beefy/src/import.rs +++ b/substrate/client/consensus/beefy/src/import.rs @@ -159,7 +159,7 @@ where // The proof is valid and the block is imported and final, we can import. debug!( target: LOG_TARGET, - "🥩 import justif {:?} for block number {:?}.", proof, number + "🥩 import justif {} for block number {:?}.", proof, number ); // Send the justification to the BEEFY voter for processing. 
self.justification_sender diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 11a105561e47..323af1bc8305 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -31,7 +31,7 @@ use crate::{ import::BeefyBlockImport, metrics::register_metrics, }; -use futures::{stream::Fuse, StreamExt}; +use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, warn}; use parking_lot::Mutex; use prometheus::Registry; @@ -40,17 +40,21 @@ use sc_consensus::BlockImport; use sc_network::{NetworkRequest, NotificationService, ProtocolName}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork, Syncing as GossipSyncing}; use sp_api::ProvideRuntimeApi; -use sp_blockchain::{ - Backend as BlockchainBackend, Error as ClientError, HeaderBackend, Result as ClientResult, -}; +use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_consensus_beefy::{ - ecdsa_crypto::AuthorityId, BeefyApi, MmrRootHash, PayloadProvider, ValidatorSet, + ecdsa_crypto::AuthorityId, BeefyApi, ConsensusLog, MmrRootHash, PayloadProvider, ValidatorSet, + BEEFY_ENGINE_ID, }; use sp_keystore::KeystorePtr; use sp_mmr_primitives::MmrApi; use sp_runtime::traits::{Block, Header as HeaderT, NumberFor, Zero}; -use std::{collections::BTreeMap, marker::PhantomData, sync::Arc, time::Duration}; +use std::{ + collections::{BTreeMap, VecDeque}, + marker::PhantomData, + sync::Arc, + time::Duration, +}; mod aux_schema; mod error; @@ -63,9 +67,19 @@ pub mod communication; pub mod import; pub mod justification; +use crate::{ + communication::{gossip::GossipValidator, peers::PeerReport}, + justification::BeefyVersionedFinalityProof, + keystore::BeefyKeystore, + metrics::VoterMetrics, + round::Rounds, + worker::{BeefyWorker, PersistedState}, +}; pub use communication::beefy_protocol_name::{ gossip_protocol_name, justifications_protocol_name as justifs_protocol_name, }; +use sc_utils::mpsc::TracingUnboundedReceiver; +use sp_runtime::generic::OpaqueDigestItemId; #[cfg(test)] mod tests; @@ -209,6 +223,247 @@ pub struct BeefyParams { /// Handler for incoming BEEFY justifications requests from a remote peer. pub on_demand_justifications_handler: BeefyJustifsRequestHandler, } +/// Helper object holding BEEFY worker communication/gossip components. +/// +/// These are created once, but will be reused if worker is restarted/reinitialized. +pub(crate) struct BeefyComms { + pub gossip_engine: GossipEngine, + pub gossip_validator: Arc>, + pub gossip_report_stream: TracingUnboundedReceiver, + pub on_demand_justifications: OnDemandJustificationsEngine, +} + +/// Helper builder object for building [worker::BeefyWorker]. +/// +/// It has to do it in two steps: initialization and build, because the first step can sleep waiting +/// for certain chain and backend conditions, and while sleeping we still need to pump the +/// GossipEngine. Once initialization is done, the GossipEngine (and other pieces) are added to get +/// the complete [worker::BeefyWorker] object. 
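+/// (The caller is expected to keep driving the gossip engine, e.g. from a `select!` loop, while
+/// `async_initialize` awaits these conditions; see `start_beefy_gadget`.)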
+pub(crate) struct BeefyWorkerBuilder { + // utilities + backend: Arc, + runtime: Arc, + key_store: BeefyKeystore, + // voter metrics + metrics: Option, + persisted_state: PersistedState, +} + +impl BeefyWorkerBuilder +where + B: Block + codec::Codec, + BE: Backend, + R: ProvideRuntimeApi, + R::Api: BeefyApi, +{ + /// This will wait for the chain to enable BEEFY (if not yet enabled) and also wait for the + /// backend to sync all headers required by the voter to build a contiguous chain of mandatory + /// justifications. Then it builds the initial voter state using a combination of previously + /// persisted state in AUX DB and latest chain information/progress. + /// + /// Returns a sane `BeefyWorkerBuilder` that can build the `BeefyWorker`. + pub async fn async_initialize( + backend: Arc, + runtime: Arc, + key_store: BeefyKeystore, + metrics: Option, + min_block_delta: u32, + gossip_validator: Arc>, + finality_notifications: &mut Fuse>, + ) -> Result { + // Wait for BEEFY pallet to be active before starting voter. + let (beefy_genesis, best_grandpa) = + wait_for_runtime_pallet(&*runtime, finality_notifications).await?; + + let persisted_state = Self::load_or_init_state( + beefy_genesis, + best_grandpa, + min_block_delta, + backend.clone(), + runtime.clone(), + &key_store, + &metrics, + ) + .await?; + // Update the gossip validator with the right starting round and set id. + persisted_state + .gossip_filter_config() + .map(|f| gossip_validator.update_filter(f))?; + + Ok(BeefyWorkerBuilder { backend, runtime, key_store, metrics, persisted_state }) + } + + /// Takes rest of missing pieces as params and builds the `BeefyWorker`. + pub fn build( + self, + payload_provider: P, + sync: Arc, + comms: BeefyComms, + links: BeefyVoterLinks, + pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, + ) -> BeefyWorker { + BeefyWorker { + backend: self.backend, + runtime: self.runtime, + key_store: self.key_store, + metrics: self.metrics, + persisted_state: self.persisted_state, + payload_provider, + sync, + comms, + links, + pending_justifications, + } + } + + // If no persisted state present, walk back the chain from first GRANDPA notification to either: + // - latest BEEFY finalized block, or if none found on the way, + // - BEEFY pallet genesis; + // Enqueue any BEEFY mandatory blocks (session boundaries) found on the way, for voter to + // finalize. + async fn init_state( + beefy_genesis: NumberFor, + best_grandpa: ::Header, + min_block_delta: u32, + backend: Arc, + runtime: Arc, + ) -> Result, Error> { + let blockchain = backend.blockchain(); + + let beefy_genesis = runtime + .runtime_api() + .beefy_genesis(best_grandpa.hash()) + .ok() + .flatten() + .filter(|genesis| *genesis == beefy_genesis) + .ok_or_else(|| Error::Backend("BEEFY pallet expected to be active.".into()))?; + // Walk back the imported blocks and initialize voter either, at the last block with + // a BEEFY justification, or at pallet genesis block; voter will resume from there. + let mut sessions = VecDeque::new(); + let mut header = best_grandpa.clone(); + let state = loop { + if let Some(true) = blockchain + .justifications(header.hash()) + .ok() + .flatten() + .map(|justifs| justifs.get(BEEFY_ENGINE_ID).is_some()) + { + debug!( + target: LOG_TARGET, + "🥩 Initialize BEEFY voter at last BEEFY finalized block: {:?}.", + *header.number() + ); + let best_beefy = *header.number(); + // If no session boundaries detected so far, just initialize new rounds here. 
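+ // (This is the case when the last BEEFY-finalized block lies within the current
+ // session, so the active set at that header seeds the resumed round.)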
+ if sessions.is_empty() { + let active_set = + expect_validator_set(runtime.as_ref(), backend.as_ref(), &header).await?; + let mut rounds = Rounds::new(best_beefy, active_set); + // Mark the round as already finalized. + rounds.conclude(best_beefy); + sessions.push_front(rounds); + } + let state = PersistedState::checked_new( + best_grandpa, + best_beefy, + sessions, + min_block_delta, + beefy_genesis, + ) + .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))?; + break state + } + + if *header.number() == beefy_genesis { + // We've reached BEEFY genesis, initialize voter here. + let genesis_set = + expect_validator_set(runtime.as_ref(), backend.as_ref(), &header).await?; + info!( + target: LOG_TARGET, + "🥩 Loading BEEFY voter state from genesis on what appears to be first startup. \ + Starting voting rounds at block {:?}, genesis validator set {:?}.", + beefy_genesis, + genesis_set, + ); + + sessions.push_front(Rounds::new(beefy_genesis, genesis_set)); + break PersistedState::checked_new( + best_grandpa, + Zero::zero(), + sessions, + min_block_delta, + beefy_genesis, + ) + .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))? + } + + if let Some(active) = find_authorities_change::(&header) { + debug!( + target: LOG_TARGET, + "🥩 Marking block {:?} as BEEFY Mandatory.", + *header.number() + ); + sessions.push_front(Rounds::new(*header.number(), active)); + } + + // Move up the chain. + header = wait_for_parent_header(blockchain, header, HEADER_SYNC_DELAY).await?; + }; + + aux_schema::write_current_version(backend.as_ref())?; + aux_schema::write_voter_state(backend.as_ref(), &state)?; + Ok(state) + } + + async fn load_or_init_state( + beefy_genesis: NumberFor, + best_grandpa: ::Header, + min_block_delta: u32, + backend: Arc, + runtime: Arc, + key_store: &BeefyKeystore, + metrics: &Option, + ) -> Result, Error> { + // Initialize voter state from AUX DB if compatible. + if let Some(mut state) = crate::aux_schema::load_persistent(backend.as_ref())? + // Verify state pallet genesis matches runtime. + .filter(|state| state.pallet_genesis() == beefy_genesis) + { + // Overwrite persisted state with current best GRANDPA block. + state.set_best_grandpa(best_grandpa.clone()); + // Overwrite persisted data with newly provided `min_block_delta`. + state.set_min_block_delta(min_block_delta); + debug!(target: LOG_TARGET, "🥩 Loading BEEFY voter state from db: {:?}.", state); + + // Make sure that all the headers that we need have been synced. + let mut new_sessions = vec![]; + let mut header = best_grandpa.clone(); + while *header.number() > state.best_beefy() { + if state.voting_oracle().can_add_session(*header.number()) { + if let Some(active) = find_authorities_change::(&header) { + new_sessions.push((active, *header.number())); + } + } + header = + wait_for_parent_header(backend.blockchain(), header, HEADER_SYNC_DELAY).await?; + } + + // Make sure we didn't miss any sessions during node restart. + for (validator_set, new_session_start) in new_sessions.drain(..).rev() { + debug!( + target: LOG_TARGET, + "🥩 Handling missed BEEFY session after node restart: {:?}.", + new_session_start + ); + state.init_session_at(new_session_start, validator_set, key_store, metrics); + } + return Ok(state) + } + + // No valid voter-state persisted, re-initialize from pallet genesis. + Self::init_state(beefy_genesis, best_grandpa, min_block_delta, backend, runtime).await + } +} /// Start the BEEFY gadget. 
/// @@ -277,7 +532,7 @@ pub async fn start_beefy_gadget( known_peers, prometheus_registry.clone(), ); - let mut beefy_comms = worker::BeefyComms { + let mut beefy_comms = BeefyComms { gossip_engine, gossip_validator, gossip_report_stream, @@ -287,57 +542,45 @@ pub async fn start_beefy_gadget( // We re-create and re-run the worker in this loop in order to quickly reinit and resume after // select recoverable errors. loop { - // Wait for BEEFY pallet to be active before starting voter. - let (beefy_genesis, best_grandpa) = match wait_for_runtime_pallet( - &*runtime, - &mut beefy_comms.gossip_engine, - &mut finality_notifications, - ) - .await - { - Ok(res) => res, - Err(e) => { - error!(target: LOG_TARGET, "Error: {:?}. Terminating.", e); - return - }, - }; - - let mut worker_base = worker::BeefyWorkerBase { - backend: backend.clone(), - runtime: runtime.clone(), - key_store: key_store.clone().into(), - metrics: metrics.clone(), - _phantom: Default::default(), - }; - - let persisted_state = match worker_base - .load_or_init_state(beefy_genesis, best_grandpa, min_block_delta) - .await - { - Ok(state) => state, - Err(e) => { - error!(target: LOG_TARGET, "Error: {:?}. Terminating.", e); - return - }, + // Make sure to pump gossip engine while waiting for initialization conditions. + let worker_builder = loop { + futures::select! { + builder_init_result = BeefyWorkerBuilder::async_initialize( + backend.clone(), + runtime.clone(), + key_store.clone().into(), + metrics.clone(), + min_block_delta, + beefy_comms.gossip_validator.clone(), + &mut finality_notifications, + ).fuse() => { + match builder_init_result { + Ok(builder) => break builder, + Err(e) => { + error!(target: LOG_TARGET, "🥩 Error: {:?}. Terminating.", e); + return + }, + } + }, + // Pump peer reports + _ = &mut beefy_comms.gossip_report_stream.next() => { + continue + }, + // Pump gossip engine. + _ = &mut beefy_comms.gossip_engine => { + error!(target: LOG_TARGET, "🥩 Gossip engine has unexpectedly terminated."); + return + } + } }; - // Update the gossip validator with the right starting round and set id. - if let Err(e) = persisted_state - .gossip_filter_config() - .map(|f| beefy_comms.gossip_validator.update_filter(f)) - { - error!(target: LOG_TARGET, "Error: {:?}. Terminating.", e); - return - } - let worker = worker::BeefyWorker { - base: worker_base, - payload_provider: payload_provider.clone(), - sync: sync.clone(), - comms: beefy_comms, - links: links.clone(), - pending_justifications: BTreeMap::new(), - persisted_state, - }; + let worker = worker_builder.build( + payload_provider.clone(), + sync.clone(), + beefy_comms, + links.clone(), + BTreeMap::new(), + ); match futures::future::select( Box::pin(worker.run(&mut block_import_justif, &mut finality_notifications)), @@ -404,9 +647,8 @@ where /// Should be called only once during worker initialization. async fn wait_for_runtime_pallet( runtime: &R, - mut gossip_engine: &mut GossipEngine, finality: &mut Fuse>, -) -> ClientResult<(NumberFor, ::Header)> +) -> Result<(NumberFor, ::Header), Error> where B: Block, R: ProvideRuntimeApi, @@ -414,33 +656,24 @@ where { info!(target: LOG_TARGET, "🥩 BEEFY gadget waiting for BEEFY pallet to become available..."); loop { - futures::select! 
{ - notif = finality.next() => { - let notif = match notif { - Some(notif) => notif, - None => break - }; - let at = notif.header.hash(); - if let Some(start) = runtime.runtime_api().beefy_genesis(at).ok().flatten() { - if *notif.header.number() >= start { - // Beefy pallet available, return header for best grandpa at the time. - info!( - target: LOG_TARGET, - "🥩 BEEFY pallet available: block {:?} beefy genesis {:?}", - notif.header.number(), start - ); - return Ok((start, notif.header)) - } - } - }, - _ = gossip_engine => { - break + let notif = finality.next().await.ok_or_else(|| { + let err_msg = "🥩 Finality stream has unexpectedly terminated.".into(); + error!(target: LOG_TARGET, "{}", err_msg); + Error::Backend(err_msg) + })?; + let at = notif.header.hash(); + if let Some(start) = runtime.runtime_api().beefy_genesis(at).ok().flatten() { + if *notif.header.number() >= start { + // Beefy pallet available, return header for best grandpa at the time. + info!( + target: LOG_TARGET, + "🥩 BEEFY pallet available: block {:?} beefy genesis {:?}", + notif.header.number(), start + ); + return Ok((start, notif.header)) } } } - let err_msg = "🥩 Gossip engine has unexpectedly terminated.".into(); - error!(target: LOG_TARGET, "{}", err_msg); - Err(ClientError::Backend(err_msg)) } /// Provides validator set active `at_header`. It tries to get it from state, otherwise falls @@ -474,7 +707,7 @@ where if let Ok(Some(active)) = runtime.runtime_api().validator_set(header.hash()) { return Ok(active) } else { - match worker::find_authorities_change::(&header) { + match find_authorities_change::(&header) { Some(active) => return Ok(active), // Move up the chain. Ultimately we'll get it from chain genesis state, or error out // there. @@ -486,3 +719,18 @@ where } } } + +/// Scan the `header` digest log for a BEEFY validator set change. Return either the new +/// validator set or `None` in case no validator set change has been signaled. 
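+/// (Only the first matching `ConsensusLog::AuthoritiesChange` entry in the digest is used.)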
+pub(crate) fn find_authorities_change(header: &B::Header) -> Option> +where + B: Block, +{ + let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); + + let filter = |log: ConsensusLog| match log { + ConsensusLog::AuthoritiesChange(validator_set) => Some(validator_set), + _ => None, + }; + header.digest().convert_first(|l| l.try_to(id).and_then(filter)) +} diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 0e6ff210b158..d106c9dcd881 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -32,8 +32,8 @@ use crate::{ gossip_protocol_name, justification::*, wait_for_runtime_pallet, - worker::{BeefyWorkerBase, PersistedState}, - BeefyRPCLinks, BeefyVoterLinks, KnownPeers, + worker::PersistedState, + BeefyRPCLinks, BeefyVoterLinks, BeefyWorkerBuilder, KnownPeers, }; use futures::{future, stream::FuturesUnordered, Future, FutureExt, StreamExt}; use parking_lot::Mutex; @@ -368,27 +368,19 @@ async fn voter_init_setup( api: &TestApi, ) -> Result, Error> { let backend = net.peer(0).client().as_backend(); - let known_peers = Arc::new(Mutex::new(KnownPeers::new())); - let (gossip_validator, _) = GossipValidator::new(known_peers); - let gossip_validator = Arc::new(gossip_validator); - let mut gossip_engine = sc_network_gossip::GossipEngine::new( - net.peer(0).network_service().clone(), - net.peer(0).sync_service().clone(), - net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(), - "/beefy/whatever", - gossip_validator, - None, - ); - let (beefy_genesis, best_grandpa) = - wait_for_runtime_pallet(api, &mut gossip_engine, finality).await.unwrap(); - let mut worker_base = BeefyWorkerBase { + let (beefy_genesis, best_grandpa) = wait_for_runtime_pallet(api, finality).await.unwrap(); + let key_store = None.into(); + let metrics = None; + BeefyWorkerBuilder::load_or_init_state( + beefy_genesis, + best_grandpa, + 1, backend, - runtime: Arc::new(api.clone()), - key_store: None.into(), - metrics: None, - _phantom: Default::default(), - }; - worker_base.load_or_init_state(beefy_genesis, best_grandpa, 1).await + Arc::new(api.clone()), + &key_store, + &metrics, + ) + .await } // Spawns beefy voters. Returns a future to spawn on the runtime. 
@@ -1065,32 +1057,7 @@ async fn should_initialize_voter_at_custom_genesis() { net.peer(0).client().as_client().finalize_block(hashes[8], None).unwrap(); // load persistent state - nothing in DB, should init at genesis - // - // NOTE: code from `voter_init_setup()` is moved here because the new network event system - // doesn't allow creating a new `GossipEngine` as the notification handle is consumed by the - // first `GossipEngine` - let known_peers = Arc::new(Mutex::new(KnownPeers::new())); - let (gossip_validator, _) = GossipValidator::new(known_peers); - let gossip_validator = Arc::new(gossip_validator); - let mut gossip_engine = sc_network_gossip::GossipEngine::new( - net.peer(0).network_service().clone(), - net.peer(0).sync_service().clone(), - net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(), - "/beefy/whatever", - gossip_validator, - None, - ); - let (beefy_genesis, best_grandpa) = - wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); - let mut worker_base = BeefyWorkerBase { - backend: backend.clone(), - runtime: Arc::new(api), - key_store: None.into(), - metrics: None, - _phantom: Default::default(), - }; - let persisted_state = - worker_base.load_or_init_state(beefy_genesis, best_grandpa, 1).await.unwrap(); + let persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); // Test initialization at session boundary. // verify voter initialized with single session starting at block `custom_pallet_genesis` (7) @@ -1120,18 +1087,7 @@ async fn should_initialize_voter_at_custom_genesis() { net.peer(0).client().as_client().finalize_block(hashes[10], None).unwrap(); // load persistent state - state preset in DB, but with different pallet genesis - // the network state persists and uses the old `GossipEngine` initialized for `peer(0)` - let (beefy_genesis, best_grandpa) = - wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); - let mut worker_base = BeefyWorkerBase { - backend: backend.clone(), - runtime: Arc::new(api), - key_store: None.into(), - metrics: None, - _phantom: Default::default(), - }; - let new_persisted_state = - worker_base.load_or_init_state(beefy_genesis, best_grandpa, 1).await.unwrap(); + let new_persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); // verify voter initialized with single session starting at block `new_pallet_genesis` (10) let sessions = new_persisted_state.voting_oracle().sessions(); @@ -1322,32 +1278,7 @@ async fn should_catch_up_when_loading_saved_voter_state() { let api = TestApi::with_validator_set(&validator_set); // load persistent state - nothing in DB, should init at genesis - // - // NOTE: code from `voter_init_setup()` is moved here because the new network event system - // doesn't allow creating a new `GossipEngine` as the notification handle is consumed by the - // first `GossipEngine` - let known_peers = Arc::new(Mutex::new(KnownPeers::new())); - let (gossip_validator, _) = GossipValidator::new(known_peers); - let gossip_validator = Arc::new(gossip_validator); - let mut gossip_engine = sc_network_gossip::GossipEngine::new( - net.peer(0).network_service().clone(), - net.peer(0).sync_service().clone(), - net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(), - "/beefy/whatever", - gossip_validator, - None, - ); - let (beefy_genesis, best_grandpa) = - wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); - let mut worker_base = BeefyWorkerBase { - backend: 
backend.clone(), - runtime: Arc::new(api.clone()), - key_store: None.into(), - metrics: None, - _phantom: Default::default(), - }; - let persisted_state = - worker_base.load_or_init_state(beefy_genesis, best_grandpa, 1).await.unwrap(); + let persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); // Test initialization at session boundary. // verify voter initialized with two sessions starting at blocks 1 and 10 @@ -1374,18 +1305,7 @@ async fn should_catch_up_when_loading_saved_voter_state() { // finalize 25 without justifications net.peer(0).client().as_client().finalize_block(hashes[25], None).unwrap(); // load persistent state - state preset in DB - // the network state persists and uses the old `GossipEngine` initialized for `peer(0)` - let (beefy_genesis, best_grandpa) = - wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); - let mut worker_base = BeefyWorkerBase { - backend: backend.clone(), - runtime: Arc::new(api), - key_store: None.into(), - metrics: None, - _phantom: Default::default(), - }; - let persisted_state = - worker_base.load_or_init_state(beefy_genesis, best_grandpa, 1).await.unwrap(); + let persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); // Verify voter initialized with old sessions plus a new one starting at block 20. // There shouldn't be any duplicates. diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index d7a229003cc4..c8eb19621ba5 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -17,46 +17,42 @@ // along with this program. If not, see . use crate::{ - aux_schema, communication::{ - gossip::{proofs_topic, votes_topic, GossipFilterCfg, GossipMessage, GossipValidator}, + gossip::{proofs_topic, votes_topic, GossipFilterCfg, GossipMessage}, peers::PeerReport, - request_response::outgoing_requests_engine::{OnDemandJustificationsEngine, ResponseInfo}, + request_response::outgoing_requests_engine::ResponseInfo, }, error::Error, - expect_validator_set, + find_authorities_change, justification::BeefyVersionedFinalityProof, keystore::BeefyKeystore, metric_inc, metric_set, metrics::VoterMetrics, round::{Rounds, VoteImportResult}, - wait_for_parent_header, BeefyVoterLinks, HEADER_SYNC_DELAY, LOG_TARGET, + BeefyComms, BeefyVoterLinks, LOG_TARGET, }; use codec::{Codec, Decode, DecodeAll, Encode}; use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, log_enabled, trace, warn}; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; -use sc_network_gossip::GossipEngine; -use sc_utils::{mpsc::TracingUnboundedReceiver, notification::NotificationReceiver}; +use sc_utils::notification::NotificationReceiver; use sp_api::ProvideRuntimeApi; use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; -use sp_blockchain::Backend as BlockchainBackend; use sp_consensus::SyncOracle; use sp_consensus_beefy::{ check_equivocation_proof, ecdsa_crypto::{AuthorityId, Signature}, - BeefyApi, BeefySignatureHasher, Commitment, ConsensusLog, EquivocationProof, PayloadProvider, - ValidatorSet, VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, + BeefyApi, BeefySignatureHasher, Commitment, EquivocationProof, PayloadProvider, ValidatorSet, + VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, }; use sp_runtime::{ - generic::{BlockId, OpaqueDigestItemId}, + generic::BlockId, traits::{Block, Header, NumberFor, Zero}, 
SaturatedConversion, }; use std::{ collections::{BTreeMap, BTreeSet, VecDeque}, fmt::Debug, - marker::PhantomData, sync::Arc, }; @@ -180,8 +176,8 @@ impl VoterOracle { } } - // Check if an observed session can be added to the Oracle. - fn can_add_session(&self, session_start: NumberFor) -> bool { + /// Check if an observed session can be added to the Oracle. + pub fn can_add_session(&self, session_start: NumberFor) -> bool { let latest_known_session_start = self.sessions.back().map(|session| session.session_start()); Some(session_start) > latest_known_session_start @@ -319,229 +315,28 @@ impl PersistedState { self.voting_oracle.best_grandpa_block_header = best_grandpa; } + pub fn voting_oracle(&self) -> &VoterOracle { + &self.voting_oracle + } + pub(crate) fn gossip_filter_config(&self) -> Result, Error> { let (start, end) = self.voting_oracle.accepted_interval()?; let validator_set = self.voting_oracle.current_validator_set()?; Ok(GossipFilterCfg { start, end, validator_set }) } -} - -/// Helper object holding BEEFY worker communication/gossip components. -/// -/// These are created once, but will be reused if worker is restarted/reinitialized. -pub(crate) struct BeefyComms { - pub gossip_engine: GossipEngine, - pub gossip_validator: Arc>, - pub gossip_report_stream: TracingUnboundedReceiver, - pub on_demand_justifications: OnDemandJustificationsEngine, -} - -pub(crate) struct BeefyWorkerBase { - // utilities - pub backend: Arc, - pub runtime: Arc, - pub key_store: BeefyKeystore, - - /// BEEFY client metrics. - pub metrics: Option, - - pub _phantom: PhantomData, -} - -impl BeefyWorkerBase -where - B: Block + Codec, - BE: Backend, - R: ProvideRuntimeApi, - R::Api: BeefyApi, -{ - // If no persisted state present, walk back the chain from first GRANDPA notification to either: - // - latest BEEFY finalized block, or if none found on the way, - // - BEEFY pallet genesis; - // Enqueue any BEEFY mandatory blocks (session boundaries) found on the way, for voter to - // finalize. - async fn init_state( - &self, - beefy_genesis: NumberFor, - best_grandpa: ::Header, - min_block_delta: u32, - ) -> Result, Error> { - let blockchain = self.backend.blockchain(); - - let beefy_genesis = self - .runtime - .runtime_api() - .beefy_genesis(best_grandpa.hash()) - .ok() - .flatten() - .filter(|genesis| *genesis == beefy_genesis) - .ok_or_else(|| Error::Backend("BEEFY pallet expected to be active.".into()))?; - // Walk back the imported blocks and initialize voter either, at the last block with - // a BEEFY justification, or at pallet genesis block; voter will resume from there. - let mut sessions = VecDeque::new(); - let mut header = best_grandpa.clone(); - let state = loop { - if let Some(true) = blockchain - .justifications(header.hash()) - .ok() - .flatten() - .map(|justifs| justifs.get(BEEFY_ENGINE_ID).is_some()) - { - debug!( - target: LOG_TARGET, - "🥩 Initialize BEEFY voter at last BEEFY finalized block: {:?}.", - *header.number() - ); - let best_beefy = *header.number(); - // If no session boundaries detected so far, just initialize new rounds here. - if sessions.is_empty() { - let active_set = - expect_validator_set(self.runtime.as_ref(), self.backend.as_ref(), &header) - .await?; - let mut rounds = Rounds::new(best_beefy, active_set); - // Mark the round as already finalized. 
- rounds.conclude(best_beefy); - sessions.push_front(rounds); - } - let state = PersistedState::checked_new( - best_grandpa, - best_beefy, - sessions, - min_block_delta, - beefy_genesis, - ) - .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))?; - break state - } - - if *header.number() == beefy_genesis { - // We've reached BEEFY genesis, initialize voter here. - let genesis_set = - expect_validator_set(self.runtime.as_ref(), self.backend.as_ref(), &header) - .await?; - info!( - target: LOG_TARGET, - "🥩 Loading BEEFY voter state from genesis on what appears to be first startup. \ - Starting voting rounds at block {:?}, genesis validator set {:?}.", - beefy_genesis, - genesis_set, - ); - - sessions.push_front(Rounds::new(beefy_genesis, genesis_set)); - break PersistedState::checked_new( - best_grandpa, - Zero::zero(), - sessions, - min_block_delta, - beefy_genesis, - ) - .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))? - } - - if let Some(active) = find_authorities_change::(&header) { - debug!( - target: LOG_TARGET, - "🥩 Marking block {:?} as BEEFY Mandatory.", - *header.number() - ); - sessions.push_front(Rounds::new(*header.number(), active)); - } - - // Move up the chain. - header = wait_for_parent_header(blockchain, header, HEADER_SYNC_DELAY).await?; - }; - - aux_schema::write_current_version(self.backend.as_ref())?; - aux_schema::write_voter_state(self.backend.as_ref(), &state)?; - Ok(state) - } - - pub async fn load_or_init_state( - &mut self, - beefy_genesis: NumberFor, - best_grandpa: ::Header, - min_block_delta: u32, - ) -> Result, Error> { - // Initialize voter state from AUX DB if compatible. - if let Some(mut state) = crate::aux_schema::load_persistent(self.backend.as_ref())? - // Verify state pallet genesis matches runtime. - .filter(|state| state.pallet_genesis() == beefy_genesis) - { - // Overwrite persisted state with current best GRANDPA block. - state.set_best_grandpa(best_grandpa.clone()); - // Overwrite persisted data with newly provided `min_block_delta`. - state.set_min_block_delta(min_block_delta); - debug!(target: LOG_TARGET, "🥩 Loading BEEFY voter state from db: {:?}.", state); - - // Make sure that all the headers that we need have been synced. - let mut new_sessions = vec![]; - let mut header = best_grandpa.clone(); - while *header.number() > state.best_beefy() { - if state.voting_oracle.can_add_session(*header.number()) { - if let Some(active) = find_authorities_change::(&header) { - new_sessions.push((active, *header.number())); - } - } - header = - wait_for_parent_header(self.backend.blockchain(), header, HEADER_SYNC_DELAY) - .await?; - } - - // Make sure we didn't miss any sessions during node restart. - for (validator_set, new_session_start) in new_sessions.drain(..).rev() { - debug!( - target: LOG_TARGET, - "🥩 Handling missed BEEFY session after node restart: {:?}.", - new_session_start - ); - self.init_session_at(&mut state, validator_set, new_session_start); - } - return Ok(state) - } - - // No valid voter-state persisted, re-initialize from pallet genesis. - self.init_state(beefy_genesis, best_grandpa, min_block_delta).await - } - - /// Verify `active` validator set for `block` against the key store - /// - /// We want to make sure that we have _at least one_ key in our keystore that - /// is part of the validator set, that's because if there are no local keys - /// then we can't perform our job as a validator. 
- /// - /// Note that for a non-authority node there will be no keystore, and we will - /// return an error and don't check. The error can usually be ignored. - fn verify_validator_set( - &self, - block: &NumberFor, - active: &ValidatorSet, - ) -> Result<(), Error> { - let active: BTreeSet<&AuthorityId> = active.validators().iter().collect(); - - let public_keys = self.key_store.public_keys()?; - let store: BTreeSet<&AuthorityId> = public_keys.iter().collect(); - - if store.intersection(&active).count() == 0 { - let msg = "no authority public key found in store".to_string(); - debug!(target: LOG_TARGET, "🥩 for block {:?} {}", block, msg); - metric_inc!(self.metrics, beefy_no_authority_found_in_store); - Err(Error::Keystore(msg)) - } else { - Ok(()) - } - } /// Handle session changes by starting new voting round for mandatory blocks. - fn init_session_at( + pub fn init_session_at( &mut self, - persisted_state: &mut PersistedState, - validator_set: ValidatorSet, new_session_start: NumberFor, + validator_set: ValidatorSet, + key_store: &BeefyKeystore, + metrics: &Option, ) { debug!(target: LOG_TARGET, "🥩 New active validator set: {:?}", validator_set); // BEEFY should finalize a mandatory block during each session. - if let Ok(active_session) = persisted_state.voting_oracle.active_rounds() { + if let Ok(active_session) = self.voting_oracle.active_rounds() { if !active_session.mandatory_done() { debug!( target: LOG_TARGET, @@ -549,20 +344,20 @@ where validator_set.id(), active_session.validator_set_id(), ); - metric_inc!(self.metrics, beefy_lagging_sessions); + metric_inc!(metrics, beefy_lagging_sessions); } } if log_enabled!(target: LOG_TARGET, log::Level::Debug) { // verify the new validator set - only do it if we're also logging the warning - let _ = self.verify_validator_set(&new_session_start, &validator_set); + if verify_validator_set::(&new_session_start, &validator_set, key_store).is_err() { + metric_inc!(metrics, beefy_no_authority_found_in_store); + } } let id = validator_set.id(); - persisted_state - .voting_oracle - .add_session(Rounds::new(new_session_start, validator_set)); - metric_set!(self.metrics, beefy_validator_set_id, id); + self.voting_oracle.add_session(Rounds::new(new_session_start, validator_set)); + metric_set!(metrics, beefy_validator_set_id, id); info!( target: LOG_TARGET, "🥩 New Rounds for validator set id: {:?} with session_start {:?}", @@ -574,9 +369,10 @@ where /// A BEEFY worker/voter that follows the BEEFY protocol pub(crate) struct BeefyWorker { - pub base: BeefyWorkerBase, - - // utils + // utilities + pub backend: Arc, + pub runtime: Arc, + pub key_store: BeefyKeystore, pub payload_provider: P, pub sync: Arc, @@ -592,6 +388,8 @@ pub(crate) struct BeefyWorker { pub pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, /// Persisted voter state. 
pub persisted_state: PersistedState, + /// BEEFY voter metrics + pub metrics: Option, } impl BeefyWorker @@ -622,8 +420,12 @@ where validator_set: ValidatorSet, new_session_start: NumberFor, ) { - self.base - .init_session_at(&mut self.persisted_state, validator_set, new_session_start); + self.persisted_state.init_session_at( + new_session_start, + validator_set, + &self.key_store, + &self.metrics, + ); } fn handle_finality_notification( @@ -639,8 +441,7 @@ where notification.tree_route, ); - self.base - .runtime + self.runtime .runtime_api() .beefy_genesis(header.hash()) .ok() @@ -654,7 +455,7 @@ where self.persisted_state.set_best_grandpa(header.clone()); // Check all (newly) finalized blocks for new session(s). - let backend = self.base.backend.clone(); + let backend = self.backend.clone(); for header in notification .tree_route .iter() @@ -673,7 +474,7 @@ where } if new_session_added { - crate::aux_schema::write_voter_state(&*self.base.backend, &self.persisted_state) + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) .map_err(|e| Error::Backend(e.to_string()))?; } @@ -707,7 +508,7 @@ where true, ); }, - RoundAction::Drop => metric_inc!(self.base.metrics, beefy_stale_votes), + RoundAction::Drop => metric_inc!(self.metrics, beefy_stale_votes), RoundAction::Enqueue => error!(target: LOG_TARGET, "🥩 unexpected vote: {:?}.", vote), }; Ok(()) @@ -727,23 +528,23 @@ where match self.voting_oracle().triage_round(block_num)? { RoundAction::Process => { debug!(target: LOG_TARGET, "🥩 Process justification for round: {:?}.", block_num); - metric_inc!(self.base.metrics, beefy_imported_justifications); + metric_inc!(self.metrics, beefy_imported_justifications); self.finalize(justification)? }, RoundAction::Enqueue => { debug!(target: LOG_TARGET, "🥩 Buffer justification for round: {:?}.", block_num); if self.pending_justifications.len() < MAX_BUFFERED_JUSTIFICATIONS { self.pending_justifications.entry(block_num).or_insert(justification); - metric_inc!(self.base.metrics, beefy_buffered_justifications); + metric_inc!(self.metrics, beefy_buffered_justifications); } else { - metric_inc!(self.base.metrics, beefy_buffered_justifications_dropped); + metric_inc!(self.metrics, beefy_buffered_justifications_dropped); warn!( target: LOG_TARGET, "🥩 Buffer justification dropped for round: {:?}.", block_num ); } }, - RoundAction::Drop => metric_inc!(self.base.metrics, beefy_stale_justifications), + RoundAction::Drop => metric_inc!(self.metrics, beefy_stale_justifications), }; Ok(()) } @@ -765,7 +566,7 @@ where // We created the `finality_proof` and know to be valid. // New state is persisted after finalization. 
self.finalize(finality_proof.clone())?; - metric_inc!(self.base.metrics, beefy_good_votes_processed); + metric_inc!(self.metrics, beefy_good_votes_processed); return Ok(Some(finality_proof)) }, VoteImportResult::Ok => { @@ -776,20 +577,17 @@ where .map(|(mandatory_num, _)| mandatory_num == block_number) .unwrap_or(false) { - crate::aux_schema::write_voter_state( - &*self.base.backend, - &self.persisted_state, - ) - .map_err(|e| Error::Backend(e.to_string()))?; + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) + .map_err(|e| Error::Backend(e.to_string()))?; } - metric_inc!(self.base.metrics, beefy_good_votes_processed); + metric_inc!(self.metrics, beefy_good_votes_processed); }, VoteImportResult::Equivocation(proof) => { - metric_inc!(self.base.metrics, beefy_equivocation_votes); + metric_inc!(self.metrics, beefy_equivocation_votes); self.report_equivocation(proof)?; }, - VoteImportResult::Invalid => metric_inc!(self.base.metrics, beefy_invalid_votes), - VoteImportResult::Stale => metric_inc!(self.base.metrics, beefy_stale_votes), + VoteImportResult::Invalid => metric_inc!(self.metrics, beefy_invalid_votes), + VoteImportResult::Stale => metric_inc!(self.metrics, beefy_stale_votes), }; Ok(None) } @@ -816,15 +614,14 @@ where // Set new best BEEFY block number. self.persisted_state.set_best_beefy(block_num); - crate::aux_schema::write_voter_state(&*self.base.backend, &self.persisted_state) + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) .map_err(|e| Error::Backend(e.to_string()))?; - metric_set!(self.base.metrics, beefy_best_block, block_num); + metric_set!(self.metrics, beefy_best_block, block_num); self.comms.on_demand_justifications.cancel_requests_older_than(block_num); if let Err(e) = self - .base .backend .blockchain() .expect_block_hash_from_id(&BlockId::Number(block_num)) @@ -834,8 +631,7 @@ where .notify(|| Ok::<_, ()>(hash)) .expect("forwards closure result; the closure always returns Ok; qed."); - self.base - .backend + self.backend .append_justification(hash, (BEEFY_ENGINE_ID, finality_proof.encode())) }) { debug!( @@ -872,13 +668,13 @@ where for (num, justification) in justifs_to_process.into_iter() { debug!(target: LOG_TARGET, "🥩 Handle buffered justification for: {:?}.", num); - metric_inc!(self.base.metrics, beefy_imported_justifications); + metric_inc!(self.metrics, beefy_imported_justifications); if let Err(err) = self.finalize(justification) { error!(target: LOG_TARGET, "🥩 Error finalizing block: {}", err); } } metric_set!( - self.base.metrics, + self.metrics, beefy_buffered_justifications, self.pending_justifications.len() ); @@ -890,7 +686,7 @@ where fn try_to_vote(&mut self) -> Result<(), Error> { // Vote if there's now a new vote target. 
if let Some(target) = self.voting_oracle().voting_target() { - metric_set!(self.base.metrics, beefy_should_vote_on, target); + metric_set!(self.metrics, beefy_should_vote_on, target); if target > self.persisted_state.best_voted { self.do_vote(target)?; } @@ -910,7 +706,6 @@ where self.persisted_state.voting_oracle.best_grandpa_block_header.clone() } else { let hash = self - .base .backend .blockchain() .expect_block_hash_from_id(&BlockId::Number(target_number)) @@ -922,7 +717,7 @@ where Error::Backend(err_msg) })?; - self.base.backend.blockchain().expect_header(hash).map_err(|err| { + self.backend.blockchain().expect_header(hash).map_err(|err| { let err_msg = format!( "Couldn't get header for block #{:?} ({:?}) (error: {:?}), skipping vote..", target_number, hash, err @@ -942,7 +737,7 @@ where let rounds = self.persisted_state.voting_oracle.active_rounds_mut()?; let (validators, validator_set_id) = (rounds.validators(), rounds.validator_set_id()); - let authority_id = if let Some(id) = self.base.key_store.authority_id(validators) { + let authority_id = if let Some(id) = self.key_store.authority_id(validators) { debug!(target: LOG_TARGET, "🥩 Local authority id: {:?}", id); id } else { @@ -956,7 +751,7 @@ where let commitment = Commitment { payload, block_number: target_number, validator_set_id }; let encoded_commitment = commitment.encode(); - let signature = match self.base.key_store.sign(&authority_id, &encoded_commitment) { + let signature = match self.key_store.sign(&authority_id, &encoded_commitment) { Ok(sig) => sig, Err(err) => { warn!(target: LOG_TARGET, "🥩 Error signing commitment: {:?}", err); @@ -981,7 +776,7 @@ where .gossip_engine .gossip_message(proofs_topic::(), encoded_proof, true); } else { - metric_inc!(self.base.metrics, beefy_votes_sent); + metric_inc!(self.metrics, beefy_votes_sent); debug!(target: LOG_TARGET, "🥩 Sent vote message: {:?}", vote); let encoded_vote = GossipMessage::::Vote(vote).encode(); self.comms.gossip_engine.gossip_message(votes_topic::(), encoded_vote, false); @@ -989,8 +784,8 @@ where // Persist state after vote to avoid double voting in case of voter restarts. 
self.persisted_state.best_voted = target_number; - metric_set!(self.base.metrics, beefy_best_voted, target_number); - crate::aux_schema::write_voter_state(&*self.base.backend, &self.persisted_state) + metric_set!(self.metrics, beefy_best_voted, target_number); + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) .map_err(|e| Error::Backend(e.to_string())) } @@ -1164,7 +959,7 @@ where if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) { debug!(target: LOG_TARGET, "🥩 Skip report for bad equivocation {:?}", proof); return Ok(()) - } else if let Some(local_id) = self.base.key_store.authority_id(validators) { + } else if let Some(local_id) = self.key_store.authority_id(validators) { if offender_id == local_id { warn!(target: LOG_TARGET, "🥩 Skip equivocation report for own equivocation"); return Ok(()) @@ -1173,7 +968,6 @@ where let number = *proof.round_number(); let hash = self - .base .backend .blockchain() .expect_block_hash_from_id(&BlockId::Number(number)) @@ -1184,7 +978,7 @@ where ); Error::Backend(err_msg) })?; - let runtime_api = self.base.runtime.runtime_api(); + let runtime_api = self.runtime.runtime_api(); // generate key ownership proof at that block let key_owner_proof = match runtime_api .generate_key_ownership_proof(hash, validator_set_id, offender_id) @@ -1201,7 +995,7 @@ where }; // submit equivocation report at **best** block - let best_block_hash = self.base.backend.blockchain().info().best_hash; + let best_block_hash = self.backend.blockchain().info().best_hash; runtime_api .submit_report_equivocation_unsigned_extrinsic(best_block_hash, proof, key_owner_proof) .map_err(Error::RuntimeApi)?; @@ -1210,21 +1004,6 @@ where } } -/// Scan the `header` digest log for a BEEFY validator set change. Return either the new -/// validator set or `None` in case no validator set change has been signaled. -pub(crate) fn find_authorities_change(header: &B::Header) -> Option> -where - B: Block, -{ - let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); - - let filter = |log: ConsensusLog| match log { - ConsensusLog::AuthoritiesChange(validator_set) => Some(validator_set), - _ => None, - }; - header.digest().convert_first(|l| l.try_to(id).and_then(filter)) -} - /// Calculate next block number to vote on. /// /// Return `None` if there is no votable target yet. @@ -1261,11 +1040,42 @@ where } } +/// Verify `active` validator set for `block` against the key store +/// +/// We want to make sure that we have _at least one_ key in our keystore that +/// is part of the validator set, that's because if there are no local keys +/// then we can't perform our job as a validator. +/// +/// Note that for a non-authority node there will be no keystore, and we will +/// return an error and don't check. The error can usually be ignored. 
+fn verify_validator_set( + block: &NumberFor, + active: &ValidatorSet, + key_store: &BeefyKeystore, +) -> Result<(), Error> { + let active: BTreeSet<&AuthorityId> = active.validators().iter().collect(); + + let public_keys = key_store.public_keys()?; + let store: BTreeSet<&AuthorityId> = public_keys.iter().collect(); + + if store.intersection(&active).count() == 0 { + let msg = "no authority public key found in store".to_string(); + debug!(target: LOG_TARGET, "🥩 for block {:?} {}", block, msg); + Err(Error::Keystore(msg)) + } else { + Ok(()) + } +} + #[cfg(test)] pub(crate) mod tests { use super::*; use crate::{ - communication::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, + communication::{ + gossip::GossipValidator, + notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, + request_response::outgoing_requests_engine::OnDemandJustificationsEngine, + }, tests::{ create_beefy_keystore, get_beefy_streams, make_beefy_ids, BeefyPeer, BeefyTestNet, TestApi, @@ -1275,6 +1085,7 @@ pub(crate) mod tests { use futures::{future::poll_fn, task::Poll}; use parking_lot::Mutex; use sc_client_api::{Backend as BackendT, HeaderBackend}; + use sc_network_gossip::GossipEngine; use sc_network_sync::SyncingService; use sc_network_test::TestNetFactory; use sp_blockchain::Backend as BlockchainBackendT; @@ -1283,7 +1094,7 @@ pub(crate) mod tests { known_payloads::MMR_ROOT_ID, mmr::MmrRootProvider, test_utils::{generate_equivocation_proof, Keyring}, - Payload, SignedCommitment, + ConsensusLog, Payload, SignedCommitment, }; use sp_runtime::traits::{Header as HeaderT, One}; use substrate_test_runtime_client::{ @@ -1292,10 +1103,6 @@ pub(crate) mod tests { }; impl PersistedState { - pub fn voting_oracle(&self) -> &VoterOracle { - &self.voting_oracle - } - pub fn active_round(&self) -> Result<&Rounds, Error> { self.voting_oracle.active_rounds() } @@ -1391,13 +1198,10 @@ pub(crate) mod tests { on_demand_justifications, }; BeefyWorker { - base: BeefyWorkerBase { - backend, - runtime: api, - key_store: Some(keystore).into(), - metrics, - _phantom: Default::default(), - }, + backend, + runtime: api, + key_store: Some(keystore).into(), + metrics, payload_provider, sync: Arc::new(sync), links, @@ -1675,19 +1479,22 @@ pub(crate) mod tests { let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); // keystore doesn't contain other keys than validators' - assert_eq!(worker.base.verify_validator_set(&1, &validator_set), Ok(())); + assert_eq!(verify_validator_set::(&1, &validator_set, &worker.key_store), Ok(())); // unknown `Bob` key let keys = &[Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let err_msg = "no authority public key found in store".to_string(); let expected = Err(Error::Keystore(err_msg)); - assert_eq!(worker.base.verify_validator_set(&1, &validator_set), expected); + assert_eq!(verify_validator_set::(&1, &validator_set, &worker.key_store), expected); // worker has no keystore - worker.base.key_store = None.into(); + worker.key_store = None.into(); let expected_err = Err(Error::Keystore("no Keystore".into())); - assert_eq!(worker.base.verify_validator_set(&1, &validator_set), expected_err); + assert_eq!( + verify_validator_set::(&1, &validator_set, &worker.key_store), + expected_err + ); } #[tokio::test] @@ -1839,7 +1646,7 @@ pub(crate) mod tests { let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone()); - worker.base.runtime = 
api_alice.clone();
+		worker.runtime = api_alice.clone();
 
 		// let there be a block with num = 1:
 		let _ = net.peer(0).push_blocks(1, false);
 
diff --git a/substrate/primitives/consensus/beefy/src/commitment.rs b/substrate/primitives/consensus/beefy/src/commitment.rs
index 1f0fb34ebf10..335c6b604f04 100644
--- a/substrate/primitives/consensus/beefy/src/commitment.rs
+++ b/substrate/primitives/consensus/beefy/src/commitment.rs
@@ -97,6 +97,19 @@ pub struct SignedCommitment {
 	pub signatures: Vec>,
 }
 
+impl sp_std::fmt::Display
+	for SignedCommitment
+{
+	fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result {
+		let signatures_count = self.signatures.iter().filter(|s| s.is_some()).count();
+		write!(
+			f,
+			"SignedCommitment(commitment: {:?}, signatures_count: {})",
+			self.commitment, signatures_count
+		)
+	}
+}
+
 impl SignedCommitment {
 	/// Return the number of collected signatures.
 	pub fn no_of_signatures(&self) -> usize {
@@ -241,6 +254,14 @@ pub enum VersionedFinalityProof {
 	V1(SignedCommitment),
 }
 
+impl sp_std::fmt::Display for VersionedFinalityProof {
+	fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result {
+		match self {
+			VersionedFinalityProof::V1(sc) => write!(f, "VersionedFinalityProof::V1({})", sc),
+		}
+	}
+}
+
 impl From> for VersionedFinalityProof {
 	fn from(commitment: SignedCommitment) -> Self {
 		VersionedFinalityProof::V1(commitment)

From 9bf1a5e23884921498b381728bfddaae93f83744 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Thu, 22 Feb 2024 14:35:00 +0100
Subject: [PATCH 25/51] Check that the validation code matches the parachain
 code (#3433)

This introduces a check to ensure that the parachain code matches the
validation code stored in the relay chain state. If not, it will print a
warning. This should be mainly useful for parachain builders to make sure
they have set everything up correctly.
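For illustration, the comparison boils down to roughly the following (a
simplified sketch, not the exact code in the diff below; `relay_client`,
`local_code`, `para_id` and `relay_parent` stand in for the collator's real
handles and values):

```rust
// Hash the parachain's local `:code` blob and compare it against the
// validation code hash the relay chain stores for this para.
let local_hash = ValidationCode::from(local_code).hash();
let on_chain = relay_client
	.validation_code_hash(relay_parent, para_id, OccupiedCoreAssumption::Included)
	.await?;
match on_chain {
	// Hashes agree: the collator runs the code the relay chain expects.
	Some(hash) if hash == local_hash => {},
	// Mismatch: warn the parachain builder, but never abort collation.
	Some(hash) => tracing::warn!(?hash, ?local_hash, "parachain code mismatch"),
	// No validation code registered for this para at `relay_parent`.
	None => tracing::warn!("no validation code in relay chain state"),
}
```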
--- .../consensus/aura/src/collators/basic.rs | 22 +++++++- .../consensus/aura/src/collators/lookahead.rs | 17 ++++-- .../consensus/aura/src/collators/mod.rs | 55 +++++++++++++++++++ cumulus/client/consensus/common/src/tests.rs | 9 +++ cumulus/client/network/src/tests.rs | 9 +++ .../src/lib.rs | 15 ++++- .../client/relay-chain-interface/src/lib.rs | 22 +++++++- .../relay-chain-rpc-interface/src/lib.rs | 13 ++++- .../src/rpc_client.rs | 14 +++++ polkadot/node/core/backing/src/lib.rs | 10 ++-- 10 files changed, 173 insertions(+), 13 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index 8740b06005d6..52b83254951f 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -33,12 +33,12 @@ use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::CollationResult; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{CollatorPair, Id as ParaId}; +use polkadot_primitives::{CollatorPair, Id as ParaId, ValidationCode}; use futures::{channel::mpsc::Receiver, prelude::*}; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; use sc_consensus::BlockImport; -use sp_api::ProvideRuntimeApi; +use sp_api::{CallApiAt, ProvideRuntimeApi}; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; use sp_consensus::SyncOracle; @@ -47,6 +47,7 @@ use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; +use sp_state_machine::Backend as _; use std::{convert::TryFrom, sync::Arc, time::Duration}; use crate::collator as collator_util; @@ -100,6 +101,7 @@ where + AuxStore + HeaderBackend + BlockBackend + + CallApiAt + Send + Sync + 'static, @@ -172,6 +174,22 @@ where continue } + let Ok(Some(code)) = + params.para_client.state_at(parent_hash).map_err(drop).and_then(|s| { + s.storage(&sp_core::storage::well_known_keys::CODE).map_err(drop) + }) + else { + continue; + }; + + super::check_validation_code_or_log( + &ValidationCode::from(code).hash(), + params.para_id, + ¶ms.relay_client, + *request.relay_parent(), + ) + .await; + let relay_parent_header = match params.relay_client.header(RBlockId::hash(*request.relay_parent())).await { Err(e) => reject_with_error!(e), diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index a9f33173d832..161f10d55a19 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -290,10 +290,7 @@ where // If the longest chain has space, build upon that. Otherwise, don't // build at all. potential_parents.sort_by_key(|a| a.depth); - let initial_parent = match potential_parents.pop() { - None => continue, - Some(p) => p, - }; + let Some(initial_parent) = potential_parents.pop() else { continue }; // Build in a loop until not allowed. Note that the authorities can change // at any block, so we need to re-claim our slot every time. @@ -301,6 +298,10 @@ where let mut parent_header = initial_parent.header; let overseer_handle = &mut params.overseer_handle; + // We mainly call this to inform users at genesis if there is a mismatch with the + // on-chain data. 
+ collator.collator_service().check_block_status(parent_hash, &parent_header); + // This needs to change to support elastic scaling, but for continuously // scheduled chains this ensures that the backlog will grow steadily. for n_built in 0..2 { @@ -353,6 +354,14 @@ where Some(v) => v, }; + super::check_validation_code_or_log( + &validation_code_hash, + params.para_id, + ¶ms.relay_client, + relay_parent, + ) + .await; + match collator .collate( &parent_header, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 4c7b759daf73..6e0067d0cedb 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -20,5 +20,60 @@ //! included parachain block, as well as the [`lookahead`] collator, which prospectively //! builds on parachain blocks which have not yet been included in the relay chain. +use cumulus_relay_chain_interface::RelayChainInterface; +use polkadot_primitives::{ + Hash as RHash, Id as ParaId, OccupiedCoreAssumption, ValidationCodeHash, +}; + pub mod basic; pub mod lookahead; + +/// Check the `local_validation_code_hash` against the validation code hash in the relay chain +/// state. +/// +/// If the code hashes do not match, it prints a warning. +async fn check_validation_code_or_log( + local_validation_code_hash: &ValidationCodeHash, + para_id: ParaId, + relay_client: &impl RelayChainInterface, + relay_parent: RHash, +) { + let state_validation_code_hash = match relay_client + .validation_code_hash(relay_parent, para_id, OccupiedCoreAssumption::Included) + .await + { + Ok(hash) => hash, + Err(error) => { + tracing::debug!( + target: super::LOG_TARGET, + %error, + ?relay_parent, + %para_id, + "Failed to fetch validation code hash", + ); + return + }, + }; + + match state_validation_code_hash { + Some(state) => + if state != *local_validation_code_hash { + tracing::warn!( + target: super::LOG_TARGET, + %para_id, + ?relay_parent, + ?local_validation_code_hash, + relay_validation_code_hash = ?state, + "Parachain code doesn't match validation code stored in the relay chain state", + ); + }, + None => { + tracing::warn!( + target: super::LOG_TARGET, + %para_id, + ?relay_parent, + "Could not find validation code for parachain in the relay chain state.", + ); + }, + } +} diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index 597d1ab2acc2..bfb95ae388ae 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -136,6 +136,15 @@ impl RelayChainInterface for Relaychain { Ok(Some(PersistedValidationData { parent_head, ..Default::default() })) } + async fn validation_code_hash( + &self, + _: PHash, + _: ParaId, + _: OccupiedCoreAssumption, + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } + async fn candidate_pending_availability( &self, _: PHash, diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs index e03f470753bb..d986635f961c 100644 --- a/cumulus/client/network/src/tests.rs +++ b/cumulus/client/network/src/tests.rs @@ -117,6 +117,15 @@ impl RelayChainInterface for DummyRelayChainInterface { })) } + async fn validation_code_hash( + &self, + _: PHash, + _: ParaId, + _: OccupiedCoreAssumption, + ) -> RelayChainResult> { + unimplemented!("Not needed for test") + } + async fn candidate_pending_availability( &self, _: PHash, diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs 
b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 866214fe2c52..6ea02b2e7c1f 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -21,7 +21,7 @@ use cumulus_primitives_core::{ relay_chain::{ runtime_api::ParachainHost, Block as PBlock, BlockId, CommittedCandidateReceipt, Hash as PHash, Header as PHeader, InboundHrmpMessage, OccupiedCoreAssumption, SessionIndex, - ValidatorId, + ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -115,6 +115,19 @@ impl RelayChainInterface for RelayChainInProcessInterface { )?) } + async fn validation_code_hash( + &self, + hash: PHash, + para_id: ParaId, + occupied_core_assumption: OccupiedCoreAssumption, + ) -> RelayChainResult> { + Ok(self.full_client.runtime_api().validation_code_hash( + hash, + para_id, + occupied_core_assumption, + )?) + } + async fn candidate_pending_availability( &self, hash: PHash, diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index de5e7891b30d..bb93e6a168c8 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -30,7 +30,7 @@ use cumulus_primitives_core::relay_chain::BlockId; pub use cumulus_primitives_core::{ relay_chain::{ CommittedCandidateReceipt, Hash as PHash, Header as PHeader, InboundHrmpMessage, - OccupiedCoreAssumption, SessionIndex, ValidatorId, + OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -194,6 +194,15 @@ pub trait RelayChainInterface: Send + Sync { relay_parent: PHash, relevant_keys: &Vec>, ) -> RelayChainResult; + + /// Returns the validation code hash for the given `para_id` using the given + /// `occupied_core_assumption`. 
+ async fn validation_code_hash( + &self, + relay_parent: PHash, + para_id: ParaId, + occupied_core_assumption: OccupiedCoreAssumption, + ) -> RelayChainResult>; } #[async_trait] @@ -301,4 +310,15 @@ where async fn header(&self, block_id: BlockId) -> RelayChainResult> { (**self).header(block_id).await } + + async fn validation_code_hash( + &self, + relay_parent: PHash, + para_id: ParaId, + occupied_core_assumption: OccupiedCoreAssumption, + ) -> RelayChainResult> { + (**self) + .validation_code_hash(relay_parent, para_id, occupied_core_assumption) + .await + } } diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index 96f8fc8b5563..3a4c186e301e 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -19,7 +19,7 @@ use core::time::Duration; use cumulus_primitives_core::{ relay_chain::{ CommittedCandidateReceipt, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, - OccupiedCoreAssumption, SessionIndex, ValidatorId, + OccupiedCoreAssumption, SessionIndex, ValidationCodeHash, ValidatorId, }, InboundDownwardMessage, ParaId, PersistedValidationData, }; @@ -110,6 +110,17 @@ impl RelayChainInterface for RelayChainRpcInterface { .await } + async fn validation_code_hash( + &self, + hash: RelayHash, + para_id: ParaId, + occupied_core_assumption: OccupiedCoreAssumption, + ) -> RelayChainResult> { + self.rpc_client + .validation_code_hash(hash, para_id, occupied_core_assumption) + .await + } + async fn candidate_pending_availability( &self, hash: RelayHash, diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index a912997e947d..6578210a259c 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -647,6 +647,20 @@ impl RelayChainRpcClient { .await } + pub async fn validation_code_hash( + &self, + at: RelayHash, + para_id: ParaId, + occupied_core_assumption: OccupiedCoreAssumption, + ) -> Result, RelayChainError> { + self.call_remote_runtime_function( + "ParachainHost_validation_code_hash", + at, + Some((para_id, occupied_core_assumption)), + ) + .await + } + fn send_register_message_to_worker( &self, message: RpcDispatcherMessage, diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index cc192607cea0..26f20a6d48ef 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -1887,18 +1887,20 @@ async fn background_validate_and_make_available( if rp_state.awaiting_validation.insert(candidate_hash) { // spawn background task. let bg = async move { - if let Err(e) = validate_and_make_available(params).await { - if let Error::BackgroundValidationMpsc(error) = e { + if let Err(error) = validate_and_make_available(params).await { + if let Error::BackgroundValidationMpsc(error) = error { gum::debug!( target: LOG_TARGET, + ?candidate_hash, ?error, "Mpsc background validation mpsc died during validation- leaf no longer active?" 
 				);
 			} else {
 				gum::error!(
 					target: LOG_TARGET,
-					"Failed to validate and make available: {:?}",
-					e
+					?candidate_hash,
+					?error,
+					"Failed to validate and make available",
 				);
 			}
 		}

From e4b6b8cd7973633f86d1b92a56abf2a946b7be84 Mon Sep 17 00:00:00 2001
From: Serban Iorga
Date: Fri, 23 Feb 2024 09:51:26 +0100
Subject: [PATCH 26/51] Add support for BHP local and BHK local (#3443)

Related to https://github.com/paritytech/polkadot-sdk/issues/3400

Extracting small parts of https://github.com/paritytech/polkadot-sdk/pull/3429
into a separate PR:

- Add support for BHP local and BHK local
- Increase the timeout for the bridge zombienet tests

---
 bridges/testing/tests/0001-asset-transfer/run.sh          | 4 ++--
 .../tests/0002-mandatory-headers-synced-while-idle/run.sh | 4 ++--
 cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs  | 8 ++++++++
 cumulus/polkadot-parachain/src/command.rs                 | 6 ++++--
 4 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/bridges/testing/tests/0001-asset-transfer/run.sh b/bridges/testing/tests/0001-asset-transfer/run.sh
index 8a053ee72097..1e0d6cad3010 100755
--- a/bridges/testing/tests/0001-asset-transfer/run.sh
+++ b/bridges/testing/tests/0001-asset-transfer/run.sh
@@ -8,11 +8,11 @@ source "${BASH_SOURCE%/*}/../../utils/zombienet.sh"
 ${BASH_SOURCE%/*}/../../environments/rococo-westend/spawn.sh --init --start-relayer &
 env_pid=$!
 
-ensure_process_file $env_pid $TEST_DIR/rococo.env 400
+ensure_process_file $env_pid $TEST_DIR/rococo.env 600
 rococo_dir=`cat $TEST_DIR/rococo.env`
 echo
 
-ensure_process_file $env_pid $TEST_DIR/westend.env 180
+ensure_process_file $env_pid $TEST_DIR/westend.env 300
 westend_dir=`cat $TEST_DIR/westend.env`
 echo
 
diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh
index 423f4a1bcc0f..c39796d51d56 100755
--- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh
+++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh
@@ -10,11 +10,11 @@ source "${BASH_SOURCE%/*}/../../utils/zombienet.sh"
 ${BASH_SOURCE%/*}/../../environments/rococo-westend/spawn.sh &
 env_pid=$!
 
-ensure_process_file $env_pid $TEST_DIR/rococo.env 400 +ensure_process_file $env_pid $TEST_DIR/rococo.env 600 rococo_dir=`cat $TEST_DIR/rococo.env` echo -ensure_process_file $env_pid $TEST_DIR/westend.env 180 +ensure_process_file $env_pid $TEST_DIR/westend.env 300 westend_dir=`cat $TEST_DIR/westend.env` echo diff --git a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs index 1db826ea7daf..15e8a1bf11a0 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs @@ -25,7 +25,10 @@ use std::str::FromStr; #[derive(Debug, PartialEq)] pub enum BridgeHubRuntimeType { Kusama, + KusamaLocal, + Polkadot, + PolkadotLocal, Rococo, RococoLocal, @@ -44,7 +47,9 @@ impl FromStr for BridgeHubRuntimeType { fn from_str(value: &str) -> Result { match value { polkadot::BRIDGE_HUB_POLKADOT => Ok(BridgeHubRuntimeType::Polkadot), + polkadot::BRIDGE_HUB_POLKADOT_LOCAL => Ok(BridgeHubRuntimeType::PolkadotLocal), kusama::BRIDGE_HUB_KUSAMA => Ok(BridgeHubRuntimeType::Kusama), + kusama::BRIDGE_HUB_KUSAMA_LOCAL => Ok(BridgeHubRuntimeType::KusamaLocal), westend::BRIDGE_HUB_WESTEND => Ok(BridgeHubRuntimeType::Westend), westend::BRIDGE_HUB_WESTEND_LOCAL => Ok(BridgeHubRuntimeType::WestendLocal), westend::BRIDGE_HUB_WESTEND_DEVELOPMENT => Ok(BridgeHubRuntimeType::WestendDevelopment), @@ -103,6 +108,7 @@ impl BridgeHubRuntimeType { Some("Bob".to_string()), |_| (), ))), + other => Err(std::format!("No default config present for {:?}", other)), } } } @@ -242,6 +248,7 @@ pub mod rococo { /// Sub-module for Kusama setup pub mod kusama { pub(crate) const BRIDGE_HUB_KUSAMA: &str = "bridge-hub-kusama"; + pub(crate) const BRIDGE_HUB_KUSAMA_LOCAL: &str = "bridge-hub-kusama-local"; } /// Sub-module for Westend setup. @@ -358,4 +365,5 @@ pub mod westend { /// Sub-module for Polkadot setup pub mod polkadot { pub(crate) const BRIDGE_HUB_POLKADOT: &str = "bridge-hub-polkadot"; + pub(crate) const BRIDGE_HUB_POLKADOT_LOCAL: &str = "bridge-hub-polkadot-local"; } diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index acba32f048be..a40c356dcd13 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -755,14 +755,16 @@ pub fn run() -> Result<()> { .map_err(Into::into), BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot => + chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | + chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal => crate::service::start_generic_aura_node::< RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), - chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama => + chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | + chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal => crate::service::start_generic_aura_node::< RuntimeApi, AuraId, From 11b5354fd3ced03003da1901aba704791f367849 Mon Sep 17 00:00:00 2001 From: Ignacio Palacios Date: Fri, 23 Feb 2024 12:54:54 +0100 Subject: [PATCH 27/51] Fix `DepositReserveAsset` fees payment (#3340) The `fee` should be calculated with the reanchored asset, otherwise it could lead to a failure where the set aside fee ends up not being enough. 
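To illustrate why reanchoring matters here (a hedged sketch, not the exact
executor code; `dest` and `context` stand in for the real destination
`Location` and the sender's `UniversalLocation`):

```rust
// The sender identifies the fee asset relative to itself...
let fee: Asset = (Parent, 1_000_000u128).into();
// ...but the onward message must be weighed with the id `dest` will see,
// so reanchor before building the message used for fee calculation.
let fee_reanchored = fee.reanchored(&dest, &context).expect("fee is reanchorable");
let message_to_weigh = vec![ReserveAssetDeposited(fee_reanchored.into()), ClearOrigin];
// Weighing the message with the locally-anchored id instead could return a
// fee that no longer covers execution once the asset is seen from `dest`.
```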
@acatangiu --------- Co-authored-by: Adrian Catangiu --- Cargo.lock | 2 + polkadot/node/test/service/src/chain_spec.rs | 1 + .../runtime/test-runtime/src/xcm_config.rs | 54 +++++++++--- .../xcm-executor/integration-tests/Cargo.toml | 2 + .../xcm-executor/integration-tests/src/lib.rs | 83 +++++++++++++++++++ polkadot/xcm/xcm-executor/src/lib.rs | 4 +- 6 files changed, 132 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 950b4e0889a1..d9791ec0147f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -22323,10 +22323,12 @@ dependencies = [ "pallet-transaction-payment", "pallet-xcm", "parity-scale-codec", + "polkadot-service", "polkadot-test-client", "polkadot-test-runtime", "polkadot-test-service", "sp-consensus", + "sp-core", "sp-keyring", "sp-runtime", "sp-state-machine", diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index 0295090b9521..4cc387317c3f 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -169,6 +169,7 @@ fn polkadot_testnet_genesis( paras_availability_period: 4, no_show_slots: 10, minimum_validation_upgrade_delay: 5, + max_downward_message_size: 1024, ..Default::default() }, } diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index a81d35f4d788..a48bca17e9ff 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -16,14 +16,16 @@ use frame_support::{ parameter_types, - traits::{Everything, Nothing}, + traits::{Everything, Get, Nothing}, weights::Weight, }; use frame_system::EnsureRoot; +use polkadot_runtime_parachains::FeeTracker; +use runtime_common::xcm_sender::{ChildParachainRouter, PriceForMessageDelivery}; use xcm::latest::prelude::*; use xcm_builder::{ AllowUnpaidExecutionFrom, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, - SignedAccountId32AsNative, SignedToAccountId32, + SignedAccountId32AsNative, SignedToAccountId32, WithUniqueTopic, }; use xcm_executor::{ traits::{TransactAsset, WeightTrader}, @@ -36,6 +38,8 @@ parameter_types! { pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 16; pub const UniversalLocation: xcm::latest::InteriorLocation = xcm::latest::Junctions::Here; + pub TokenLocation: Location = Here.into_location(); + pub FeeAssetId: AssetId = AssetId(TokenLocation::get()); } /// Type to convert an `Origin` type value into a `Location` value which represents an interior @@ -45,17 +49,43 @@ pub type LocalOriginToLocation = ( SignedToAccountId32, ); -pub struct DoNothingRouter; -impl SendXcm for DoNothingRouter { - type Ticket = (); - fn validate(_dest: &mut Option, _msg: &mut Option>) -> SendResult<()> { - Ok(((), Assets::new())) - } - fn deliver(_: ()) -> Result { - Ok([0; 32]) +/// Implementation of [`PriceForMessageDelivery`], returning a different price +/// based on whether a message contains a reanchored asset or not. +/// This implementation ensures that messages with non-reanchored assets return higher +/// prices than messages with reanchored assets. +/// Useful for `deposit_reserve_asset_works_for_any_xcm_sender` integration test. 
+pub struct TestDeliveryPrice(sp_std::marker::PhantomData<(A, F)>); +impl, F: FeeTracker> PriceForMessageDelivery for TestDeliveryPrice { + type Id = F::Id; + + fn price_for_delivery(_: Self::Id, msg: &Xcm<()>) -> Assets { + let base_fee: super::Balance = 1_000_000; + + let parents = msg.iter().find_map(|xcm| match xcm { + ReserveAssetDeposited(assets) => { + let AssetId(location) = &assets.inner().first().unwrap().id; + Some(location.parents) + }, + _ => None, + }); + + // If no asset is found, price defaults to `base_fee`. + let amount = base_fee + .saturating_add(base_fee.saturating_mul(parents.unwrap_or(0) as super::Balance)); + + (A::get(), amount).into() } } +pub type PriceForChildParachainDelivery = TestDeliveryPrice; + +/// The XCM router. When we want to send an XCM message, we use this type. It amalgamates all of our +/// individual routers. +pub type XcmRouter = WithUniqueTopic< + // Only one router so far - use DMP to communicate with child parachains. + ChildParachainRouter, +>; + pub type Barrier = AllowUnpaidExecutionFrom; pub struct DummyAssetTransactor; @@ -99,7 +129,7 @@ type OriginConverter = ( pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = super::RuntimeCall; - type XcmSender = DoNothingRouter; + type XcmSender = XcmRouter; type AssetTransactor = DummyAssetTransactor; type OriginConverter = OriginConverter; type IsReserve = (); @@ -133,7 +163,7 @@ impl pallet_xcm::Config for crate::Runtime { type UniversalLocation = UniversalLocation; type SendXcmOrigin = EnsureXcmOrigin; type Weigher = FixedWeightBounds; - type XcmRouter = DoNothingRouter; + type XcmRouter = XcmRouter; type XcmExecuteFilter = Everything; type XcmExecutor = xcm_executor::XcmExecutor; type XcmTeleportFilter = Everything; diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index cafe12dc587f..1e572e6210a2 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -20,6 +20,7 @@ pallet-xcm = { path = "../../pallet-xcm" } polkadot-test-client = { path = "../../../node/test/client" } polkadot-test-runtime = { path = "../../../runtime/test-runtime" } polkadot-test-service = { path = "../../../node/test/service" } +polkadot-service = { path = "../../../node/service" } sp-consensus = { path = "../../../../substrate/primitives/consensus/common" } sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } @@ -27,6 +28,7 @@ sp-state-machine = { path = "../../../../substrate/primitives/state-machine" } xcm = { package = "staging-xcm", path = "../..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = ".." 
}
 sp-tracing = { path = "../../../../substrate/primitives/tracing" }
+sp-core = { path = "../../../../substrate/primitives/core" }
 
 [features]
 default = ["std"]
diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs
index 79d6cb1c411b..da7fc0d97825 100644
--- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs
+++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs
@@ -19,12 +19,14 @@
 
 use codec::Encode;
 use frame_support::{dispatch::GetDispatchInfo, weights::Weight};
+use polkadot_service::chain_spec::get_account_id_from_seed;
 use polkadot_test_client::{
 	BlockBuilderExt, ClientBlockImportExt, DefaultTestClientBuilderExt, InitPolkadotBlockBuilder,
 	TestClientBuilder, TestClientBuilderExt,
 };
 use polkadot_test_runtime::{pallet_test_notifier, xcm_config::XcmConfig};
 use polkadot_test_service::construct_extrinsic;
+use sp_core::sr25519;
 use sp_runtime::traits::Block;
 use sp_state_machine::InspectState;
 use xcm::{latest::prelude::*, VersionedResponse, VersionedXcm};
@@ -323,3 +325,84 @@ fn query_response_elicits_handler() {
 		)));
 	});
 }
+
+/// Simulates a cross-chain message from Parachain to Parachain through Relay Chain
+/// that deposits assets into the reserve of the destination.
+/// Regression test for `DepositReserveAsset` changes in
+///
+#[test]
+fn deposit_reserve_asset_works_for_any_xcm_sender() {
+	sp_tracing::try_init_simple();
+	let mut client = TestClientBuilder::new().build();
+
+	// Init values for the simulated origin Parachain
+	let amount_to_send: u128 = 1_000_000_000_000;
+	let assets: Assets = (Parent, amount_to_send).into();
+	let fee_asset_item = 0;
+	let max_assets = assets.len() as u32;
+	let fees = assets.get(fee_asset_item as usize).unwrap().clone();
+	let weight_limit = Unlimited;
+	let reserve = Location::parent();
+	let dest = Location::new(1, [Parachain(2000)]);
+	let beneficiary_id = get_account_id_from_seed::<sr25519::Public>("Alice");
+	let beneficiary = Location::new(0, [AccountId32 { network: None, id: beneficiary_id.into() }]);
+
+	// spends up to half of fees for execution on reserve and other half for execution on
+	// destination
+	let fee1 = amount_to_send.saturating_div(2);
+	let fee2 = amount_to_send.saturating_sub(fee1);
+	let fees_half_1 = Asset::from((fees.id.clone(), Fungible(fee1)));
+	let fees_half_2 = Asset::from((fees.id.clone(), Fungible(fee2)));
+
+	let reserve_context = <XcmConfig as xcm_executor::Config>::UniversalLocation::get();
+	// identifies fee item as seen by `reserve` - to be used at reserve chain
+	let reserve_fees = fees_half_1.reanchored(&reserve, &reserve_context).unwrap();
+	// identifies fee item as seen by `dest` - to be used at destination chain
+	let dest_fees = fees_half_2.reanchored(&dest, &reserve_context).unwrap();
+	// identifies assets as seen by `reserve` - to be used at reserve chain
+	let assets_reanchored = assets.reanchored(&reserve, &reserve_context).unwrap();
+	// identifies `dest` as seen by `reserve`
+	let dest = dest.reanchored(&reserve, &reserve_context).unwrap();
+	// xcm to be executed at dest
+	let xcm_on_dest = Xcm(vec![
+		BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() },
+		DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary },
+	]);
+	// xcm to be executed at reserve
+	let msg = Xcm(vec![
+		WithdrawAsset(assets_reanchored),
+		ClearOrigin,
+		BuyExecution { fees: reserve_fees, weight_limit },
+		DepositReserveAsset { assets: Wild(AllCounted(max_assets)), dest, xcm: xcm_on_dest },
+	]);
+
+	let mut block_builder =
client.init_polkadot_block_builder(); + + // Simulate execution of an incoming XCM message at the reserve chain + let execute = construct_extrinsic( + &client, + polkadot_test_runtime::RuntimeCall::Xcm(pallet_xcm::Call::execute { + message: Box::new(VersionedXcm::from(msg)), + max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), + }), + sp_keyring::Sr25519Keyring::Alice, + 0, + ); + + block_builder.push_polkadot_extrinsic(execute).expect("pushes extrinsic"); + + let block = block_builder.build().expect("Finalizes the block").block; + let block_hash = block.hash(); + + futures::executor::block_on(client.import(sp_consensus::BlockOrigin::Own, block)) + .expect("imports the block"); + + client.state_at(block_hash).expect("state should exist").inspect_state(|| { + assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( + r.event, + polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::Attempted { + outcome: Outcome::Complete { .. } + }), + ))); + }); +} diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index b26779f3ae9d..c61e1e1d15bc 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -826,9 +826,9 @@ impl XcmExecutor { // be weighed let to_weigh = self.holding.saturating_take(assets.clone()); self.holding.subsume_assets(to_weigh.clone()); - + let to_weigh_reanchored = Self::reanchored(to_weigh, &dest, None); let mut message_to_weigh = - vec![ReserveAssetDeposited(to_weigh.into()), ClearOrigin]; + vec![ReserveAssetDeposited(to_weigh_reanchored), ClearOrigin]; message_to_weigh.extend(xcm.0.clone().into_iter()); let (_, fee) = validate_send::(dest.clone(), Xcm(message_to_weigh))?; From 64050cf3474adb8705991f8e3a0e2fd36bf5dcd4 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 23 Feb 2024 14:04:10 +0100 Subject: [PATCH 28/51] prdoc: Print errors on CI (#3459) By passing `RUST_LOG=info` to the check command, we will be able to see the exact problem with a given prdoc. 
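Locally, the same information can be obtained with an invocation along these
lines (hypothetical command, assuming the prdoc CLI is installed; in CI the
variable is passed through the container as shown in the diff below):

```
RUST_LOG=info prdoc check -n 3243
```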
Before:
```
PR #3243 -> ERR
```
After:
```
[2024-02-23T12:53:55Z INFO  prdoclib::commands::check] Checking directory prdoc
[2024-02-23T12:53:55Z INFO  prdoclib::commands::check] Using schema: /Users/sebastian/work/repos/polkadot-sdk/prdoc/schema_user.json
[2024-02-23T12:53:55Z WARN  prdoclib::schema] validation_result: false
[2024-02-23T12:53:55Z WARN  prdoclib::schema] validation_result_strict: false
[2024-02-23T12:53:55Z WARN  prdoclib::schema] errors: [
    Required {
        path: "/title",
    },
]
[2024-02-23T12:53:55Z WARN  prdoclib::schema] missing: []
[2024-02-23T12:53:55Z ERROR prdoclib::commands::check] Loading the schema failed:
[2024-02-23T12:53:55Z ERROR prdoclib::commands::check] ValidationErrors(ValidationState { errors: [Required { path: "/title" }], missing: [], replacement: None, evaluated: {"/doc/0/description", "/crates/0/name", "/doc/0", "/crates", "/crates/0", "", "/doc", "/doc/0/audience"} })
PR #3243 -> ERR
```
---
 .github/workflows/check-prdoc.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml
index 5503b61d6817..91701ca036e0 100644
--- a/.github/workflows/check-prdoc.yml
+++ b/.github/workflows/check-prdoc.yml
@@ -56,4 +56,4 @@ jobs:
       run: |
         echo "Checking for PR#${GITHUB_PR}"
         echo "You can find more information about PRDoc at $PRDOC_DOC"
-        $ENGINE run --rm -v $PWD:/repo $IMAGE check -n ${GITHUB_PR}
+        $ENGINE run --rm -v $PWD:/repo -e RUST_LOG=info $IMAGE check -n ${GITHUB_PR}

From f2645eec12f080a66228aedbeaecea2c913570ed Mon Sep 17 00:00:00 2001
From: PG Herveou
Date: Fri, 23 Feb 2024 14:10:06 +0100
Subject: [PATCH 29/51] Contracts: Add new version for marking new stable API
 (#3415)

Add an `ApiVersion` constant to the pallet-contracts `Config` to communicate
to developers the current state of the host functions exposed by the pallet.
---
 .../contracts-rococo/src/contracts.rs         |  1 +
 prdoc/pr_3415.prdoc                           |  9 +++++++++
 substrate/bin/node/runtime/src/lib.rs         |  1 +
 .../src/parachain/contracts_config.rs         |  1 +
 substrate/frame/contracts/src/lib.rs          | 18 ++++++++++++++++++
 substrate/frame/contracts/src/tests.rs        |  1 +
 6 files changed, 31 insertions(+)
 create mode 100644 prdoc/pr_3415.prdoc

diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs
index 7b89f2df8077..681b95ce6a53 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs
@@ -72,5 +72,6 @@ impl Config for Runtime {
 	type RuntimeHoldReason = RuntimeHoldReason;
 	type Debug = ();
 	type Environment = ();
+	type ApiVersion = ();
 	type Xcm = pallet_xcm::Pallet<Self>;
 }
diff --git a/prdoc/pr_3415.prdoc b/prdoc/pr_3415.prdoc
new file mode 100644
index 000000000000..c56a5d3ffaa1
--- /dev/null
+++ b/prdoc/pr_3415.prdoc
@@ -0,0 +1,9 @@
+title: "[pallet-contracts] Add ApiVersion to the config."
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Add `ApiVersion` to the config to communicate the state of the host functions exposed by the pallet.
+ +crates: + - name: pallet-contracts diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 24c8f6f48e96..b34d8c89e568 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1367,6 +1367,7 @@ impl pallet_contracts::Config for Runtime { type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = (); type Environment = (); + type ApiVersion = (); type Xcm = (); } diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs index dadba394e264..3f26c6f372ef 100644 --- a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -94,5 +94,6 @@ impl pallet_contracts::Config for Runtime { type WeightPrice = Self; type Debug = (); type Environment = (); + type ApiVersion = (); type Xcm = pallet_xcm::Pallet; } diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 943bf5f85d4e..d8939ee88baf 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -214,6 +214,18 @@ pub struct Environment { block_number: EnvironmentType>, } +/// Defines the current version of the HostFn APIs. +/// This is used to communicate the available APIs in pallet-contracts. +/// +/// The version is bumped any time a new HostFn is added or stabilized. +#[derive(Encode, Decode, TypeInfo)] +pub struct ApiVersion(u16); +impl Default for ApiVersion { + fn default() -> Self { + Self(1) + } +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -402,6 +414,12 @@ pub mod pallet { #[pallet::constant] type Environment: Get>; + /// The version of the HostFn APIs that are available in the runtime. + /// + /// Only valid value is `()`. + #[pallet::constant] + type ApiVersion: Get; + /// A type that exposes XCM APIs, allowing contracts to interact with other parachains, and /// execute XCM programs. 
type Xcm: xcm_builder::Controller<
diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs
index b39fc79b62a0..2d1c5b315a34 100644
--- a/substrate/frame/contracts/src/tests.rs
+++ b/substrate/frame/contracts/src/tests.rs
@@ -465,6 +465,7 @@ impl Config for Test {
 	type MaxDelegateDependencies = MaxDelegateDependencies;
 	type Debug = TestDebug;
 	type Environment = ();
+	type ApiVersion = ();
 	type Xcm = ();
 }

From 5fc6d67be081b4f2638d3d46d0002f95392b370c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dino=20Pa=C4=8Dandi?= <3002868+Dinonard@users.noreply.github.com>
Date: Fri, 23 Feb 2024 14:56:32 +0100
Subject: [PATCH 30/51] pallet-membership weights (#3324)

## Summary
* use benchmarked weights instead of hardcoded ones for `pallet-membership`
* rename the benchmark to match the extrinsic name
* remove an unnecessary dependency from `clear_prime`

---------

Signed-off-by: Oliver Tale-Yazdi
Co-authored-by: Oliver Tale-Yazdi
---
 prdoc/pr_3324.prdoc                       |  13 +++
 substrate/frame/membership/src/lib.rs     | 124 ++++++++++++++--------
 substrate/frame/membership/src/weights.rs |  18 ++--
 3 files changed, 99 insertions(+), 56 deletions(-)
 create mode 100644 prdoc/pr_3324.prdoc

diff --git a/prdoc/pr_3324.prdoc b/prdoc/pr_3324.prdoc
new file mode 100644
index 000000000000..0425fbf317c8
--- /dev/null
+++ b/prdoc/pr_3324.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Fix weight calculation and event emission in pallet-membership"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Bug fix for the membership pallet to use correct weights. Also no event will be emitted
+      anymore when `change_key` is called with identical accounts.
+
+crates:
+  - name: pallet-membership
diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs
index 19b5e54d308c..f32263970038 100644
--- a/substrate/frame/membership/src/lib.rs
+++ b/substrate/frame/membership/src/lib.rs
@@ -27,7 +27,7 @@ use frame_support::{
 	traits::{ChangeMembers, Contains, Get, InitializeMembers, SortedMembers},
 	BoundedVec,
 };
-use sp_runtime::traits::StaticLookup;
+use sp_runtime::traits::{StaticLookup, UniqueSaturatedInto};
 use sp_std::prelude::*;

 pub mod migrations;
@@ -163,12 +163,16 @@ pub mod pallet {
 		///
 		/// May only be called from `T::AddOrigin`.
 		#[pallet::call_index(0)]
-		#[pallet::weight({50_000_000})]
-		pub fn add_member(origin: OriginFor<T>, who: AccountIdLookupOf<T>) -> DispatchResult {
+		#[pallet::weight(T::WeightInfo::add_member(T::MaxMembers::get()))]
+		pub fn add_member(
+			origin: OriginFor<T>,
+			who: AccountIdLookupOf<T>,
+		) -> DispatchResultWithPostInfo {
 			T::AddOrigin::ensure_origin(origin)?;
 			let who = T::Lookup::lookup(who)?;

 			let mut members = <Members<T, I>>::get();
+			let init_length = members.len();
 			let location = members.binary_search(&who).err().ok_or(Error::<T, I>::AlreadyMember)?;
 			members
 				.try_insert(location, who.clone())
@@ -179,19 +183,24 @@ pub mod pallet {
 			T::MembershipChanged::change_members_sorted(&[who], &[], &members[..]);

 			Self::deposit_event(Event::MemberAdded);
-			Ok(())
+
+			Ok(Some(T::WeightInfo::add_member(init_length as u32)).into())
 		}

 		/// Remove a member `who` from the set.
 		///
 		/// May only be called from `T::RemoveOrigin`.
#[pallet::call_index(1)] - #[pallet::weight({50_000_000})] - pub fn remove_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + #[pallet::weight(T::WeightInfo::remove_member(T::MaxMembers::get()))] + pub fn remove_member( + origin: OriginFor, + who: AccountIdLookupOf, + ) -> DispatchResultWithPostInfo { T::RemoveOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; let mut members = >::get(); + let init_length = members.len(); let location = members.binary_search(&who).ok().ok_or(Error::::NotMember)?; members.remove(location); @@ -201,7 +210,7 @@ pub mod pallet { Self::rejig_prime(&members); Self::deposit_event(Event::MemberRemoved); - Ok(()) + Ok(Some(T::WeightInfo::remove_member(init_length as u32)).into()) } /// Swap out one member `remove` for another `add`. @@ -210,18 +219,18 @@ pub mod pallet { /// /// Prime membership is *not* passed from `remove` to `add`, if extant. #[pallet::call_index(2)] - #[pallet::weight({50_000_000})] + #[pallet::weight(T::WeightInfo::swap_member(T::MaxMembers::get()))] pub fn swap_member( origin: OriginFor, remove: AccountIdLookupOf, add: AccountIdLookupOf, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { T::SwapOrigin::ensure_origin(origin)?; let remove = T::Lookup::lookup(remove)?; let add = T::Lookup::lookup(add)?; if remove == add { - return Ok(()) + return Ok(().into()); } let mut members = >::get(); @@ -236,7 +245,7 @@ pub mod pallet { Self::rejig_prime(&members); Self::deposit_event(Event::MembersSwapped); - Ok(()) + Ok(Some(T::WeightInfo::swap_member(members.len() as u32)).into()) } /// Change the membership to a new set, disregarding the existing membership. Be nice and @@ -244,7 +253,7 @@ pub mod pallet { /// /// May only be called from `T::ResetOrigin`. #[pallet::call_index(3)] - #[pallet::weight({50_000_000})] + #[pallet::weight(T::WeightInfo::reset_members(members.len().unique_saturated_into()))] pub fn reset_members(origin: OriginFor, members: Vec) -> DispatchResult { T::ResetOrigin::ensure_origin(origin)?; @@ -267,56 +276,65 @@ pub mod pallet { /// /// Prime membership is passed from the origin account to `new`, if extant. 
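+		///
+		/// The call is pre-charged with the weight for `T::MaxMembers::get()` members; the
+		/// actual weight, based on the real membership size, is refunded through the returned
+		/// `DispatchResultWithPostInfo`.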
#[pallet::call_index(4)] - #[pallet::weight({50_000_000})] - pub fn change_key(origin: OriginFor, new: AccountIdLookupOf) -> DispatchResult { + #[pallet::weight(T::WeightInfo::change_key(T::MaxMembers::get()))] + pub fn change_key( + origin: OriginFor, + new: AccountIdLookupOf, + ) -> DispatchResultWithPostInfo { let remove = ensure_signed(origin)?; let new = T::Lookup::lookup(new)?; - if remove != new { - let mut members = >::get(); - let location = - members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; - let _ = members.binary_search(&new).err().ok_or(Error::::AlreadyMember)?; - members[location] = new.clone(); - members.sort(); - - >::put(&members); - - T::MembershipChanged::change_members_sorted( - &[new.clone()], - &[remove.clone()], - &members[..], - ); - - if Prime::::get() == Some(remove) { - Prime::::put(&new); - T::MembershipChanged::set_prime(Some(new)); - } + if remove == new { + return Ok(().into()); + } + + let mut members = >::get(); + let members_length = members.len() as u32; + let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; + let _ = members.binary_search(&new).err().ok_or(Error::::AlreadyMember)?; + members[location] = new.clone(); + members.sort(); + + >::put(&members); + + T::MembershipChanged::change_members_sorted( + &[new.clone()], + &[remove.clone()], + &members[..], + ); + + if Prime::::get() == Some(remove) { + Prime::::put(&new); + T::MembershipChanged::set_prime(Some(new)); } Self::deposit_event(Event::KeyChanged); - Ok(()) + Ok(Some(T::WeightInfo::change_key(members_length)).into()) } /// Set the prime member. Must be a current member. /// /// May only be called from `T::PrimeOrigin`. #[pallet::call_index(5)] - #[pallet::weight({50_000_000})] - pub fn set_prime(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { + #[pallet::weight(T::WeightInfo::set_prime(T::MaxMembers::get()))] + pub fn set_prime( + origin: OriginFor, + who: AccountIdLookupOf, + ) -> DispatchResultWithPostInfo { T::PrimeOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(who)?; - Self::members().binary_search(&who).ok().ok_or(Error::::NotMember)?; + let members = Self::members(); + members.binary_search(&who).ok().ok_or(Error::::NotMember)?; Prime::::put(&who); T::MembershipChanged::set_prime(Some(who)); - Ok(()) + Ok(Some(T::WeightInfo::set_prime(members.len() as u32)).into()) } /// Remove the prime member if it exists. /// /// May only be called from `T::PrimeOrigin`. #[pallet::call_index(6)] - #[pallet::weight({50_000_000})] + #[pallet::weight(T::WeightInfo::clear_prime())] pub fn clear_prime(origin: OriginFor) -> DispatchResult { T::PrimeOrigin::ensure_origin(origin)?; Prime::::kill(); @@ -442,7 +460,7 @@ mod benchmark { } // er keep the prime common between incoming and outgoing to make sure it is rejigged. - reset_member { + reset_members { let m in 1 .. T::MaxMembers::get(); let members = (1..m+1).map(|i| account("member", i, SEED)).collect::>(); @@ -500,8 +518,7 @@ mod benchmark { } clear_prime { - let m in 1 .. 
T::MaxMembers::get(); - let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + let members = (0..T::MaxMembers::get()).map(|i| account("member", i, SEED)).collect::>(); let prime = members.last().cloned().unwrap(); set_members::(members, None); }: { @@ -526,7 +543,8 @@ mod tests { use sp_runtime::{bounded_vec, traits::BadOrigin, BuildStorage}; use frame_support::{ - assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, + assert_noop, assert_ok, assert_storage_noop, derive_impl, ord_parameter_types, + parameter_types, traits::{ConstU32, StorageVersion}, }; use frame_system::EnsureSignedBy; @@ -716,6 +734,17 @@ mod tests { }); } + #[test] + fn swap_member_with_identical_arguments_changes_nothing() { + new_test_ext().execute_with(|| { + assert_storage_noop!(assert_ok!(Membership::swap_member( + RuntimeOrigin::signed(3), + 10, + 10 + ))); + }); + } + #[test] fn change_key_works() { new_test_ext().execute_with(|| { @@ -745,6 +774,13 @@ mod tests { }); } + #[test] + fn change_key_with_same_caller_as_argument_changes_nothing() { + new_test_ext().execute_with(|| { + assert_storage_noop!(assert_ok!(Membership::change_key(RuntimeOrigin::signed(10), 10))); + }); + } + #[test] fn reset_members_works() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/membership/src/weights.rs b/substrate/frame/membership/src/weights.rs index 18ea7fcb315a..2d18848b89ab 100644 --- a/substrate/frame/membership/src/weights.rs +++ b/substrate/frame/membership/src/weights.rs @@ -55,10 +55,10 @@ pub trait WeightInfo { fn add_member(m: u32, ) -> Weight; fn remove_member(m: u32, ) -> Weight; fn swap_member(m: u32, ) -> Weight; - fn reset_member(m: u32, ) -> Weight; + fn reset_members(m: u32, ) -> Weight; fn change_key(m: u32, ) -> Weight; fn set_prime(m: u32, ) -> Weight; - fn clear_prime(m: u32, ) -> Weight; + fn clear_prime() -> Weight; } /// Weights for pallet_membership using the Substrate node and recommended hardware. @@ -142,7 +142,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: TechnicalCommittee Prime (r:0 w:1) /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `m` is `[1, 100]`. - fn reset_member(m: u32, ) -> Weight { + fn reset_members(m: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `312 + m * (64 ±0)` // Estimated: `4687 + m * (64 ±0)` @@ -200,15 +200,12 @@ impl WeightInfo for SubstrateWeight { /// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) /// Storage: TechnicalCommittee Prime (r:0 w:1) /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) - /// The range of component `m` is `[1, 100]`. - fn clear_prime(m: u32, ) -> Weight { + fn clear_prime() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` // Minimum execution time: 3_373_000 picoseconds. Weight::from_parts(3_750_452, 0) - // Standard Error: 142 - .saturating_add(Weight::from_parts(505, 0).saturating_mul(m.into())) .saturating_add(T::DbWeight::get().writes(2_u64)) } } @@ -293,7 +290,7 @@ impl WeightInfo for () { /// Storage: TechnicalCommittee Prime (r:0 w:1) /// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured) /// The range of component `m` is `[1, 100]`. 
-	fn reset_member(m: u32, ) -> Weight {
+	fn reset_members(m: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `312 + m * (64 ±0)`
 		//  Estimated: `4687 + m * (64 ±0)`
@@ -351,15 +348,12 @@ impl WeightInfo for () {
 	/// Proof: TechnicalMembership Prime (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen)
 	/// Storage: TechnicalCommittee Prime (r:0 w:1)
 	/// Proof Skipped: TechnicalCommittee Prime (max_values: Some(1), max_size: None, mode: Measured)
-	/// The range of component `m` is `[1, 100]`.
-	fn clear_prime(m: u32, ) -> Weight {
+	fn clear_prime() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
 		// Minimum execution time: 3_373_000 picoseconds.
 		Weight::from_parts(3_750_452, 0)
-			// Standard Error: 142
-			.saturating_add(Weight::from_parts(505, 0).saturating_mul(m.into()))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
 }

From 3386377b0f88cb2c7cfafe8a5f5b3b41b85dde59 Mon Sep 17 00:00:00 2001
From: Sebastian Kunert
Date: Fri, 23 Feb 2024 15:09:49 +0100
Subject: [PATCH 31/51] PoV Reclaim Runtime Side (#3002)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# Runtime side for PoV Reclaim

## Implementation Overview
- A host function to fetch the storage proof size has been added to the PVF. It
  uses the size-tracking recorder that was introduced in my previous PR.
- Mechanisms to use the reclaim HostFunction have been introduced.
- 1. A SignedExtension that checks the node-reported proof size before and
  after application of an extrinsic, and then reclaims the difference.
- 2. A manual helper to make reclaiming easier when manual interaction is
  required, for example in `on_idle` or other hooks.
- In order to utilize the manual reclaiming, I modified `WeightMeter` to
  support the reduction of consumed weight, at least for storage proof size.

## How to use
To enable the general functionality for a parachain:
1. Add the SignedExtension to your parachain runtime.
2. Provide the HostFunction to the node.
3.
Enable proof recording during block import ## TODO - [x] PRDoc --------- Co-authored-by: Dmitry Markin Co-authored-by: Davide Galassi Co-authored-by: Bastian Köcher --- .gitlab/pipeline/check.yml | 4 +- Cargo.lock | 23 + Cargo.toml | 1 + .../src/validate_block/implementation.rs | 29 +- .../src/validate_block/tests.rs | 2 +- .../src/validate_block/trie_recorder.rs | 11 + .../parachain-template/node/src/command.rs | 3 +- .../parachain-template/node/src/service.rs | 8 +- cumulus/parachain-template/runtime/Cargo.toml | 2 + cumulus/parachain-template/runtime/src/lib.rs | 1 + cumulus/polkadot-parachain/src/command.rs | 3 +- cumulus/polkadot-parachain/src/service.rs | 13 +- .../proof-size-hostfunction/src/lib.rs | 4 +- .../storage-weight-reclaim/Cargo.toml | 46 ++ .../storage-weight-reclaim/src/lib.rs | 663 ++++++++++++++++++ cumulus/test/client/Cargo.toml | 1 + cumulus/test/client/src/lib.rs | 20 +- cumulus/test/runtime/Cargo.toml | 2 + cumulus/test/runtime/src/lib.rs | 1 + cumulus/test/service/Cargo.toml | 1 + cumulus/test/service/src/lib.rs | 5 +- prdoc/pr_3002.prdoc | 29 + substrate/frame/system/src/lib.rs | 3 +- .../primitives/weights/src/weight_meter.rs | 20 + substrate/test-utils/client/src/lib.rs | 9 + 25 files changed, 875 insertions(+), 29 deletions(-) create mode 100644 cumulus/primitives/storage-weight-reclaim/Cargo.toml create mode 100644 cumulus/primitives/storage-weight-reclaim/src/lib.rs create mode 100644 prdoc/pr_3002.prdoc diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index cdb5d1b05d09..4d71a473372d 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -108,8 +108,10 @@ check-toml-format: export RUST_LOG=remote-ext=debug,runtime=debug echo "---------- Downloading try-runtime CLI ----------" - curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.5.0/try-runtime-x86_64-unknown-linux-musl -o try-runtime + curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.5.4/try-runtime-x86_64-unknown-linux-musl -o try-runtime chmod +x ./try-runtime + echo "Using try-runtime-cli version:" + ./try-runtime --version echo "---------- Building ${PACKAGE} runtime ----------" time cargo build --release --locked -p "$PACKAGE" --features try-runtime diff --git a/Cargo.lock b/Cargo.lock index d9791ec0147f..fbe2729acba1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4047,6 +4047,25 @@ dependencies = [ "sp-trie", ] +[[package]] +name = "cumulus-primitives-storage-weight-reclaim" +version = "1.0.0" +dependencies = [ + "cumulus-primitives-core", + "cumulus-primitives-proof-size-hostfunction", + "cumulus-test-runtime", + "docify", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", + "sp-trie", +] + [[package]] name = "cumulus-primitives-timestamp" version = "0.7.0" @@ -4209,6 +4228,7 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", + "cumulus-primitives-storage-weight-reclaim", "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", "cumulus-test-service", @@ -4253,6 +4273,7 @@ version = "0.1.0" dependencies = [ "cumulus-pallet-parachain-system", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "frame-executive", "frame-support", "frame-system", @@ -4295,6 +4316,7 @@ dependencies = [ "cumulus-client-service", "cumulus-pallet-parachain-system", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", 
"cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", @@ -11340,6 +11362,7 @@ dependencies = [ "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-utility", "frame-benchmarking", "frame-executive", diff --git a/Cargo.toml b/Cargo.toml index 774ce1b52a39..1d27cfe95392 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -127,6 +127,7 @@ members = [ "cumulus/primitives/core", "cumulus/primitives/parachain-inherent", "cumulus/primitives/proof-size-hostfunction", + "cumulus/primitives/storage-weight-reclaim", "cumulus/primitives/timestamp", "cumulus/primitives/utility", "cumulus/test/client", diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index ce3b724420f1..ecab7a9a0931 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -16,7 +16,7 @@ //! The actual implementation of the validate block functionality. -use super::{trie_cache, MemoryOptimizedValidationParams}; +use super::{trie_cache, trie_recorder, MemoryOptimizedValidationParams}; use cumulus_primitives_core::{ relay_chain::Hash as RHash, ParachainBlockData, PersistedValidationData, }; @@ -34,12 +34,14 @@ use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; use sp_std::prelude::*; -use sp_trie::MemoryDB; +use sp_trie::{MemoryDB, ProofSizeProvider}; +use trie_recorder::SizeOnlyRecorderProvider; type TrieBackend = sp_state_machine::TrieBackend< MemoryDB>, HashingFor, trie_cache::CacheProvider>, + SizeOnlyRecorderProvider>, >; type Ext<'a, B> = sp_state_machine::Ext<'a, HashingFor, TrieBackend>; @@ -48,6 +50,9 @@ fn with_externalities R, R>(f: F) -> R { sp_externalities::with_externalities(f).expect("Environmental externalities not set.") } +// Recorder instance to be used during this validate_block call. +environmental::environmental!(recorder: trait ProofSizeProvider); + /// Validate the given parachain block. /// /// This function is doing roughly the following: @@ -120,6 +125,7 @@ where sp_std::mem::drop(storage_proof); + let mut recorder = SizeOnlyRecorderProvider::new(); let cache_provider = trie_cache::CacheProvider::new(); // We use the storage root of the `parent_head` to ensure that it is the correct root. // This is already being done above while creating the in-memory db, but let's be paranoid!! 
@@ -128,6 +134,7 @@ where *parent_header.state_root(), cache_provider, ) + .with_recorder(recorder.clone()) .build(); let _guard = ( @@ -167,9 +174,11 @@ where .replace_implementation(host_default_child_storage_next_key), sp_io::offchain_index::host_set.replace_implementation(host_offchain_index_set), sp_io::offchain_index::host_clear.replace_implementation(host_offchain_index_clear), + cumulus_primitives_proof_size_hostfunction::storage_proof_size::host_storage_proof_size + .replace_implementation(host_storage_proof_size), ); - run_with_externalities::(&backend, || { + run_with_externalities_and_recorder::(&backend, &mut recorder, || { let relay_chain_proof = crate::RelayChainStateProof::new( PSC::SelfParaId::get(), inherent_data.validation_data.relay_parent_storage_root, @@ -190,7 +199,7 @@ where } }); - run_with_externalities::(&backend, || { + run_with_externalities_and_recorder::(&backend, &mut recorder, || { let head_data = HeadData(block.header().encode()); E::execute_block(block); @@ -265,15 +274,17 @@ fn validate_validation_data( ); } -/// Run the given closure with the externalities set. -fn run_with_externalities R>( +/// Run the given closure with the externalities and recorder set. +fn run_with_externalities_and_recorder R>( backend: &TrieBackend, + recorder: &mut SizeOnlyRecorderProvider>, execute: F, ) -> R { let mut overlay = sp_state_machine::OverlayedChanges::default(); let mut ext = Ext::::new(&mut overlay, backend); + recorder.reset(); - set_and_run_with_externalities(&mut ext, || execute()) + recorder::using(recorder, || set_and_run_with_externalities(&mut ext, || execute())) } fn host_storage_read(key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { @@ -305,6 +316,10 @@ fn host_storage_clear(key: &[u8]) { with_externalities(|ext| ext.place_storage(key.to_vec(), None)) } +fn host_storage_proof_size() -> u64 { + recorder::with(|rec| rec.estimate_encoded_size()).expect("Recorder is always set; qed") as _ +} + fn host_storage_root(version: StateVersion) -> Vec { with_externalities(|ext| ext.storage_root(version)) } diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index f17ac6007a09..a9fb65e11089 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -59,7 +59,7 @@ fn call_validate_block( } fn create_test_client() -> (Client, Header) { - let client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().enable_import_proof_recording().build(); let genesis_header = client .header(client.chain_info().genesis_hash) diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index e73aef70aa49..48310670c074 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -97,6 +97,7 @@ pub(crate) struct SizeOnlyRecorderProvider { } impl SizeOnlyRecorderProvider { + /// Create a new instance of [`SizeOnlyRecorderProvider`] pub fn new() -> Self { Self { seen_nodes: Default::default(), @@ -104,6 +105,13 @@ impl SizeOnlyRecorderProvider { recorded_keys: Default::default(), } } + + /// Reset the internal state. 
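+	///
+	/// Clears the seen-node set and the tracked proof size. `validate_block` reuses a
+	/// single recorder instance for its execution passes and resets it before each run.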
+ pub fn reset(&self) { + self.seen_nodes.borrow_mut().clear(); + *self.encoded_size.borrow_mut() = 0; + self.recorded_keys.borrow_mut().clear(); + } } impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderProvider { @@ -281,6 +289,9 @@ mod tests { reference_recorder.estimate_encoded_size(), recorder_for_test.estimate_encoded_size() ); + + recorder_for_test.reset(); + assert_eq!(recorder_for_test.estimate_encoded_size(), 0) } } } diff --git a/cumulus/parachain-template/node/src/command.rs b/cumulus/parachain-template/node/src/command.rs index 72b3ab7bb4b9..82624ae0be59 100644 --- a/cumulus/parachain-template/node/src/command.rs +++ b/cumulus/parachain-template/node/src/command.rs @@ -1,5 +1,6 @@ use std::net::SocketAddr; +use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; @@ -183,7 +184,7 @@ pub fn run() -> Result<()> { match cmd { BenchmarkCmd::Pallet(cmd) => if cfg!(feature = "runtime-benchmarks") { - runner.sync_run(|config| cmd.run::, ()>(config)) + runner.sync_run(|config| cmd.run::, ReclaimHostFunctions>(config)) } else { Err("Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." diff --git a/cumulus/parachain-template/node/src/service.rs b/cumulus/parachain-template/node/src/service.rs index 830b6e82f969..4dd24803e9b1 100644 --- a/cumulus/parachain-template/node/src/service.rs +++ b/cumulus/parachain-template/node/src/service.rs @@ -40,7 +40,10 @@ use substrate_prometheus_endpoint::Registry; pub struct ParachainNativeExecutor; impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + type ExtendHostFunctions = ( + cumulus_client_service::storage_proof_size::HostFunctions, + frame_benchmarking::benchmarking::HostFunctions, + ); fn dispatch(method: &str, data: &[u8]) -> Option> { parachain_template_runtime::api::dispatch(method, data) @@ -100,10 +103,11 @@ pub fn new_partial(config: &Configuration) -> Result let executor = ParachainExecutor::new_with_wasm_executor(wasm); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( + sc_service::new_full_parts_record_import::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, + true, )?; let client = Arc::new(client); diff --git a/cumulus/parachain-template/runtime/Cargo.toml b/cumulus/parachain-template/runtime/Cargo.toml index 44d96ffc4e62..1873bd0a23eb 100644 --- a/cumulus/parachain-template/runtime/Cargo.toml +++ b/cumulus/parachain-template/runtime/Cargo.toml @@ -73,6 +73,7 @@ cumulus-pallet-xcm = { path = "../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim", default-features = false } pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } parachains-common = { path = "../../parachains/common", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../parachains/pallets/parachain-info", default-features = false } @@ -87,6 +88,7 
@@ std = [ "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", "frame-executive/std", diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/cumulus/parachain-template/runtime/src/lib.rs index d9bc111fcef7..cee9b33bf37c 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/cumulus/parachain-template/runtime/src/lib.rs @@ -107,6 +107,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index a40c356dcd13..4d44879af515 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -23,6 +23,7 @@ use crate::{ }, service::{new_partial, Block}, }; +use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions; use cumulus_primitives_core::ParaId; use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}; use log::info; @@ -584,7 +585,7 @@ pub fn run() -> Result<()> { match cmd { BenchmarkCmd::Pallet(cmd) => if cfg!(feature = "runtime-benchmarks") { - runner.sync_run(|config| cmd.run::, ()>(config)) + runner.sync_run(|config| cmd.run::, ReclaimHostFunctions>(config)) } else { Err("Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 553975b01a80..4e06cd38f1d7 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -68,11 +68,15 @@ use substrate_prometheus_endpoint::Registry; use polkadot_primitives::CollatorPair; #[cfg(not(feature = "runtime-benchmarks"))] -type HostFunctions = sp_io::SubstrateHostFunctions; +type HostFunctions = + (sp_io::SubstrateHostFunctions, cumulus_client_service::storage_proof_size::HostFunctions); #[cfg(feature = "runtime-benchmarks")] -type HostFunctions = - (sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions); +type HostFunctions = ( + sp_io::SubstrateHostFunctions, + cumulus_client_service::storage_proof_size::HostFunctions, + frame_benchmarking::benchmarking::HostFunctions, +); type ParachainClient = TFullClient>; @@ -274,10 +278,11 @@ where .build(); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( + sc_service::new_full_parts_record_import::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, + true, )?; let client = Arc::new(client); diff --git a/cumulus/primitives/proof-size-hostfunction/src/lib.rs b/cumulus/primitives/proof-size-hostfunction/src/lib.rs index 6da6235e585a..8ebc58ea450d 100644 --- a/cumulus/primitives/proof-size-hostfunction/src/lib.rs +++ b/cumulus/primitives/proof-size-hostfunction/src/lib.rs @@ -18,6 +18,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "std")] use sp_externalities::ExternalitiesExt; use sp_runtime_interface::runtime_interface; @@ -35,7 +36,8 @@ pub const PROOF_RECORDING_DISABLED: u64 = u64::MAX; pub trait StorageProofSize { /// Returns the current storage proof size. 
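+	///
+	/// Returns `PROOF_RECORDING_DISABLED` (`u64::MAX`) when no `ProofSizeExt` extension is
+	/// registered with the externalities, i.e. when the node runs without proof recording.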
fn storage_proof_size(&mut self) -> u64 { - self.extension::().map_or(u64::MAX, |e| e.storage_proof_size()) + self.extension::() + .map_or(PROOF_RECORDING_DISABLED, |e| e.storage_proof_size()) } } diff --git a/cumulus/primitives/storage-weight-reclaim/Cargo.toml b/cumulus/primitives/storage-weight-reclaim/Cargo.toml new file mode 100644 index 000000000000..4835fb5192b8 --- /dev/null +++ b/cumulus/primitives/storage-weight-reclaim/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "cumulus-primitives-storage-weight-reclaim" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +description = "Utilities to reclaim storage weight." +license = "Apache-2.0" + +[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +log = { workspace = true } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } + +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } + +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } + +cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction", default-features = false } +docify = "0.2.7" + +[dev-dependencies] +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +cumulus-test-runtime = { path = "../../test/runtime" } + +[features] +default = ["std"] +std = [ + "codec/std", + "cumulus-primitives-core/std", + "cumulus-primitives-proof-size-hostfunction/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "sp-trie/std", +] diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs new file mode 100644 index 000000000000..5dddc92e3955 --- /dev/null +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -0,0 +1,663 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Mechanism to reclaim PoV proof size weight after an extrinsic has been applied. 
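+//!
+//! The node reports the exact proof size recorded so far through the `storage_proof_size`
+//! host function. The `StorageWeightReclaim` extension samples this value before and after
+//! dispatch, compares the difference with the benchmarked proof size of the extrinsic, and
+//! credits the unused difference back to the block weight.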
+ +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::{Decode, Encode}; +use cumulus_primitives_core::Weight; +use cumulus_primitives_proof_size_hostfunction::{ + storage_proof_size::storage_proof_size, PROOF_RECORDING_DISABLED, +}; +use frame_support::{ + dispatch::{DispatchInfo, PostDispatchInfo}, + weights::WeightMeter, +}; +use frame_system::Config; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension}, + transaction_validity::TransactionValidityError, + DispatchResult, +}; +use sp_std::marker::PhantomData; + +const LOG_TARGET: &'static str = "runtime::storage_reclaim"; + +/// `StorageWeightReclaimer` is a mechanism for manually reclaiming storage weight. +/// +/// It internally keeps track of the proof size and storage weight at initialization time. At +/// reclaim it computes the real consumed storage weight and refunds excess weight. +/// +/// # Example +#[doc = docify::embed!("src/lib.rs", simple_reclaimer_example)] +pub struct StorageWeightReclaimer { + previous_remaining_proof_size: u64, + previous_reported_proof_size: Option, +} + +impl StorageWeightReclaimer { + /// Creates a new `StorageWeightReclaimer` instance and initializes it with the storage + /// size provided by `weight_meter` and reported proof size from the node. + #[must_use = "Must call `reclaim_with_meter` to reclaim the weight"] + pub fn new(weight_meter: &WeightMeter) -> StorageWeightReclaimer { + let previous_remaining_proof_size = weight_meter.remaining().proof_size(); + let previous_reported_proof_size = get_proof_size(); + Self { previous_remaining_proof_size, previous_reported_proof_size } + } + + /// Check the consumed storage weight and calculate the consumed excess weight. + fn reclaim(&mut self, remaining_weight: Weight) -> Option { + let current_remaining_weight = remaining_weight.proof_size(); + let current_storage_proof_size = get_proof_size()?; + let previous_storage_proof_size = self.previous_reported_proof_size?; + let used_weight = + self.previous_remaining_proof_size.saturating_sub(current_remaining_weight); + let reported_used_size = + current_storage_proof_size.saturating_sub(previous_storage_proof_size); + let reclaimable = used_weight.saturating_sub(reported_used_size); + log::trace!( + target: LOG_TARGET, + "Found reclaimable storage weight. benchmarked: {used_weight}, consumed: {reported_used_size}" + ); + + self.previous_remaining_proof_size = current_remaining_weight.saturating_add(reclaimable); + self.previous_reported_proof_size = Some(current_storage_proof_size); + Some(Weight::from_parts(0, reclaimable)) + } + + /// Check the consumed storage weight and add the reclaimed + /// weight budget back to `weight_meter`. + pub fn reclaim_with_meter(&mut self, weight_meter: &mut WeightMeter) -> Option { + let reclaimed = self.reclaim(weight_meter.remaining())?; + weight_meter.reclaim_proof_size(reclaimed.proof_size()); + Some(reclaimed) + } +} + +/// Returns the current storage proof size from the host side. +/// +/// Returns `None` if proof recording is disabled on the host. +pub fn get_proof_size() -> Option { + let proof_size = storage_proof_size(); + (proof_size != PROOF_RECORDING_DISABLED).then_some(proof_size) +} + +/// Storage weight reclaim mechanism. +/// +/// This extension checks the size of the node-side storage proof +/// before and after executing a given extrinsic. The difference between +/// benchmarked and spent weight can be reclaimed. 
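+///
+/// A runtime opts in by appending this extension to its `SignedExtra` tuple, along these
+/// lines (a sketch mirroring the test-runtime changes in this patch; the other extensions
+/// are whatever the concrete runtime already uses):
+///
+/// ```ignore
+/// pub type SignedExtra = (
+///     // ...
+///     frame_system::CheckWeight<Runtime>,
+///     pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+///     cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
+/// );
+/// ```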
+#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct StorageWeightReclaim(PhantomData); + +impl StorageWeightReclaim { + /// Create a new `StorageWeightReclaim` instance. + pub fn new() -> Self { + Self(Default::default()) + } +} + +impl core::fmt::Debug for StorageWeightReclaim { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + let _ = write!(f, "StorageWeightReclaim"); + Ok(()) + } +} + +impl SignedExtension for StorageWeightReclaim +where + T::RuntimeCall: Dispatchable, +{ + const IDENTIFIER: &'static str = "StorageWeightReclaim"; + + type AccountId = T::AccountId; + type Call = T::RuntimeCall; + type AdditionalSigned = (); + type Pre = Option; + + fn additional_signed( + &self, + ) -> Result + { + Ok(()) + } + + fn pre_dispatch( + self, + _who: &Self::AccountId, + _call: &Self::Call, + _info: &sp_runtime::traits::DispatchInfoOf, + _len: usize, + ) -> Result { + Ok(get_proof_size()) + } + + fn post_dispatch( + pre: Option, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + let Some(Some(pre_dispatch_proof_size)) = pre else { + return Ok(()); + }; + + let Some(post_dispatch_proof_size) = get_proof_size() else { + log::debug!( + target: LOG_TARGET, + "Proof recording enabled during pre-dispatch, now disabled. This should not happen." + ); + return Ok(()) + }; + let benchmarked_weight = info.weight.proof_size(); + let consumed_weight = post_dispatch_proof_size.saturating_sub(pre_dispatch_proof_size); + + // Unspent weight according to the `actual_weight` from `PostDispatchInfo` + // This unspent weight will be refunded by the `CheckWeight` extension, so we need to + // account for that. + let unspent = post_info.calc_unspent(info).proof_size(); + let storage_size_diff = + benchmarked_weight.saturating_sub(unspent).abs_diff(consumed_weight as u64); + + // This value will be reclaimed by [`frame_system::CheckWeight`], so we need to calculate + // that in. + frame_system::BlockWeight::::mutate(|current| { + if consumed_weight > benchmarked_weight { + log::error!( + target: LOG_TARGET, + "Benchmarked storage weight smaller than consumed storage weight. benchmarked: {benchmarked_weight} consumed: {consumed_weight} unspent: {unspent}" + ); + current.accrue(Weight::from_parts(0, storage_size_diff), info.class) + } else { + log::trace!( + target: LOG_TARGET, + "Reclaiming storage weight. 
benchmarked: {benchmarked_weight}, consumed: {consumed_weight} unspent: {unspent}" + ); + current.reduce(Weight::from_parts(0, storage_size_diff), info.class) + } + }); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::{ + assert_ok, + dispatch::DispatchClass, + weights::{Weight, WeightMeter}, + }; + use frame_system::{BlockWeight, CheckWeight}; + use sp_runtime::{AccountId32, BuildStorage}; + use sp_std::marker::PhantomData; + use sp_trie::proof_size_extension::ProofSizeExt; + + type Test = cumulus_test_runtime::Runtime; + const CALL: &::RuntimeCall = + &cumulus_test_runtime::RuntimeCall::System(frame_system::Call::set_heap_pages { + pages: 0u64, + }); + const ALICE: AccountId32 = AccountId32::new([1u8; 32]); + const LEN: usize = 0; + + pub fn new_test_ext() -> sp_io::TestExternalities { + let ext: sp_io::TestExternalities = cumulus_test_runtime::RuntimeGenesisConfig::default() + .build_storage() + .unwrap() + .into(); + ext + } + + struct TestRecorder { + return_values: Box<[usize]>, + counter: std::sync::atomic::AtomicUsize, + } + + impl TestRecorder { + fn new(values: &[usize]) -> Self { + TestRecorder { return_values: values.into(), counter: Default::default() } + } + } + + impl sp_trie::ProofSizeProvider for TestRecorder { + fn estimate_encoded_size(&self) -> usize { + let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed); + self.return_values[counter] + } + } + + fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities { + let mut test_ext = new_test_ext(); + let test_recorder = TestRecorder::new(proof_values); + test_ext.register_extension(ProofSizeExt::new(test_recorder)); + test_ext + } + + fn set_current_storage_weight(new_weight: u64) { + BlockWeight::::mutate(|current_weight| { + current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal); + }); + } + + #[test] + fn basic_refund() { + // The real cost will be 100 bytes of storage size + let mut test_ext = setup_test_externalities(&[0, 100]); + + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let pre = StorageWeightReclaim::(PhantomData) + .pre_dispatch(&ALICE, CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(0)); + + assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + // We expect a refund of 400 + assert_ok!(StorageWeightReclaim::::post_dispatch( + Some(pre), + &info, + &post_info, + LEN, + &Ok(()) + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 600); + }) + } + + #[test] + fn does_nothing_without_extension() { + let mut test_ext = new_test_ext(); + + // Proof size extension not registered + test_ext.execute_with(|| { + set_current_storage_weight(1000); + + // Benchmarked storage weight: 500 + let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + let pre = StorageWeightReclaim::(PhantomData) + .pre_dispatch(&ALICE, CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, None); + + assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + assert_ok!(StorageWeightReclaim::::post_dispatch( + Some(pre), + &info, + &post_info, + LEN, + &Ok(()) + )); + + assert_eq!(BlockWeight::::get().total().proof_size(), 1000); + }) + } + + #[test] + fn negative_refund_is_added_to_weight() { + let mut test_ext = 
setup_test_externalities(&[100, 300]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+			// Benchmarked storage weight: 100
+			let info = DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() };
+			let post_info = PostDispatchInfo::default();
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(100));
+
+			// We expect no refund
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 1100);
+		})
+	}
+
+	#[test]
+	fn test_zero_proof_size() {
+		let mut test_ext = setup_test_externalities(&[0, 0]);
+
+		test_ext.execute_with(|| {
+			let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() };
+			let post_info = PostDispatchInfo::default();
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(0));
+
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 0);
+		});
+	}
+
+	#[test]
+	fn test_larger_pre_dispatch_proof_size() {
+		let mut test_ext = setup_test_externalities(&[300, 100]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1300);
+
+			let info = DispatchInfo { weight: Weight::from_parts(0, 500), ..Default::default() };
+			let post_info = PostDispatchInfo::default();
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(300));
+
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 800);
+		});
+	}
+
+	#[test]
+	fn test_incorporates_check_weight_unspent_weight() {
+		let mut test_ext = setup_test_externalities(&[100, 300]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+
+			// Benchmarked storage weight: 300
+			let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() };
+
+			// Actual weight is 50
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(50, 250)),
+				pays_fee: Default::default(),
+			};
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(100));
+
+			// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`, so
+			// we always need to call `post_dispatch` to verify that they interoperate correctly.
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 900);
+		})
+	}
+
+	#[test]
+	fn test_incorporates_check_weight_unspent_weight_on_negative() {
+		let mut test_ext = setup_test_externalities(&[100, 300]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+			// Benchmarked storage weight: 50
+			let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() };
+
+			// Actual weight is 25
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(50, 25)),
+				pays_fee: Default::default(),
+			};
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(100));
+
+			// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`, so
+			// we always need to call `post_dispatch` to verify that they interoperate correctly.
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 1150);
+		})
+	}
+
+	#[test]
+	fn test_incorporates_check_weight_unspent_weight_reverse_order() {
+		let mut test_ext = setup_test_externalities(&[100, 300]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+
+			// Benchmarked storage weight: 300
+			let info = DispatchInfo { weight: Weight::from_parts(100, 300), ..Default::default() };
+
+			// Actual weight is 50
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(50, 250)),
+				pays_fee: Default::default(),
+			};
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(100));
+
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+			// `CheckWeight` gets called after `StorageWeightReclaim` this time.
+			// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`, so
+			// we always need to call `post_dispatch` to verify that they interoperate correctly.
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 900);
+		})
+	}
+
+	#[test]
+	fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() {
+		let mut test_ext = setup_test_externalities(&[100, 300]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+			// Benchmarked storage weight: 50
+			let info = DispatchInfo { weight: Weight::from_parts(100, 50), ..Default::default() };
+
+			// Actual weight is 25
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(50, 25)),
+				pays_fee: Default::default(),
+			};
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(100));
+
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+			// `CheckWeight` gets called after `StorageWeightReclaim` this time.
+			// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`, so
+			// we always need to call `post_dispatch` to verify that they interoperate correctly.
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+
+			assert_eq!(BlockWeight::<Test>::get().total().proof_size(), 1150);
+		})
+	}
+
+	#[test]
+	fn storage_size_reported_correctly() {
+		let mut test_ext = setup_test_externalities(&[1000]);
+		test_ext.execute_with(|| {
+			assert_eq!(get_proof_size(), Some(1000));
+		});
+
+		let mut test_ext = new_test_ext();
+
+		let test_recorder = TestRecorder::new(&[0]);
+
+		test_ext.register_extension(ProofSizeExt::new(test_recorder));
+
+		test_ext.execute_with(|| {
+			assert_eq!(get_proof_size(), Some(0));
+		});
+	}
+
+	#[test]
+	fn storage_size_disabled_reported_correctly() {
+		let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]);
+
+		test_ext.execute_with(|| {
+			assert_eq!(get_proof_size(), None);
+		});
+	}
+
+	#[test]
+	fn test_reclaim_helper() {
+		let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]);
+
+		test_ext.execute_with(|| {
+			let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 2000));
+			let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter);
+			remaining_weight_meter.consume(Weight::from_parts(0, 500));
+			let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter);
+
+			assert_eq!(reclaimed, Some(Weight::from_parts(0, 200)));
+
+			remaining_weight_meter.consume(Weight::from_parts(0, 800));
+			let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter);
+			assert_eq!(reclaimed, Some(Weight::from_parts(0, 300)));
+			assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1200));
+		});
+	}
+
+	#[test]
+	fn test_reclaim_helper_does_not_reclaim_negative() {
+		// Benchmarked weight does not change at all
+		let mut test_ext = setup_test_externalities(&[1000, 1300]);
+
+		test_ext.execute_with(|| {
+			let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000));
+			let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter);
+			let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter);
+
+			assert_eq!(reclaimed, Some(Weight::from_parts(0, 0)));
+			assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(0, 1000));
+		});
+
+		// Benchmarked weight increases less than the storage proof consumes
+		let mut test_ext = setup_test_externalities(&[1000, 1300]);
+
+		test_ext.execute_with(|| {
+			let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(0, 1000));
+			let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter);
+			remaining_weight_meter.consume(Weight::from_parts(0, 0));
+			let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter);
+
+			assert_eq!(reclaimed, Some(Weight::from_parts(0, 0)));
+		});
+	}
+
+	/// Just here for doc purposes
+	fn get_benched_weight() -> Weight {
+		Weight::from_parts(0, 5)
+	}
+
+	/// Just here for doc purposes
+	fn do_work() {}
+
+	#[docify::export_content(simple_reclaimer_example)]
+	fn reclaim_with_weight_meter() {
+		let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10));
+
+		let benched_weight = get_benched_weight();
+
+		// It is important to instantiate the `StorageWeightReclaimer` before we consume the weight
+		// for a piece of work from the weight meter.
+		let mut reclaim_helper = StorageWeightReclaimer::new(&remaining_weight_meter);
+
+		if remaining_weight_meter.try_consume(benched_weight).is_ok() {
+			// Perform some work that consumes `benched_weight` storage weight.
+ do_work(); + + // Reclaimer will detect that we only consumed 2 bytes, so 3 bytes are reclaimed. + let reclaimed = reclaim_helper.reclaim_with_meter(&mut remaining_weight_meter); + + // We reclaimed 3 bytes of storage size! + assert_eq!(reclaimed, Some(Weight::from_parts(0, 3))); + assert_eq!(BlockWeight::::get().total().proof_size(), 10); + assert_eq!(remaining_weight_meter.remaining(), Weight::from_parts(10, 8)); + } + } + + #[test] + fn test_reclaim_helper_works_with_meter() { + // The node will report 12 - 10 = 2 consumed storage size between the calls. + let mut test_ext = setup_test_externalities(&[10, 12]); + + test_ext.execute_with(|| { + // Initial storage size is 10. + set_current_storage_weight(10); + reclaim_with_weight_meter(); + }); + } +} diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 7190172101cb..028733ce2355 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -41,6 +41,7 @@ cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-primitives-core = { path = "../../primitives/core" } cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } +cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim" } [features] runtime-benchmarks = [ diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index df63f683de6b..c46f4da7f678 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -151,6 +151,7 @@ pub fn generate_extrinsic_with_pair( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), ); let function = function.into(); @@ -158,7 +159,7 @@ pub fn generate_extrinsic_with_pair( let raw_payload = SignedPayload::from_raw( function.clone(), extra.clone(), - ((), VERSION.spec_version, genesis_block, current_block_hash, (), (), ()), + ((), VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| origin.sign(e)); @@ -203,13 +204,16 @@ pub fn validate_block( let mut ext_ext = ext.ext(); let heap_pages = HeapAllocStrategy::Static { extra_pages: 1024 }; - let executor = WasmExecutor::::builder() - .with_execution_method(WasmExecutionMethod::default()) - .with_max_runtime_instances(1) - .with_runtime_cache_size(2) - .with_onchain_heap_alloc_strategy(heap_pages) - .with_offchain_heap_alloc_strategy(heap_pages) - .build(); + let executor = WasmExecutor::<( + sp_io::SubstrateHostFunctions, + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions, + )>::builder() + .with_execution_method(WasmExecutionMethod::default()) + .with_max_runtime_instances(1) + .with_runtime_cache_size(2) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) + .build(); executor .uncached_call( diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 5902a62512be..449a8b819bc0 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -39,6 +39,7 @@ sp-version = { path = "../../../substrate/primitives/version", default-features # Cumulus cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = 
["parameterized-consensus-hook"] } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } +cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } @@ -49,6 +50,7 @@ std = [ "codec/std", "cumulus-pallet-parachain-system/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "frame-executive/std", "frame-support/std", "frame-system-rpc-runtime-api/std", diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 6068f895c83b..5fb314109844 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -331,6 +331,7 @@ pub type SignedExtra = ( frame_system::CheckNonce, frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index b26f0b9967cf..27273f4e0a8d 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -81,6 +81,7 @@ cumulus-relay-chain-minimal-node = { path = "../../client/relay-chain-minimal-no cumulus-client-pov-recovery = { path = "../../client/pov-recovery" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-primitives-storage-weight-reclaim = { path = "../../primitives/storage-weight-reclaim" } pallet-timestamp = { path = "../../../substrate/frame/timestamp" } [dev-dependencies] diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 1c2e1db97414..3554a383f219 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -112,7 +112,7 @@ pub type AnnounceBlockFn = Arc>) + Send + Sync>; pub struct RuntimeExecutor; impl sc_executor::NativeExecutionDispatch for RuntimeExecutor { - type ExtendHostFunctions = (); + type ExtendHostFunctions = cumulus_client_service::storage_proof_size::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) @@ -894,11 +894,12 @@ pub fn construct_extrinsic( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::::new(), ); let raw_payload = runtime::SignedPayload::from_raw( function.clone(), extra.clone(), - ((), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), ()), + ((), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| caller.sign(e)); runtime::UncheckedExtrinsic::new_signed( diff --git a/prdoc/pr_3002.prdoc b/prdoc/pr_3002.prdoc new file mode 100644 index 000000000000..511a07e39c47 --- /dev/null +++ b/prdoc/pr_3002.prdoc @@ -0,0 +1,29 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: PoV Reclaim Runtime Side +author: skunert +topic: runtime +doc: + - audience: Runtime Dev + description: | + Adds a mechanism to 
reclaim proof size weight.
+      1. Introduces a new `SignedExtension` that reclaims the difference
+      between benchmarked proof size weight and actual consumed proof size weight.
+      2. Introduces a manual mechanism, `StorageWeightReclaimer`, to reclaim excess storage weight for situations
+      that require manual weight management. The most prominent case is the `on_idle` hook.
+      3. Adds the `storage_proof_size` host function to the PVF. Parachain nodes should add it to ensure compatibility.
+
+      To enable proof size reclaiming, add the `storage_proof_size` host function to the parachain node. Add the
+      `StorageWeightReclaim` `SignedExtension` to your runtime and enable proof recording during block import.
+
+
+crates:
+  - name: "cumulus-primitives-storage-weight-reclaim"
+
+host_functions:
+  - name: "storage_proof_size"
+    description: |
+      This host function is used to pass the current size of the storage proof to the runtime.
+      It was introduced before, but only becomes relevant now.
+      Note: This host function is intended to be used through `cumulus_primitives_storage_weight_reclaim::get_proof_size`.
+      Direct usage is not recommended.
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index 069217bcee46..1405c7303f87 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -148,6 +148,7 @@ use sp_io::TestExternalities;
 pub mod limits;
 #[cfg(test)]
 pub(crate) mod mock;
+
 pub mod offchain;

 mod extensions;
@@ -847,7 +848,7 @@ pub mod pallet {
 	#[pallet::storage]
 	#[pallet::whitelist_storage]
 	#[pallet::getter(fn block_weight)]
-	pub(super) type BlockWeight<T: Config> = StorageValue<_, ConsumedWeight, ValueQuery>;
+	pub type BlockWeight<T: Config> = StorageValue<_, ConsumedWeight, ValueQuery>;

 	/// Total length (in bytes) for all extrinsics put together, for the current block.
 	#[pallet::storage]
diff --git a/substrate/primitives/weights/src/weight_meter.rs b/substrate/primitives/weights/src/weight_meter.rs
index 584d22304c3a..1738948e4c3c 100644
--- a/substrate/primitives/weights/src/weight_meter.rs
+++ b/substrate/primitives/weights/src/weight_meter.rs
@@ -149,6 +149,11 @@ impl WeightMeter {
 	pub fn can_consume(&self, w: Weight) -> bool {
 		self.consumed.checked_add(&w).map_or(false, |t| t.all_lte(self.limit))
 	}
+
+	/// Reclaim the given amount of proof size weight.
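The prdoc above names the `on_idle` hook as the main consumer of the manual `StorageWeightReclaimer` mechanism. Before the `WeightMeter` hunk continues below, here is a hypothetical sketch of such a loop; `process_one_item` and `per_item_weight` are invented for illustration, and reclaiming only has an effect on nodes that report proof sizes:

```rust
use cumulus_primitives_storage_weight_reclaim::StorageWeightReclaimer;
use sp_weights::{Weight, WeightMeter};

/// Invented stand-in for one benchmarked unit of `on_idle` work.
fn process_one_item() {}

fn on_idle_sketch(limit: Weight) -> Weight {
    let mut meter = WeightMeter::with_limit(limit);
    // Assumed benchmark for a single item: 10_000 ref time, 1024 proof size.
    let per_item_weight = Weight::from_parts(10_000, 1024);

    // Loop until the meter cannot fit another benchmarked item.
    while meter.can_consume(per_item_weight) {
        // Snapshot the recorded proof size *before* charging for the item,
        // as the doc example above stresses.
        let mut reclaimer = StorageWeightReclaimer::new(&meter);
        meter.consume(per_item_weight);
        process_one_item();
        // Hand any over-estimated proof size back to the meter so the next
        // iteration can use it.
        let _ = reclaimer.reclaim_with_meter(&mut meter);
    }
    meter.consumed()
}
```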
+ pub fn reclaim_proof_size(&mut self, s: u64) { + self.consumed.saturating_reduce(Weight::from_parts(0, s)); + } } #[cfg(test)] @@ -277,6 +282,21 @@ mod tests { assert_eq!(meter.consumed(), Weight::from_parts(5, 10)); } + #[test] + #[cfg(debug_assertions)] + fn reclaim_works() { + let mut meter = WeightMeter::with_limit(Weight::from_parts(5, 10)); + + meter.consume(Weight::from_parts(5, 10)); + assert_eq!(meter.consumed(), Weight::from_parts(5, 10)); + + meter.reclaim_proof_size(3); + assert_eq!(meter.consumed(), Weight::from_parts(5, 7)); + + meter.reclaim_proof_size(10); + assert_eq!(meter.consumed(), Weight::from_parts(5, 0)); + } + #[test] #[cfg(debug_assertions)] #[should_panic(expected = "Weight counter overflow")] diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index e3f06e275635..d283b24f286a 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -72,6 +72,7 @@ pub struct TestClientBuilder, bad_blocks: BadBlocks, enable_offchain_indexing_api: bool, + enable_import_proof_recording: bool, no_genesis: bool, } @@ -120,6 +121,7 @@ impl bad_blocks: None, enable_offchain_indexing_api: false, no_genesis: false, + enable_import_proof_recording: false, } } @@ -165,6 +167,12 @@ impl self } + /// Enable proof recording on import. + pub fn enable_import_proof_recording(mut self) -> Self { + self.enable_import_proof_recording = true; + self + } + /// Disable writing genesis. pub fn set_no_genesis(mut self) -> Self { self.no_genesis = true; @@ -202,6 +210,7 @@ impl }; let client_config = ClientConfig { + enable_import_proof_recording: self.enable_import_proof_recording, offchain_indexing_api: self.enable_offchain_indexing_api, no_genesis: self.no_genesis, ..Default::default() From 6fc1d41d4487b9164451cd8214674ce195ab06a0 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 23 Feb 2024 16:53:08 +0100 Subject: [PATCH 32/51] Bridge zombienet tests: move all "framework" files under one folder (#3462) Related to https://github.com/paritytech/polkadot-sdk/issues/3400 Moving all bridges testing "framework" files under one folder in order to be able to download the entire folder when we want to add tests in other repos No significant functional changes --- .../rococo-westend/bridges_rococo_westend.sh | 2 +- bridges/testing/environments/rococo-westend/helper.sh | 2 +- .../environments/rococo-westend/rococo-init.zndsl | 4 ++-- .../testing/environments/rococo-westend/rococo.zndsl | 4 ++-- bridges/testing/environments/rococo-westend/spawn.sh | 2 +- .../environments/rococo-westend/start_relayer.sh | 4 ++-- .../environments/rococo-westend/westend-init.zndsl | 4 ++-- .../testing/environments/rococo-westend/westend.zndsl | 4 ++-- .../best-finalized-header-at-bridged-chain.js | 0 .../js-helpers/chains/rococo-at-westend.js | 0 .../js-helpers/chains/westend-at-rococo.js | 0 .../js-helpers/native-assets-balance-increased.js | 0 .../only-mandatory-headers-synced-when-idle.js | 0 .../only-required-headers-synced-when-idle.js | 0 .../{ => framework}/js-helpers/relayer-rewards.js | 0 bridges/testing/{ => framework}/js-helpers/utils.js | 0 .../js-helpers/wait-hrmp-channel-opened.js | 0 .../js-helpers/wrapped-assets-balance.js | 0 bridges/testing/{ => framework}/utils/bridges.sh | 4 ++-- bridges/testing/{ => framework}/utils/common.sh | 0 .../utils/generate_hex_encoded_call/index.js | 0 .../utils/generate_hex_encoded_call/package-lock.json | 0 .../utils/generate_hex_encoded_call/package.json | 0 bridges/testing/{ => 
framework}/utils/zombienet.sh | 0 bridges/testing/run-new-test.sh | 1 + .../0001-asset-transfer/roc-reaches-westend.zndsl | 8 ++++---- bridges/testing/tests/0001-asset-transfer/run.sh | 8 +++++--- .../tests/0001-asset-transfer/wnd-reaches-rococo.zndsl | 8 ++++---- .../0001-asset-transfer/wroc-reaches-rococo.zndsl | 6 +++--- .../0001-asset-transfer/wwnd-reaches-westend.zndsl | 6 +++--- .../rococo-to-westend.zndsl | 4 ++-- .../0002-mandatory-headers-synced-while-idle/run.sh | 10 +++++----- .../westend-to-rococo.zndsl | 4 ++-- .../bridges_zombienet_tests_injected.Dockerfile | 2 +- 34 files changed, 45 insertions(+), 42 deletions(-) rename bridges/testing/{ => framework}/js-helpers/best-finalized-header-at-bridged-chain.js (100%) rename bridges/testing/{ => framework}/js-helpers/chains/rococo-at-westend.js (100%) rename bridges/testing/{ => framework}/js-helpers/chains/westend-at-rococo.js (100%) rename bridges/testing/{ => framework}/js-helpers/native-assets-balance-increased.js (100%) rename bridges/testing/{ => framework}/js-helpers/only-mandatory-headers-synced-when-idle.js (100%) rename bridges/testing/{ => framework}/js-helpers/only-required-headers-synced-when-idle.js (100%) rename bridges/testing/{ => framework}/js-helpers/relayer-rewards.js (100%) rename bridges/testing/{ => framework}/js-helpers/utils.js (100%) rename bridges/testing/{ => framework}/js-helpers/wait-hrmp-channel-opened.js (100%) rename bridges/testing/{ => framework}/js-helpers/wrapped-assets-balance.js (100%) rename bridges/testing/{ => framework}/utils/bridges.sh (98%) rename bridges/testing/{ => framework}/utils/common.sh (100%) rename bridges/testing/{ => framework}/utils/generate_hex_encoded_call/index.js (100%) rename bridges/testing/{ => framework}/utils/generate_hex_encoded_call/package-lock.json (100%) rename bridges/testing/{ => framework}/utils/generate_hex_encoded_call/package.json (100%) rename bridges/testing/{ => framework}/utils/zombienet.sh (100%) diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index 84764cdaca38..de5be2e0af88 100755 --- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -1,7 +1,7 @@ #!/bin/bash # import common functions -source "${BASH_SOURCE%/*}/../../utils/bridges.sh" +source "$FRAMEWORK_PATH/utils/bridges.sh" # Expected sovereign accounts. 
 #
diff --git a/bridges/testing/environments/rococo-westend/helper.sh b/bridges/testing/environments/rococo-westend/helper.sh
index 211a5b53b3d9..0a13ded213f5 100755
--- a/bridges/testing/environments/rococo-westend/helper.sh
+++ b/bridges/testing/environments/rococo-westend/helper.sh
@@ -1,3 +1,3 @@
 #!/bin/bash

-$POLKADOT_SDK_PATH/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh "$@"
+$ENV_PATH/bridges_rococo_westend.sh "$@"
diff --git a/bridges/testing/environments/rococo-westend/rococo-init.zndsl b/bridges/testing/environments/rococo-westend/rococo-init.zndsl
index 145f2df73a6e..c913e4db31f4 100644
--- a/bridges/testing/environments/rococo-westend/rococo-init.zndsl
+++ b/bridges/testing/environments/rococo-westend/rococo-init.zndsl
@@ -1,8 +1,8 @@
-Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Hub and back
+Description: Check if the HRMP channel between Rococo BH and Rococo AH was opened successfully
 Network: ./bridge_hub_rococo_local_network.toml
 Creds: config

 # ensure that initialization has completed
-asset-hub-rococo-collator1: js-script ../../js-helpers/wait-hrmp-channel-opened.js with "1013" within 300 seconds
+asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wait-hrmp-channel-opened.js with "1013" within 300 seconds
diff --git a/bridges/testing/environments/rococo-westend/rococo.zndsl b/bridges/testing/environments/rococo-westend/rococo.zndsl
index bd8681af2196..5b49c7c632fa 100644
--- a/bridges/testing/environments/rococo-westend/rococo.zndsl
+++ b/bridges/testing/environments/rococo-westend/rococo.zndsl
@@ -1,7 +1,7 @@
-Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Hub and back
+Description: Check if the with-Westend GRANDPA pallet was initialized at Rococo BH
 Network: ./bridge_hub_rococo_local_network.toml
 Creds: config

 # relay is already started - let's wait until with-Westend GRANDPA pallet is initialized at Rococo
-bridge-hub-rococo-collator1: js-script ../../js-helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds
diff --git a/bridges/testing/environments/rococo-westend/spawn.sh b/bridges/testing/environments/rococo-westend/spawn.sh
index 5a0d65ce65db..cbd0b1bc623a 100755
--- a/bridges/testing/environments/rococo-westend/spawn.sh
+++ b/bridges/testing/environments/rococo-westend/spawn.sh
@@ -4,7 +4,7 @@ set -e

 trap "trap - SIGTERM && kill -9 -$$" SIGINT SIGTERM EXIT

-source "${BASH_SOURCE%/*}/../../utils/zombienet.sh"
+source "$FRAMEWORK_PATH/utils/zombienet.sh"

 # whether to init the chains (open HRMP channels, set XCM version, create reserve assets, etc)
 init=0
diff --git a/bridges/testing/environments/rococo-westend/start_relayer.sh b/bridges/testing/environments/rococo-westend/start_relayer.sh
index c57d4f1a4374..7ddd312d395a 100755
--- a/bridges/testing/environments/rococo-westend/start_relayer.sh
+++ b/bridges/testing/environments/rococo-westend/start_relayer.sh
@@ -2,8 +2,8 @@

 set -e

-source "${BASH_SOURCE%/*}/../../utils/common.sh"
-source "${BASH_SOURCE%/*}/../../utils/zombienet.sh"
+source "$FRAMEWORK_PATH/utils/common.sh"
+source "$FRAMEWORK_PATH/utils/zombienet.sh"

 rococo_dir=$1
 westend_dir=$2
diff --git a/bridges/testing/environments/rococo-westend/westend-init.zndsl b/bridges/testing/environments/rococo-westend/westend-init.zndsl
index 2f8e665d592d..0f5428eed3b0 100644
--- a/bridges/testing/environments/rococo-westend/westend-init.zndsl
+++ b/bridges/testing/environments/rococo-westend/westend-init.zndsl
@@ -1,7 +1,7 @@
-Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back
+Description: Check if the HRMP channel between Westend BH and Westend AH was opened successfully
 Network: ./bridge_hub_westend_local_network.toml
 Creds: config

 # ensure that initialization has completed
-asset-hub-westend-collator1: js-script ../../js-helpers/wait-hrmp-channel-opened.js with "1002" within 600 seconds
+asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wait-hrmp-channel-opened.js with "1002" within 600 seconds
diff --git a/bridges/testing/environments/rococo-westend/westend.zndsl b/bridges/testing/environments/rococo-westend/westend.zndsl
index c75ae579d27a..07968838852f 100644
--- a/bridges/testing/environments/rococo-westend/westend.zndsl
+++ b/bridges/testing/environments/rococo-westend/westend.zndsl
@@ -1,6 +1,6 @@
-Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back
+Description: Check if the with-Rococo GRANDPA pallet was initialized at Westend BH
 Network: ./bridge_hub_westend_local_network.toml
 Creds: config

 # relay is already started - let's wait until with-Rococo GRANDPA pallet is initialized at Westend
-bridge-hub-westend-collator1: js-script ../../js-helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds
+bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds
diff --git a/bridges/testing/js-helpers/best-finalized-header-at-bridged-chain.js b/bridges/testing/framework/js-helpers/best-finalized-header-at-bridged-chain.js
similarity index 100%
rename from bridges/testing/js-helpers/best-finalized-header-at-bridged-chain.js
rename to bridges/testing/framework/js-helpers/best-finalized-header-at-bridged-chain.js
diff --git a/bridges/testing/js-helpers/chains/rococo-at-westend.js b/bridges/testing/framework/js-helpers/chains/rococo-at-westend.js
similarity index 100%
rename from bridges/testing/js-helpers/chains/rococo-at-westend.js
rename to bridges/testing/framework/js-helpers/chains/rococo-at-westend.js
diff --git a/bridges/testing/js-helpers/chains/westend-at-rococo.js b/bridges/testing/framework/js-helpers/chains/westend-at-rococo.js
similarity index 100%
rename from bridges/testing/js-helpers/chains/westend-at-rococo.js
rename to bridges/testing/framework/js-helpers/chains/westend-at-rococo.js
diff --git a/bridges/testing/js-helpers/native-assets-balance-increased.js b/bridges/testing/framework/js-helpers/native-assets-balance-increased.js
similarity index 100%
rename from bridges/testing/js-helpers/native-assets-balance-increased.js
rename to bridges/testing/framework/js-helpers/native-assets-balance-increased.js
diff --git a/bridges/testing/js-helpers/only-mandatory-headers-synced-when-idle.js b/bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js
similarity index 100%
rename from bridges/testing/js-helpers/only-mandatory-headers-synced-when-idle.js
rename to bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js
diff --git a/bridges/testing/js-helpers/only-required-headers-synced-when-idle.js b/bridges/testing/framework/js-helpers/only-required-headers-synced-when-idle.js
similarity index 100%
rename from bridges/testing/js-helpers/only-required-headers-synced-when-idle.js
rename to
bridges/testing/framework/js-helpers/only-required-headers-synced-when-idle.js diff --git a/bridges/testing/js-helpers/relayer-rewards.js b/bridges/testing/framework/js-helpers/relayer-rewards.js similarity index 100% rename from bridges/testing/js-helpers/relayer-rewards.js rename to bridges/testing/framework/js-helpers/relayer-rewards.js diff --git a/bridges/testing/js-helpers/utils.js b/bridges/testing/framework/js-helpers/utils.js similarity index 100% rename from bridges/testing/js-helpers/utils.js rename to bridges/testing/framework/js-helpers/utils.js diff --git a/bridges/testing/js-helpers/wait-hrmp-channel-opened.js b/bridges/testing/framework/js-helpers/wait-hrmp-channel-opened.js similarity index 100% rename from bridges/testing/js-helpers/wait-hrmp-channel-opened.js rename to bridges/testing/framework/js-helpers/wait-hrmp-channel-opened.js diff --git a/bridges/testing/js-helpers/wrapped-assets-balance.js b/bridges/testing/framework/js-helpers/wrapped-assets-balance.js similarity index 100% rename from bridges/testing/js-helpers/wrapped-assets-balance.js rename to bridges/testing/framework/js-helpers/wrapped-assets-balance.js diff --git a/bridges/testing/utils/bridges.sh b/bridges/testing/framework/utils/bridges.sh similarity index 98% rename from bridges/testing/utils/bridges.sh rename to bridges/testing/framework/utils/bridges.sh index cfde5dfd26b7..7c8399461584 100755 --- a/bridges/testing/utils/bridges.sh +++ b/bridges/testing/framework/utils/bridges.sh @@ -41,8 +41,8 @@ function ensure_polkadot_js_api() { echo "" echo "" echo "-------------------" - echo "Installing (nodejs) sub module: $(dirname "$0")/generate_hex_encoded_call" - pushd $(dirname "$0")/generate_hex_encoded_call + echo "Installing (nodejs) sub module: ${BASH_SOURCE%/*}/generate_hex_encoded_call" + pushd ${BASH_SOURCE%/*}/generate_hex_encoded_call npm install popd fi diff --git a/bridges/testing/utils/common.sh b/bridges/testing/framework/utils/common.sh similarity index 100% rename from bridges/testing/utils/common.sh rename to bridges/testing/framework/utils/common.sh diff --git a/bridges/testing/utils/generate_hex_encoded_call/index.js b/bridges/testing/framework/utils/generate_hex_encoded_call/index.js similarity index 100% rename from bridges/testing/utils/generate_hex_encoded_call/index.js rename to bridges/testing/framework/utils/generate_hex_encoded_call/index.js diff --git a/bridges/testing/utils/generate_hex_encoded_call/package-lock.json b/bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json similarity index 100% rename from bridges/testing/utils/generate_hex_encoded_call/package-lock.json rename to bridges/testing/framework/utils/generate_hex_encoded_call/package-lock.json diff --git a/bridges/testing/utils/generate_hex_encoded_call/package.json b/bridges/testing/framework/utils/generate_hex_encoded_call/package.json similarity index 100% rename from bridges/testing/utils/generate_hex_encoded_call/package.json rename to bridges/testing/framework/utils/generate_hex_encoded_call/package.json diff --git a/bridges/testing/utils/zombienet.sh b/bridges/testing/framework/utils/zombienet.sh similarity index 100% rename from bridges/testing/utils/zombienet.sh rename to bridges/testing/framework/utils/zombienet.sh diff --git a/bridges/testing/run-new-test.sh b/bridges/testing/run-new-test.sh index 2ed2a412b8a7..7c84a69aa47d 100755 --- a/bridges/testing/run-new-test.sh +++ b/bridges/testing/run-new-test.sh @@ -21,6 +21,7 @@ do done export POLKADOT_SDK_PATH=`realpath 
${BASH_SOURCE%/*}/../..` +export FRAMEWORK_PATH=`realpath ${BASH_SOURCE%/*}/framework` # set path to binaries if [ "$ZOMBIENET_DOCKER_PATHS" -eq 1 ]; then diff --git a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl index 203c95b73eb2..4725362ae826 100644 --- a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl @@ -1,12 +1,12 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back -Network: ../../environments/rococo-westend/bridge_hub_westend_local_network.toml +Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config # send ROC to //Alice from Rococo AH to Westend AH -asset-hub-westend-collator1: run ../../environments/rococo-westend/helper.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 120 seconds +asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 120 seconds # check that //Alice received the ROC on Westend AH -asset-hub-westend-collator1: js-script ../../js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 300 seconds +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 300 seconds # check that the relayer //Charlie is rewarded by Westend AH -bridge-hub-westend-collator1: js-script ../../js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 30 seconds +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 30 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/run.sh b/bridges/testing/tests/0001-asset-transfer/run.sh index 1e0d6cad3010..a7bb122919b4 100755 --- a/bridges/testing/tests/0001-asset-transfer/run.sh +++ b/bridges/testing/tests/0001-asset-transfer/run.sh @@ -2,10 +2,12 @@ set -e -source "${BASH_SOURCE%/*}/../../utils/common.sh" -source "${BASH_SOURCE%/*}/../../utils/zombienet.sh" +source "${BASH_SOURCE%/*}/../../framework/utils/common.sh" +source "${BASH_SOURCE%/*}/../../framework/utils/zombienet.sh" -${BASH_SOURCE%/*}/../../environments/rococo-westend/spawn.sh --init --start-relayer & +export ENV_PATH=`realpath ${BASH_SOURCE%/*}/../../environments/rococo-westend` + +$ENV_PATH/spawn.sh --init --start-relayer & env_pid=$! 
ensure_process_file $env_pid $TEST_DIR/rococo.env 600 diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl index bbd95db9cfda..77267239c3b1 100644 --- a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl @@ -1,12 +1,12 @@ Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Hub and back -Network: ../../environments/rococo-westend/bridge_hub_rococo_local_network.toml +Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml Creds: config # send WND to //Alice from Westend AH to Rococo AH -asset-hub-rococo-collator1: run ../../environments/rococo-westend/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 120 seconds +asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 120 seconds # check that //Alice received the WND on Rococo AH -asset-hub-rococo-collator1: js-script ../../js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 300 seconds +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 300 seconds # check that the relayer //Charlie is rewarded by Rococo AH -bridge-hub-rococo-collator1: js-script ../../js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 30 seconds +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 30 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl index 4c0a4675234e..f72b76e6026b 100644 --- a/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl @@ -1,10 +1,10 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back -Network: ../../environments/rococo-westend/bridge_hub_westend_local_network.toml +Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config # send wROC back to Alice from Westend AH to Rococo AH -asset-hub-rococo-collator1: run ../../environments/rococo-westend/helper.sh with "withdraw-reserve-assets-from-asset-hub-westend-local" within 120 seconds +asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-westend-local" within 120 seconds # check that //Alice received the wROC on Rococo AH # (we wait until //Alice account increases here - there are no other transactions that may increase it) -asset-hub-rococo-collator1: js-script ../../js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl index 3acded97d5cc..9d893c8d90a8 100644 --- 
a/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl @@ -1,10 +1,10 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back -Network: ../../environments/rococo-westend/bridge_hub_westend_local_network.toml +Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config # send wWND back to Alice from Rococo AH to Westend AH -asset-hub-westend-collator1: run ../../environments/rococo-westend/helper.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local" within 120 seconds +asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local" within 120 seconds # check that //Alice received the wWND on Westend AH # (we wait until //Alice account increases here - there are no other transactions that may increase it) -asset-hub-westend-collator1: js-script ../../js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl index 82a1a103b14a..6e381f537732 100644 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl +++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl @@ -1,8 +1,8 @@ Description: While relayer is idle, we only sync mandatory Rococo (and a single Rococo BH) headers to Westend BH. -Network: ../../environments/rococo-westend/bridge_hub_westend_local_network.toml +Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config # ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were # generated while relay was offline and those in the next 100 seconds while script is active. -bridge-hub-westend-collator1: js-script ../../js-helpers/only-mandatory-headers-synced-when-idle.js with "300,rococo-at-westend" within 600 seconds +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,rococo-at-westend" within 600 seconds diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh index c39796d51d56..7d5b8d927366 100755 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh +++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh @@ -2,12 +2,12 @@ set -e -source "${BASH_SOURCE%/*}/../../utils/common.sh" -source "${BASH_SOURCE%/*}/../../utils/zombienet.sh" +source "${BASH_SOURCE%/*}/../../framework/utils/common.sh" +source "${BASH_SOURCE%/*}/../../framework/utils/zombienet.sh" -# We use `--relayer-delay` in order to sleep some time before starting relayer. -# We want to sleep for at least 1 session, which is expected to be 60 seconds for test environment. -${BASH_SOURCE%/*}/../../environments/rococo-westend/spawn.sh & +export ENV_PATH=`realpath ${BASH_SOURCE%/*}/../../environments/rococo-westend` + +$ENV_PATH/spawn.sh & env_pid=$! 
ensure_process_file $env_pid $TEST_DIR/rococo.env 600
diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl
index 865813246252..b4b3e4367916 100644
--- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl
+++ b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl
@@ -1,7 +1,7 @@
 Description: While relayer is idle, we only sync mandatory Westend (and a single Westend BH) headers to Rococo BH.
-Network: ../../environments/rococo-westend/bridge_hub_rococo_local_network.toml
+Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml
 Creds: config

 # ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were
 # generated while relay was offline and those in the next 100 seconds while script is active.
-bridge-hub-rococo-collator1: js-script ../../js-helpers/only-mandatory-headers-synced-when-idle.js with "300,westend-at-rococo" within 600 seconds
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,westend-at-rococo" within 600 seconds
diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
index fde9cc6e7cf3..4bfb73acda05 100644
--- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
+++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
@@ -45,7 +45,7 @@ RUN mkdir -p /home/nonroot/bridges-polkadot-sdk
 COPY ./artifacts/bridges-polkadot-sdk /home/nonroot/bridges-polkadot-sdk
 # also prepare `generate_hex_encoded_call` for running
 RUN set -eux; \
-    cd /home/nonroot/bridges-polkadot-sdk/bridges/testing/utils/generate_hex_encoded_call; \
+    cd /home/nonroot/bridges-polkadot-sdk/bridges/testing/framework/utils/generate_hex_encoded_call; \
     npm install

 # check if executable works in this container

From 2431001ec012334226bc181d69a754c49f168328 Mon Sep 17 00:00:00 2001
From: Andrei Sandu <54316454+sandreim@users.noreply.github.com>
Date: Fri, 23 Feb 2024 23:35:48 +0700
Subject: [PATCH 33/51] Runtime: allow backing multiple candidates of same
 parachain on different cores (#3231)

Fixes https://github.com/paritytech/polkadot-sdk/issues/3144

Builds on top of https://github.com/paritytech/polkadot-sdk/pull/3229

### Summary

Some preparations for Runtime to support elastic scaling, guarded by the
config node-features bit `FeatureIndex::ElasticScalingMVP`.

This PR introduces a per-candidate `CoreIndex` but does it in a hacky way
to avoid changing the `CandidateCommitments` and `CandidateReceipts`
primitives and the networking protocols.

#### Including `CoreIndex` in `BackedCandidate`

If the `ElasticScalingMVP` feature bit is enabled then
`BackedCandidate::validator_indices` is extended by 8 bits. The value
stored in these bits represents the assumed core index for the candidate.

It is a temporary solution which works by creating a mapping from
`BackedCandidate` to `CoreIndex`, assuming the `CoreIndex` can be
discovered by checking which validator group the validator that signed
the statement belongs to.

TODO:
- [x] fix tests
- [x] add new tests
- [x] Bump runtime API for Kusama, so we have that node features thing!
-> https://github.com/polkadot-fellows/runtimes/pull/194 --------- Signed-off-by: Andrei Sandu Signed-off-by: alindima Co-authored-by: alindima --- .gitlab/pipeline/zombienet/polkadot.yml | 8 + Cargo.lock | 37 ++ polkadot/node/core/backing/Cargo.toml | 1 + polkadot/node/core/backing/src/lib.rs | 19 +- polkadot/node/core/backing/src/tests/mod.rs | 70 +- polkadot/node/core/provisioner/src/lib.rs | 2 +- polkadot/node/core/provisioner/src/tests.rs | 70 +- polkadot/primitives/src/v6/mod.rs | 208 +++++- polkadot/runtime/parachains/Cargo.toml | 1 + polkadot/runtime/parachains/src/builder.rs | 7 +- .../runtime/parachains/src/inclusion/mod.rs | 91 +-- .../runtime/parachains/src/inclusion/tests.rs | 423 +++++++++--- .../src/paras_inherent/benchmarking.rs | 4 +- .../parachains/src/paras_inherent/mod.rs | 288 ++++++--- .../parachains/src/paras_inherent/tests.rs | 604 ++++++++++++++++-- .../parachains/src/paras_inherent/weights.rs | 4 +- .../functional/0012-elastic-scaling-mvp.toml | 38 ++ .../functional/0012-elastic-scaling-mvp.zndsl | 28 + .../functional/0012-enable-node-feature.js | 37 ++ .../functional/0012-register-para.js | 37 ++ prdoc/pr_3231.prdoc | 11 + 21 files changed, 1640 insertions(+), 348 deletions(-) create mode 100644 polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml create mode 100644 polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl create mode 100644 polkadot/zombienet_tests/functional/0012-enable-node-feature.js create mode 100644 polkadot/zombienet_tests/functional/0012-register-para.js create mode 100644 prdoc/pr_3231.prdoc diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 54eb6db48cae..97572f029d00 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -158,6 +158,14 @@ zombienet-polkadot-functional-0011-async-backing-6-seconds-rate: --local-dir="${LOCAL_DIR}/functional" --test="0011-async-backing-6-seconds-rate.zndsl" +zombienet-polkadot-functional-0012-elastic-scaling-mvp: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0012-elastic-scaling-mvp.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common diff --git a/Cargo.lock b/Cargo.lock index fbe2729acba1..a23be53b34fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12512,6 +12512,7 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-statement-table", + "rstest", "sc-keystore", "schnellru", "sp-application-crypto", @@ -13350,6 +13351,7 @@ dependencies = [ "polkadot-runtime-metrics", "rand", "rand_chacha 0.3.1", + "rstest", "rustc-hex", "sc-keystore", "scale-info", @@ -14790,6 +14792,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +[[package]] +name = "relative-path" +version = "1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e898588f33fdd5b9420719948f9f2a32c922a246964576f71ba7f24f80610fbc" + [[package]] name = "remote-ext-tests-bags-list" version = "1.0.0" @@ -15172,6 +15180,35 @@ dependencies = [ "winapi", ] +[[package]] +name = "rstest" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97eeab2f3c0a199bc4be135c36c924b6590b88c377d416494288c14f2db30199" +dependencies = [ + "futures", + 
"futures-timer", + "rstest_macros", + "rustc_version 0.4.0", +] + +[[package]] +name = "rstest_macros" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" +dependencies = [ + "cfg-if", + "glob", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version 0.4.0", + "syn 2.0.50", + "unicode-ident", +] + [[package]] name = "rtnetlink" version = "0.10.1" diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index f71b8df80dd2..d0c1f9aa4832 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -32,5 +32,6 @@ sc-keystore = { path = "../../../../substrate/client/keystore" } sp-tracing = { path = "../../../../substrate/primitives/tracing" } futures = { version = "0.3.21", features = ["thread-pool"] } assert_matches = "1.4.0" +rstest = "0.18.2" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 26f20a6d48ef..69bf2e956a0f 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -70,7 +70,7 @@ use std::{ sync::Arc, }; -use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; +use bitvec::vec::BitVec; use futures::{ channel::{mpsc, oneshot}, future::BoxFuture, @@ -494,20 +494,15 @@ fn table_attested_to_backed( } vote_positions.sort_by_key(|(_orig, pos_in_group)| *pos_in_group); - if inject_core_index { - let core_index_to_inject: BitVec = - BitVec::from_vec(vec![core_index.0 as u8]); - validator_indices.extend(core_index_to_inject); - } - - Some(BackedCandidate { + Some(BackedCandidate::new( candidate, - validity_votes: vote_positions + vote_positions .into_iter() .map(|(pos_in_votes, _pos_in_group)| validity_votes[pos_in_votes].clone()) .collect(), validator_indices, - }) + inject_core_index.then_some(core_index), + )) } async fn store_available_data( @@ -1775,7 +1770,7 @@ async fn post_import_statement_actions( &rp_state.table_context, rp_state.inject_core_index, ) { - let para_id = backed.candidate.descriptor.para_id; + let para_id = backed.candidate().descriptor.para_id; gum::debug!( target: LOG_TARGET, candidate_hash = ?candidate_hash, @@ -1796,7 +1791,7 @@ async fn post_import_statement_actions( // notify collator protocol. ctx.send_message(CollatorProtocolMessage::Backed { para_id, - para_head: backed.candidate.descriptor.para_head, + para_head: backed.candidate().descriptor.para_head, }) .await; // Notify statement distribution of backed candidate. 
diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 7223f1e1dfb0..e3cc5727435a 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -33,9 +33,10 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecKind, - ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, + vstaging::node_features, CandidateDescriptor, GroupRotationInfo, HeadData, + PersistedValidationData, PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; +use rstest::rstest; use sp_application_crypto::AppCrypto; use sp_keyring::Sr25519Keyring; use sp_keystore::Keystore; @@ -79,6 +80,7 @@ pub(crate) struct TestState { relay_parent: Hash, minimum_backing_votes: u32, disabled_validators: Vec, + node_features: NodeFeatures, } impl TestState { @@ -157,6 +159,7 @@ impl Default for TestState { relay_parent, minimum_backing_votes: LEGACY_MIN_BACKING_VOTES, disabled_validators: Vec::new(), + node_features: Default::default(), } } } @@ -298,7 +301,7 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS AllMessages::RuntimeApi( RuntimeApiMessage::Request(_parent, RuntimeApiRequest::NodeFeatures(_session_index, tx)) ) => { - tx.send(Ok(Default::default())).unwrap(); + tx.send(Ok(test_state.node_features.clone())).unwrap(); } ); @@ -494,9 +497,20 @@ fn backing_second_works() { } // Test that the candidate reaches quorum successfully. -#[test] -fn backing_works() { - let test_state = TestState::default(); +#[rstest] +#[case(true)] +#[case(false)] +fn backing_works(#[case] elastic_scaling_mvp: bool) { + let mut test_state = TestState::default(); + if elastic_scaling_mvp { + test_state + .node_features + .resize((node_features::FeatureIndex::ElasticScalingMVP as u8 + 1) as usize, false); + test_state + .node_features + .set(node_features::FeatureIndex::ElasticScalingMVP as u8 as usize, true); + } + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; @@ -647,6 +661,31 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + vec![(candidate_a_hash, test_state.relay_parent)], + tx, + ); + + virtual_overseer.send(FromOrchestra::Communication { msg }).await; + + let candidates = rx.await.unwrap(); + assert_eq!(1, candidates.len()); + assert_eq!(candidates[0].validity_votes().len(), 3); + + let (validator_indices, maybe_core_index) = + candidates[0].validator_indices_and_core_index(elastic_scaling_mvp); + if elastic_scaling_mvp { + assert_eq!(maybe_core_index.unwrap(), CoreIndex(0)); + } else { + assert!(maybe_core_index.is_none()); + } + + assert_eq!( + validator_indices, + bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 1, 0, 1].as_bitslice() + ); + virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -919,20 +958,20 @@ fn backing_works_while_validation_ongoing() { let candidates = rx.await.unwrap(); assert_eq!(1, candidates.len()); - assert_eq!(candidates[0].validity_votes.len(), 3); + assert_eq!(candidates[0].validity_votes().len(), 3); assert!(candidates[0] - .validity_votes + .validity_votes() 
.contains(&ValidityAttestation::Implicit(signed_a.signature().clone()))); assert!(candidates[0] - .validity_votes + .validity_votes() .contains(&ValidityAttestation::Explicit(signed_b.signature().clone()))); assert!(candidates[0] - .validity_votes + .validity_votes() .contains(&ValidityAttestation::Explicit(signed_c.signature().clone()))); assert_eq!( - candidates[0].validator_indices, - bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 0, 1, 1], + candidates[0].validator_indices_and_core_index(false), + (bitvec::bitvec![u8, bitvec::order::Lsb0; 1, 0, 1, 1].as_bitslice(), None) ); virtual_overseer @@ -1604,8 +1643,11 @@ fn candidate_backing_reorders_votes() { let expected_attestations = vec![fake_attestation(1).into(), fake_attestation(3).into(), fake_attestation(5).into()]; - assert_eq!(backed.validator_indices, expected_bitvec); - assert_eq!(backed.validity_votes, expected_attestations); + assert_eq!( + backed.validator_indices_and_core_index(false), + (expected_bitvec.as_bitslice(), None) + ); + assert_eq!(backed.validity_votes(), expected_attestations); } // Test whether we retry on failed PoV fetching. diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index d98f6ebfe428..a29cf72afb14 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -766,7 +766,7 @@ async fn select_candidates( // keep only one candidate with validation code. let mut with_validation_code = false; candidates.retain(|c| { - if c.candidate.commitments.new_validation_code.is_some() { + if c.candidate().commitments.new_validation_code.is_some() { if with_validation_code { return false } diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs index b26df8ddb910..87c0e7a65d35 100644 --- a/polkadot/node/core/provisioner/src/tests.rs +++ b/polkadot/node/core/provisioner/src/tests.rs @@ -460,13 +460,16 @@ mod select_candidates { let expected_backed = expected_candidates .iter() - .map(|c| BackedCandidate { - candidate: CommittedCandidateReceipt { - descriptor: c.descriptor.clone(), - commitments: Default::default(), - }, - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), + .map(|c| { + BackedCandidate::new( + CommittedCandidateReceipt { + descriptor: c.descriptor().clone(), + commitments: Default::default(), + }, + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) }) .collect(); @@ -486,7 +489,7 @@ mod select_candidates { result.into_iter().for_each(|c| { assert!( - expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), + expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)), "Failed to find candidate: {:?}", c, ) @@ -532,10 +535,13 @@ mod select_candidates { // Build possible outputs from select_candidates let backed_candidates: Vec<_> = committed_receipts .iter() - .map(|committed_receipt| BackedCandidate { - candidate: committed_receipt.clone(), - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), + .map(|committed_receipt| { + BackedCandidate::new( + committed_receipt.clone(), + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) }) .collect(); @@ -566,7 +572,7 @@ mod select_candidates { result.into_iter().for_each(|c| { assert!( - expected_backed_filtered.iter().any(|c2| c.candidate.corresponds_to(c2)), + expected_backed_filtered.iter().any(|c2| c.candidate().corresponds_to(c2)), "Failed to find candidate: {:?}", c, ) @@ -605,13 +611,16 @@ mod select_candidates { let 
expected_backed = expected_candidates .iter() - .map(|c| BackedCandidate { - candidate: CommittedCandidateReceipt { - descriptor: c.descriptor.clone(), - commitments: Default::default(), - }, - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), + .map(|c| { + BackedCandidate::new( + CommittedCandidateReceipt { + descriptor: c.descriptor.clone(), + commitments: Default::default(), + }, + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) }) .collect(); @@ -631,7 +640,7 @@ mod select_candidates { result.into_iter().for_each(|c| { assert!( - expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), + expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)), "Failed to find candidate: {:?}", c, ) @@ -671,13 +680,16 @@ mod select_candidates { let expected_backed = expected_candidates .iter() - .map(|c| BackedCandidate { - candidate: CommittedCandidateReceipt { - descriptor: c.descriptor.clone(), - commitments: Default::default(), - }, - validity_votes: Vec::new(), - validator_indices: default_bitvec(MOCK_GROUP_SIZE), + .map(|c| { + BackedCandidate::new( + CommittedCandidateReceipt { + descriptor: c.descriptor().clone(), + commitments: Default::default(), + }, + Vec::new(), + default_bitvec(MOCK_GROUP_SIZE), + None, + ) }) .collect(); @@ -697,7 +709,7 @@ mod select_candidates { result.into_iter().for_each(|c| { assert!( - expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), + expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)), "Failed to find candidate: {:?}", c, ) diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index 538eb3855848..89431f7801f7 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -16,7 +16,7 @@ //! `V6` Primitives. -use bitvec::vec::BitVec; +use bitvec::{field::BitField, slice::BitSlice, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_std::{ @@ -707,19 +707,50 @@ pub type UncheckedSignedAvailabilityBitfields = Vec { /// The candidate referred to. - pub candidate: CommittedCandidateReceipt, + candidate: CommittedCandidateReceipt, /// The validity votes themselves, expressed as signatures. - pub validity_votes: Vec, - /// The indices of the validators within the group, expressed as a bitfield. - pub validator_indices: BitVec, + validity_votes: Vec, + /// The indices of the validators within the group, expressed as a bitfield. May be extended + /// beyond the backing group size to contain the assigned core index, if ElasticScalingMVP is + /// enabled. + validator_indices: BitVec, } impl BackedCandidate { - /// Get a reference to the descriptor of the para. + /// Constructor + pub fn new( + candidate: CommittedCandidateReceipt, + validity_votes: Vec, + validator_indices: BitVec, + core_index: Option, + ) -> Self { + let mut instance = Self { candidate, validity_votes, validator_indices }; + if let Some(core_index) = core_index { + instance.inject_core_index(core_index); + } + instance + } + + /// Get a reference to the descriptor of the candidate. pub fn descriptor(&self) -> &CandidateDescriptor { &self.candidate.descriptor } + /// Get a reference to the committed candidate receipt of the candidate. + pub fn candidate(&self) -> &CommittedCandidateReceipt { + &self.candidate + } + + /// Get a reference to the validity votes of the candidate. 
+ pub fn validity_votes(&self) -> &[ValidityAttestation] { + &self.validity_votes + } + + /// Get a mutable reference to validity votes of the para. + pub fn validity_votes_mut(&mut self) -> &mut Vec { + &mut self.validity_votes + } + /// Compute this candidate's hash. pub fn hash(&self) -> CandidateHash where @@ -735,6 +766,48 @@ impl BackedCandidate { { self.candidate.to_plain() } + + /// Get a copy of the validator indices and the assumed core index, if any. + pub fn validator_indices_and_core_index( + &self, + core_index_enabled: bool, + ) -> (&BitSlice, Option) { + // This flag tells us if the block producers must enable Elastic Scaling MVP hack. + // It extends `BackedCandidate::validity_indices` to store a 8 bit core index. + if core_index_enabled { + let core_idx_offset = self.validator_indices.len().saturating_sub(8); + if core_idx_offset > 0 { + let (validator_indices_slice, core_idx_slice) = + self.validator_indices.split_at(core_idx_offset); + return ( + validator_indices_slice, + Some(CoreIndex(core_idx_slice.load::() as u32)), + ); + } + } + + (&self.validator_indices, None) + } + + /// Inject a core index in the validator_indices bitvec. + fn inject_core_index(&mut self, core_index: CoreIndex) { + let core_index_to_inject: BitVec = + BitVec::from_vec(vec![core_index.0 as u8]); + self.validator_indices.extend(core_index_to_inject); + } + + /// Update the validator indices and core index in the candidate. + pub fn set_validator_indices_and_core_index( + &mut self, + new_indices: BitVec, + maybe_core_index: Option, + ) { + self.validator_indices = new_indices; + + if let Some(core_index) = maybe_core_index { + self.inject_core_index(core_index); + } + } } /// Verify the backing of the given candidate. @@ -748,44 +821,42 @@ impl BackedCandidate { /// Returns either an error, indicating that one of the signatures was invalid or that the index /// was out-of-bounds, or the number of signatures checked. pub fn check_candidate_backing + Clone + Encode + core::fmt::Debug>( - backed: &BackedCandidate, + candidate_hash: CandidateHash, + validity_votes: &[ValidityAttestation], + validator_indices: &BitSlice, signing_context: &SigningContext, group_len: usize, validator_lookup: impl Fn(usize) -> Option, ) -> Result { - if backed.validator_indices.len() != group_len { + if validator_indices.len() != group_len { log::debug!( target: LOG_TARGET, "Check candidate backing: indices mismatch: group_len = {} , indices_len = {}", group_len, - backed.validator_indices.len(), + validator_indices.len(), ); return Err(()) } - if backed.validity_votes.len() > group_len { + if validity_votes.len() > group_len { log::debug!( target: LOG_TARGET, "Check candidate backing: Too many votes, expected: {}, found: {}", group_len, - backed.validity_votes.len(), + validity_votes.len(), ); return Err(()) } - // this is known, even in runtime, to be blake2-256. 
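The bit manipulation in `validator_indices_and_core_index` and `inject_core_index` above is compact, so before the hunk continues, here is a standalone round-trip sketch (not from the patch, but using the same `bitvec` calls on a stand-in for the `validator_indices` field) for a four-validator group and core index 10:

```rust
use bitvec::{field::BitField, vec::BitVec};

fn main() {
    // Encoding: the backing group's four vote bits come first, then one byte
    // (8 bits) carrying the core index, appended like `inject_core_index`.
    let mut bits = bitvec::bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1];
    let core_bits: BitVec<u8, bitvec::order::Lsb0> = BitVec::from_vec(vec![10u8]);
    bits.extend(core_bits);

    // Decoding mirrors `validator_indices_and_core_index`: split off the
    // trailing 8 bits and load them as a little-endian integer.
    let offset = bits.len().saturating_sub(8);
    let (validator_bits, core_idx_bits) = bits.split_at(offset);
    assert_eq!(validator_bits.len(), 4);
    assert_eq!(core_idx_bits.load::<u8>(), 10);
}
```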
- let hash = backed.candidate.hash(); - let mut signed = 0; - for ((val_in_group_idx, _), attestation) in backed - .validator_indices + for ((val_in_group_idx, _), attestation) in validator_indices .iter() .enumerate() .filter(|(_, signed)| **signed) - .zip(backed.validity_votes.iter()) + .zip(validity_votes.iter()) { let validator_id = validator_lookup(val_in_group_idx).ok_or(())?; - let payload = attestation.signed_payload(hash, signing_context); + let payload = attestation.signed_payload(candidate_hash, signing_context); let sig = attestation.signature(); if sig.verify(&payload[..], &validator_id) { @@ -801,11 +872,11 @@ pub fn check_candidate_backing + Clone + Encode + core::fmt::Debu } } - if signed != backed.validity_votes.len() { + if signed != validity_votes.len() { log::error!( target: LOG_TARGET, "Check candidate backing: Too many signatures, expected = {}, found = {}", - backed.validity_votes.len() , + validity_votes.len(), signed, ); return Err(()) @@ -1884,6 +1955,34 @@ pub enum PvfExecKind { #[cfg(test)] mod tests { use super::*; + use bitvec::bitvec; + use primitives::sr25519; + + pub fn dummy_committed_candidate_receipt() -> CommittedCandidateReceipt { + let zeros = Hash::zero(); + + CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id: 0.into(), + relay_parent: zeros, + collator: CollatorId::from(sr25519::Public::from_raw([0; 32])), + persisted_validation_data_hash: zeros, + pov_hash: zeros, + erasure_root: zeros, + signature: CollatorSignature::from(sr25519::Signature([0u8; 64])), + para_head: zeros, + validation_code_hash: ValidationCode(vec![1, 2, 3, 4, 5, 6, 7, 8, 9]).hash(), + }, + commitments: CandidateCommitments { + head_data: HeadData(vec![]), + upward_messages: vec![].try_into().expect("empty vec fits within bounds"), + new_validation_code: None, + horizontal_messages: vec![].try_into().expect("empty vec fits within bounds"), + processed_downward_messages: 0, + hrmp_watermark: 0_u32, + }, + } + } #[test] fn group_rotation_info_calculations() { @@ -1958,4 +2057,73 @@ mod tests { assert!(zero_b.leading_zeros() >= zero_u.leading_zeros()); } + + #[test] + fn test_backed_candidate_injected_core_index() { + let initial_validator_indices = bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1]; + let mut candidate = BackedCandidate::new( + dummy_committed_candidate_receipt(), + vec![], + initial_validator_indices.clone(), + None, + ); + + // No core index supplied, ElasticScalingMVP is off. + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(false); + assert_eq!(validator_indices, initial_validator_indices.as_bitslice()); + assert!(core_index.is_none()); + + // No core index supplied, ElasticScalingMVP is on. Still, decoding will be ok if backing + // group size is <= 8, to give a chance to parachains that don't have multiple cores + // assigned. + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(true); + assert_eq!(validator_indices, initial_validator_indices.as_bitslice()); + assert!(core_index.is_none()); + + let encoded_validator_indices = candidate.validator_indices.clone(); + candidate.set_validator_indices_and_core_index(validator_indices.into(), core_index); + assert_eq!(candidate.validator_indices, encoded_validator_indices); + + // No core index supplied, ElasticScalingMVP is on. Decoding is corrupted if backing group + // size larger than 8. 
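The corrupted-decoding case that the test constructs below can also be checked numerically. A small sketch under the same `bitvec` assumptions; the value 85 is simply what the trailing byte of this particular bit pattern holds.

```rust
use bitvec::{field::BitField, order::Lsb0, vec::BitVec};

fn main() {
    // Nine backing bits, nothing injected.
    let bits: BitVec<u8, Lsb0> = bitvec::bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 1, 0, 1, 0];
    let offset = bits.len().saturating_sub(8); // 1
    let (indices, tail) = bits.split_at(offset);
    // Only one "validator" bit survives; the remaining eight bits are
    // misread as core index 85 (0b0101_0101 in Lsb0 order).
    assert_eq!(indices.len(), 1);
    assert_eq!(tail.load::<u8>(), 85);
}
```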
+ let candidate = BackedCandidate::new( + dummy_committed_candidate_receipt(), + vec![], + bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1, 0, 1, 0, 1, 0], + None, + ); + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(true); + assert_eq!(validator_indices, bitvec![u8, bitvec::order::Lsb0; 0].as_bitslice()); + assert!(core_index.is_some()); + + // Core index supplied, ElasticScalingMVP is off. Core index will be treated as normal + // validator indices. Runtime will check against this. + let candidate = BackedCandidate::new( + dummy_committed_candidate_receipt(), + vec![], + bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1], + Some(CoreIndex(10)), + ); + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(false); + assert_eq!( + validator_indices, + bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0] + ); + assert!(core_index.is_none()); + + // Core index supplied, ElasticScalingMVP is on. + let mut candidate = BackedCandidate::new( + dummy_committed_candidate_receipt(), + vec![], + bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1], + Some(CoreIndex(10)), + ); + let (validator_indices, core_index) = candidate.validator_indices_and_core_index(true); + assert_eq!(validator_indices, bitvec![u8, bitvec::order::Lsb0; 0, 1, 0, 1]); + assert_eq!(core_index, Some(CoreIndex(10))); + + let encoded_validator_indices = candidate.validator_indices.clone(); + candidate.set_validator_indices_and_core_index(validator_indices.into(), core_index); + assert_eq!(candidate.validator_indices, encoded_validator_indices); + } } diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 311a62b6c917..610401454763 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -69,6 +69,7 @@ sp-tracing = { path = "../../../substrate/primitives/tracing" } sp-crypto-hashing = { path = "../../../substrate/primitives/crypto/hashing" } thousands = "0.2.0" assert_matches = "1" +rstest = "0.18.2" serde_json = { workspace = true, default-features = true } [features] diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index 016b3fca589a..500bc70cfa75 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -587,11 +587,12 @@ impl BenchBuilder { }) .collect(); - BackedCandidate:: { + BackedCandidate::::new( candidate, validity_votes, - validator_indices: bitvec::bitvec![u8, bitvec::order::Lsb0; 1; group_validators.len()], - } + bitvec::bitvec![u8, bitvec::order::Lsb0; 1; group_validators.len()], + None, + ) }) .collect() } diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 90af9cde00a8..16e2e93b5617 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -47,10 +47,7 @@ use scale_info::TypeInfo; use sp_runtime::{traits::One, DispatchError, SaturatedConversion, Saturating}; #[cfg(feature = "std")] use sp_std::fmt; -use sp_std::{ - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - prelude::*, -}; +use sp_std::{collections::btree_set::BTreeSet, prelude::*}; pub use pallet::*; @@ -601,18 +598,16 @@ impl Pallet { /// scheduled cores. If these conditions are not met, the execution of the function fails. 
pub(crate) fn process_candidates( allowed_relay_parents: &AllowedRelayParentsTracker>, - candidates: Vec>, - scheduled: &BTreeMap, + candidates: Vec<(BackedCandidate, CoreIndex)>, group_validators: GV, + core_index_enabled: bool, ) -> Result, DispatchError> where GV: Fn(GroupIndex) -> Option>, { let now = >::block_number(); - ensure!(candidates.len() <= scheduled.len(), Error::::UnscheduledCandidate); - - if scheduled.is_empty() { + if candidates.is_empty() { return Ok(ProcessedCandidates::default()) } @@ -648,7 +643,7 @@ impl Pallet { // // In the meantime, we do certain sanity checks on the candidates and on the scheduled // list. - for (candidate_idx, backed_candidate) in candidates.iter().enumerate() { + for (candidate_idx, (backed_candidate, core_index)) in candidates.iter().enumerate() { let relay_parent_hash = backed_candidate.descriptor().relay_parent; let para_id = backed_candidate.descriptor().para_id; @@ -663,7 +658,7 @@ impl Pallet { let relay_parent_number = match check_ctx.verify_backed_candidate( &allowed_relay_parents, candidate_idx, - backed_candidate, + backed_candidate.candidate(), )? { Err(FailedToCreatePVD) => { log::debug!( @@ -679,11 +674,22 @@ impl Pallet { Ok(rpn) => rpn, }; - let para_id = backed_candidate.descriptor().para_id; + let (validator_indices, _) = + backed_candidate.validator_indices_and_core_index(core_index_enabled); + + log::debug!( + target: LOG_TARGET, + "Candidate {:?} on {:?}, + core_index_enabled = {}", + backed_candidate.hash(), + core_index, + core_index_enabled + ); + + check_assignment_in_order(core_index)?; + let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; - let core_idx = *scheduled.get(¶_id).ok_or(Error::::UnscheduledCandidate)?; - check_assignment_in_order(core_idx)?; ensure!( >::get(¶_id).is_none() && >::get(¶_id).is_none(), @@ -694,7 +700,7 @@ impl Pallet { // assigned to core at block `N + 1`. Thus, `relay_parent_number + 1` // will always land in the current session. let group_idx = >::group_assigned_to_core( - core_idx, + *core_index, relay_parent_number + One::one(), ) .ok_or_else(|| { @@ -711,7 +717,9 @@ impl Pallet { // check the signatures in the backing and that it is a majority. { let maybe_amount_validated = primitives::check_candidate_backing( - &backed_candidate, + backed_candidate.candidate().hash(), + backed_candidate.validity_votes(), + validator_indices, &signing_context, group_vals.len(), |intra_group_vi| { @@ -738,16 +746,15 @@ impl Pallet { let mut backer_idx_and_attestation = Vec::<(ValidatorIndex, ValidityAttestation)>::with_capacity( - backed_candidate.validator_indices.count_ones(), + validator_indices.count_ones(), ); let candidate_receipt = backed_candidate.receipt(); - for ((bit_idx, _), attestation) in backed_candidate - .validator_indices + for ((bit_idx, _), attestation) in validator_indices .iter() .enumerate() .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes.iter().cloned()) + .zip(backed_candidate.validity_votes().iter().cloned()) { let val_idx = group_vals.get(bit_idx).expect("this query succeeded above; qed"); @@ -760,7 +767,7 @@ impl Pallet { } core_indices_and_backers.push(( - (core_idx, para_id), + (*core_index, para_id), backers, group_idx, relay_parent_number, @@ -772,7 +779,7 @@ impl Pallet { // one more sweep for actually writing to storage. 
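The enumerate/filter/zip chain above, which pairs each set backing bit with its validity vote, is easier to see in isolation. A self-contained sketch with strings standing in for `ValidityAttestation`:

```rust
use bitvec::order::Lsb0;

fn main() {
    let indices = bitvec::bitvec![u8, Lsb0; 1, 0, 1];
    // Votes are stored in set-bit order: one entry per `1` in the bitfield.
    let votes = ["vote_of_validator_0", "vote_of_validator_2"];

    let paired: Vec<(usize, &str)> = indices
        .iter()
        .enumerate()
        .filter(|(_, signed)| **signed)
        .zip(votes.iter().copied())
        .map(|((group_idx, _), vote)| (group_idx, vote))
        .collect();

    // The zip recovers (position within the group, vote) pairs.
    assert_eq!(paired, vec![(0, "vote_of_validator_0"), (2, "vote_of_validator_2")]);
}
```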
let core_indices = core_indices_and_backers.iter().map(|(c, ..)| *c).collect(); - for (candidate, (core, backers, group, relay_parent_number)) in + for ((candidate, _), (core, backers, group, relay_parent_number)) in candidates.into_iter().zip(core_indices_and_backers) { let para_id = candidate.descriptor().para_id; @@ -782,16 +789,18 @@ impl Pallet { bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; Self::deposit_event(Event::::CandidateBacked( - candidate.candidate.to_plain(), - candidate.candidate.commitments.head_data.clone(), + candidate.candidate().to_plain(), + candidate.candidate().commitments.head_data.clone(), core.0, group, )); - let candidate_hash = candidate.candidate.hash(); + let candidate_hash = candidate.candidate().hash(); - let (descriptor, commitments) = - (candidate.candidate.descriptor, candidate.candidate.commitments); + let (descriptor, commitments) = ( + candidate.candidate().descriptor.clone(), + candidate.candidate().commitments.clone(), + ); >::insert( ¶_id, @@ -1195,10 +1204,10 @@ impl CandidateCheckContext { &self, allowed_relay_parents: &AllowedRelayParentsTracker>, candidate_idx: usize, - backed_candidate: &BackedCandidate<::Hash>, + backed_candidate_receipt: &CommittedCandidateReceipt<::Hash>, ) -> Result, FailedToCreatePVD>, Error> { - let para_id = backed_candidate.descriptor().para_id; - let relay_parent = backed_candidate.descriptor().relay_parent; + let para_id = backed_candidate_receipt.descriptor().para_id; + let relay_parent = backed_candidate_receipt.descriptor().relay_parent; // Check that the relay-parent is one of the allowed relay-parents. let (relay_parent_storage_root, relay_parent_number) = { @@ -1223,13 +1232,13 @@ impl CandidateCheckContext { let expected = persisted_validation_data.hash(); ensure!( - expected == backed_candidate.descriptor().persisted_validation_data_hash, + expected == backed_candidate_receipt.descriptor().persisted_validation_data_hash, Error::::ValidationDataHashMismatch, ); } ensure!( - backed_candidate.descriptor().check_collator_signature().is_ok(), + backed_candidate_receipt.descriptor().check_collator_signature().is_ok(), Error::::NotCollatorSigned, ); @@ -1237,25 +1246,25 @@ impl CandidateCheckContext { // A candidate for a parachain without current validation code is not scheduled. 
.ok_or_else(|| Error::::UnscheduledCandidate)?; ensure!( - backed_candidate.descriptor().validation_code_hash == validation_code_hash, + backed_candidate_receipt.descriptor().validation_code_hash == validation_code_hash, Error::::InvalidValidationCodeHash, ); ensure!( - backed_candidate.descriptor().para_head == - backed_candidate.candidate.commitments.head_data.hash(), + backed_candidate_receipt.descriptor().para_head == + backed_candidate_receipt.commitments.head_data.hash(), Error::::ParaHeadMismatch, ); if let Err(err) = self.check_validation_outputs( para_id, relay_parent_number, - &backed_candidate.candidate.commitments.head_data, - &backed_candidate.candidate.commitments.new_validation_code, - backed_candidate.candidate.commitments.processed_downward_messages, - &backed_candidate.candidate.commitments.upward_messages, - BlockNumberFor::::from(backed_candidate.candidate.commitments.hrmp_watermark), - &backed_candidate.candidate.commitments.horizontal_messages, + &backed_candidate_receipt.commitments.head_data, + &backed_candidate_receipt.commitments.new_validation_code, + backed_candidate_receipt.commitments.processed_downward_messages, + &backed_candidate_receipt.commitments.upward_messages, + BlockNumberFor::::from(backed_candidate_receipt.commitments.hrmp_watermark), + &backed_candidate_receipt.commitments.horizontal_messages, ) { log::debug!( target: LOG_TARGET, diff --git a/polkadot/runtime/parachains/src/inclusion/tests.rs b/polkadot/runtime/parachains/src/inclusion/tests.rs index 557b7b71a9e3..d2b5a67c3e45 100644 --- a/polkadot/runtime/parachains/src/inclusion/tests.rs +++ b/polkadot/runtime/parachains/src/inclusion/tests.rs @@ -120,6 +120,7 @@ pub(crate) fn back_candidate( keystore: &KeystorePtr, signing_context: &SigningContext, kind: BackingKind, + core_index: Option, ) -> BackedCandidate { let mut validator_indices = bitvec::bitvec![u8, BitOrderLsb0; 0; group.len()]; let threshold = effective_minimum_backing_votes( @@ -155,15 +156,20 @@ pub(crate) fn back_candidate( validity_votes.push(ValidityAttestation::Explicit(signature).into()); } - let backed = BackedCandidate { candidate, validity_votes, validator_indices }; + let backed = + BackedCandidate::new(candidate, validity_votes, validator_indices.clone(), core_index); - let successfully_backed = - primitives::check_candidate_backing(&backed, signing_context, group.len(), |i| { - Some(validators[group[i].0 as usize].public().into()) - }) - .ok() - .unwrap_or(0) >= - threshold; + let successfully_backed = primitives::check_candidate_backing( + backed.candidate().hash(), + backed.validity_votes(), + validator_indices.as_bitslice(), + signing_context, + group.len(), + |i| Some(validators[group[i].0 as usize].public().into()), + ) + .ok() + .unwrap_or(0) >= + threshold; match kind { BackingKind::Unanimous | BackingKind::Threshold => assert!(successfully_backed), @@ -919,38 +925,16 @@ fn candidate_checks() { let thread_a_assignment = (thread_a, CoreIndex::from(2)); let allowed_relay_parents = default_allowed_relay_parent_tracker(); - // unscheduled candidate. 
- { - let mut candidate = TestCandidateBuilder { - para_id: chain_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(0)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - ); - - assert_noop!( - ParaInclusion::process_candidates( - &allowed_relay_parents, - vec![backed], - &[chain_b_assignment].into_iter().collect(), - &group_validators, - ), - Error::::UnscheduledCandidate - ); - } + // no candidates. + assert_eq!( + ParaInclusion::process_candidates( + &allowed_relay_parents, + vec![], + &group_validators, + false + ), + Ok(ProcessedCandidates::default()) + ); // candidates out of order. { @@ -984,6 +968,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -993,15 +978,16 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); // out-of-order manifests as unscheduled. assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_b, backed_a], - &[chain_a_assignment, chain_b_assignment].into_iter().collect(), + vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], &group_validators, + false ), Error::::ScheduledOutOfOrder ); @@ -1027,14 +1013,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Lacking, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Error::::InsufficientBacking ); @@ -1075,6 +1062,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -1084,14 +1072,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_b, backed_a], - &[chain_a_assignment, chain_b_assignment].into_iter().collect(), + vec![(backed_b, chain_b_assignment.1), (backed_a, chain_a_assignment.1)], &group_validators, + false ), Error::::DisallowedRelayParent ); @@ -1122,14 +1111,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[thread_a_assignment].into_iter().collect(), + vec![(backed, thread_a_assignment.1)], &group_validators, + false ), Error::::NotCollatorSigned ); @@ -1156,6 +1146,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let candidate = TestCandidateBuilder::default().build(); @@ -1177,9 +1168,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Error::::CandidateScheduledBeforeParaFree ); @@ -1212,14 +1203,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, 
chain_a_assignment.1)], &group_validators, + false ), Error::::CandidateScheduledBeforeParaFree ); @@ -1249,6 +1241,7 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); { @@ -1267,9 +1260,9 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Error::::PrematureCodeUpgrade ); @@ -1296,14 +1289,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_eq!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Err(Error::::ValidationDataHashMismatch.into()), ); @@ -1331,14 +1325,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Error::::InvalidValidationCodeHash ); @@ -1366,14 +1361,15 @@ fn candidate_checks() { &keystore, &signing_context, BackingKind::Threshold, + None, ); assert_noop!( ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed], - &[chain_a_assignment].into_iter().collect(), + vec![(backed, chain_a_assignment.1)], &group_validators, + false ), Error::::ParaHeadMismatch ); @@ -1486,6 +1482,7 @@ fn backing_works() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -1495,6 +1492,7 @@ fn backing_works() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let backed_c = back_candidate( @@ -1504,15 +1502,20 @@ fn backing_works() { &keystore, &signing_context, BackingKind::Threshold, + None, ); - let backed_candidates = vec![backed_a.clone(), backed_b.clone(), backed_c]; + let backed_candidates = vec![ + (backed_a.clone(), chain_a_assignment.1), + (backed_b.clone(), chain_b_assignment.1), + (backed_c, thread_a_assignment.1), + ]; let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates .iter() .enumerate() - .map(|(idx, backed_candidate)| (backed_candidate.hash(), GroupIndex(idx as _))) + .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) .collect::>(); move |candidate_hash_x: CandidateHash| -> Option { @@ -1532,10 +1535,8 @@ fn backing_works() { } = ParaInclusion::process_candidates( &allowed_relay_parents, backed_candidates.clone(), - &[chain_a_assignment, chain_b_assignment, thread_a_assignment] - .into_iter() - .collect(), &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); @@ -1554,22 +1555,22 @@ fn backing_works() { CandidateHash, (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), >::new(); - backed_candidates.into_iter().for_each(|backed_candidate| { + backed_candidates.into_iter().for_each(|(backed_candidate, _)| { let candidate_receipt_with_backers = intermediate .entry(backed_candidate.hash()) .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); - - assert_eq!( - backed_candidate.validity_votes.len(), - backed_candidate.validator_indices.count_ones() - ); + let (validator_indices, None) = + backed_candidate.validator_indices_and_core_index(false) + else { + panic!("Expected no 
injected core index") + }; + assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); candidate_receipt_with_backers.1.extend( - backed_candidate - .validator_indices + validator_indices .iter() .enumerate() .filter(|(_, signed)| **signed) - .zip(backed_candidate.validity_votes.iter().cloned()) + .zip(backed_candidate.validity_votes().iter().cloned()) .filter_map(|((validator_index_within_group, _), attestation)| { let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); group_validators(grp_idx).map(|validator_indices| { @@ -1666,6 +1667,257 @@ fn backing_works() { }); } +#[test] +fn backing_works_with_elastic_scaling_mvp() { + let chain_a = ParaId::from(1_u32); + let chain_b = ParaId::from(2_u32); + let thread_a = ParaId::from(3_u32); + + // The block number of the relay-parent for testing. + const RELAY_PARENT_NUM: BlockNumber = 4; + + let paras = vec![ + (chain_a, ParaKind::Parachain), + (chain_b, ParaKind::Parachain), + (thread_a, ParaKind::Parathread), + ]; + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, + ]; + let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory()); + for validator in validators.iter() { + Keystore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + let validator_public = validator_pubkeys(&validators); + + new_test_ext(genesis_config(paras)).execute_with(|| { + shared::Pallet::::set_active_validators_ascending(validator_public.clone()); + shared::Pallet::::set_session_index(5); + + run_to_block(5, |_| None); + + let signing_context = + SigningContext { parent_hash: System::parent_hash(), session_index: 5 }; + + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]), + group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]), + group_index if group_index == GroupIndex::from(2) => Some(vec![4]), + _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"), + } + .map(|vs| vs.into_iter().map(ValidatorIndex).collect::>()) + }; + + // When processing candidates, we compute the group index from scheduler. 
+ let validator_groups = vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2), ValidatorIndex(3)], + vec![ValidatorIndex(4)], + ]; + Scheduler::set_validator_groups(validator_groups); + + let allowed_relay_parents = default_allowed_relay_parent_tracker(); + + let mut candidate_a = TestCandidateBuilder { + para_id: chain_a, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(1), + persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + + let mut candidate_b_1 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(2), + persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_b_1); + + let mut candidate_b_2 = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(3), + persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_b_2); + + let backed_a = back_candidate( + candidate_a.clone(), + &validators, + group_validators(GroupIndex::from(0)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + + let backed_b_1 = back_candidate( + candidate_b_1.clone(), + &validators, + group_validators(GroupIndex::from(1)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + Some(CoreIndex(1)), + ); + + let backed_b_2 = back_candidate( + candidate_b_2.clone(), + &validators, + group_validators(GroupIndex::from(2)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + Some(CoreIndex(2)), + ); + + let backed_candidates = vec![ + (backed_a.clone(), CoreIndex(0)), + (backed_b_1.clone(), CoreIndex(1)), + (backed_b_2.clone(), CoreIndex(2)), + ]; + let get_backing_group_idx = { + // the order defines the group implicitly for this test case + let backed_candidates_with_groups = backed_candidates + .iter() + .enumerate() + .map(|(idx, (backed_candidate, _))| (backed_candidate.hash(), GroupIndex(idx as _))) + .collect::>(); + + move |candidate_hash_x: CandidateHash| -> Option { + backed_candidates_with_groups.iter().find_map(|(candidate_hash, grp)| { + if *candidate_hash == candidate_hash_x { + Some(*grp) + } else { + None + } + }) + } + }; + + let ProcessedCandidates { + core_indices: occupied_cores, + candidate_receipt_with_backing_validator_indices, + } = ParaInclusion::process_candidates( + &allowed_relay_parents, + backed_candidates.clone(), + &group_validators, + true, + ) + .expect("candidates scheduled, in order, and backed"); + + // Both b candidates will be backed. However, only one will be recorded on-chain and proceed + // with being made available. 
+ assert_eq!( + occupied_cores, + vec![ + (CoreIndex::from(0), chain_a), + (CoreIndex::from(1), chain_b), + (CoreIndex::from(2), chain_b), + ] + ); + + // Transform the votes into the setup we expect + let mut expected = std::collections::HashMap::< + CandidateHash, + (CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>), + >::new(); + backed_candidates.into_iter().for_each(|(backed_candidate, _)| { + let candidate_receipt_with_backers = expected + .entry(backed_candidate.hash()) + .or_insert_with(|| (backed_candidate.receipt(), Vec::new())); + let (validator_indices, _maybe_core_index) = + backed_candidate.validator_indices_and_core_index(true); + assert_eq!(backed_candidate.validity_votes().len(), validator_indices.count_ones()); + candidate_receipt_with_backers.1.extend( + validator_indices + .iter() + .enumerate() + .filter(|(_, signed)| **signed) + .zip(backed_candidate.validity_votes().iter().cloned()) + .filter_map(|((validator_index_within_group, _), attestation)| { + let grp_idx = get_backing_group_idx(backed_candidate.hash()).unwrap(); + group_validators(grp_idx).map(|validator_indices| { + (validator_indices[validator_index_within_group], attestation) + }) + }), + ); + }); + + assert_eq!( + expected, + candidate_receipt_with_backing_validator_indices + .into_iter() + .map(|c| (c.0.hash(), c)) + .collect() + ); + + let backers = { + let num_backers = effective_minimum_backing_votes( + group_validators(GroupIndex(0)).unwrap().len(), + configuration::Pallet::::config().minimum_backing_votes, + ); + backing_bitfield(&(0..num_backers).collect::>()) + }; + assert_eq!( + >::get(&chain_a), + Some(CandidatePendingAvailability { + core: CoreIndex::from(0), + hash: candidate_a.hash(), + descriptor: candidate_a.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers, + backing_group: GroupIndex::from(0), + }) + ); + assert_eq!( + >::get(&chain_a), + Some(candidate_a.commitments), + ); + + // Only one candidate for b will be recorded on chain. + assert_eq!( + >::get(&chain_b), + Some(CandidatePendingAvailability { + core: CoreIndex::from(2), + hash: candidate_b_2.hash(), + descriptor: candidate_b_2.descriptor, + availability_votes: default_availability_votes(), + relay_parent_number: System::block_number() - 1, + backed_in_number: System::block_number(), + backers: backing_bitfield(&[4]), + backing_group: GroupIndex::from(2), + }) + ); + assert_eq!( + >::get(&chain_b), + Some(candidate_b_2.commitments), + ); + }); +} + #[test] fn can_include_candidate_with_ok_code_upgrade() { let chain_a = ParaId::from(1_u32); @@ -1740,14 +1992,15 @@ fn can_include_candidate_with_ok_code_upgrade() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let ProcessedCandidates { core_indices: occupied_cores, .. 
} = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_a], - &[chain_a_assignment].into_iter().collect(), + vec![(backed_a, chain_a_assignment.1)], &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); @@ -1932,6 +2185,7 @@ fn check_allowed_relay_parents() { &keystore, &signing_context_a, BackingKind::Threshold, + None, ); let backed_b = back_candidate( @@ -1941,6 +2195,7 @@ fn check_allowed_relay_parents() { &keystore, &signing_context_b, BackingKind::Threshold, + None, ); let backed_c = back_candidate( @@ -1950,17 +2205,20 @@ fn check_allowed_relay_parents() { &keystore, &signing_context_c, BackingKind::Threshold, + None, ); - let backed_candidates = vec![backed_a, backed_b, backed_c]; + let backed_candidates = vec![ + (backed_a, chain_a_assignment.1), + (backed_b, chain_b_assignment.1), + (backed_c, thread_a_assignment.1), + ]; ParaInclusion::process_candidates( &allowed_relay_parents, backed_candidates.clone(), - &[chain_a_assignment, chain_b_assignment, thread_a_assignment] - .into_iter() - .collect(), &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); }); @@ -2189,14 +2447,15 @@ fn para_upgrade_delay_scheduled_from_inclusion() { &keystore, &signing_context, BackingKind::Threshold, + None, ); let ProcessedCandidates { core_indices: occupied_cores, .. } = ParaInclusion::process_candidates( &allowed_relay_parents, - vec![backed_a], - &[chain_a_assignment].into_iter().collect(), + vec![(backed_a, chain_a_assignment.1)], &group_validators, + false, ) .expect("candidates scheduled, in order, and backed"); diff --git a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs index 0f6b23ae1b39..ad3fa8e0dc71 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/benchmarking.rs @@ -120,7 +120,7 @@ benchmarks! { // with `v` validity votes. // let votes = v as usize; let votes = min(scheduler::Pallet::::group_validators(GroupIndex::from(0)).unwrap().len(), v as usize); - assert_eq!(benchmark.backed_candidates.get(0).unwrap().validity_votes.len(), votes); + assert_eq!(benchmark.backed_candidates.get(0).unwrap().validity_votes().len(), votes); benchmark.bitfields.clear(); benchmark.disputes.clear(); @@ -177,7 +177,7 @@ benchmarks! 
{ // There is 1 backed assert_eq!(benchmark.backed_candidates.len(), 1); assert_eq!( - benchmark.backed_candidates.get(0).unwrap().validity_votes.len(), + benchmark.backed_candidates.get(0).unwrap().validity_votes().len(), votes, ); diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 81e092f0a991..cebf858c24ab 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -43,15 +43,14 @@ use frame_support::{ use frame_system::pallet_prelude::*; use pallet_babe::{self, ParentBlockRandomness}; use primitives::{ - effective_minimum_backing_votes, BackedCandidate, CandidateHash, CandidateReceipt, - CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CoreIndex, DisputeStatementSet, - InherentData as ParachainsInherentData, MultiDisputeStatementSet, ScrapedOnChainVotes, - SessionIndex, SignedAvailabilityBitfields, SigningContext, UncheckedSignedAvailabilityBitfield, - UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex, ValidityAttestation, - PARACHAINS_INHERENT_IDENTIFIER, + effective_minimum_backing_votes, vstaging::node_features::FeatureIndex, BackedCandidate, + CandidateHash, CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, + CoreIndex, DisputeStatementSet, InherentData as ParachainsInherentData, + MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SignedAvailabilityBitfields, + SigningContext, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, + ValidatorId, ValidatorIndex, ValidityAttestation, PARACHAINS_INHERENT_IDENTIFIER, }; use rand::{seq::SliceRandom, SeedableRng}; - use scale_info::TypeInfo; use sp_runtime::traits::{Header as HeaderT, One}; use sp_std::{ @@ -145,6 +144,10 @@ pub mod pallet { DisputeInvalid, /// A candidate was backed by a disabled validator BackedByDisabled, + /// A candidate was backed even though the paraid was not scheduled. + BackedOnUnscheduledCore, + /// Too many candidates supplied. + UnscheduledCandidate, } /// Whether the paras inherent was included within this block. 
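The hunk below gates the new code path on a runtime node-feature bit. A hedged sketch of that lookup pattern follows; the `FeatureIndex` discriminant is assumed for illustration only, the real enum lives in `primitives::vstaging::node_features`.

```rust
use bitvec::{order::Lsb0, vec::BitVec};

// Discriminant assumed for the sketch, not taken from the real enum.
#[derive(Clone, Copy)]
enum FeatureIndex {
    ElasticScalingMVP = 1,
}

/// A feature is on iff its bit exists in the feature bitvec and is set.
/// Runtimes that predate the feature simply expose a shorter bitvec.
fn feature_enabled(node_features: &BitVec<u8, Lsb0>, index: FeatureIndex) -> bool {
    node_features.get(index as usize).map(|b| *b).unwrap_or(false)
}

fn main() {
    let features: BitVec<u8, Lsb0> = bitvec::bitvec![u8, Lsb0; 0, 1];
    assert!(feature_enabled(&features, FeatureIndex::ElasticScalingMVP));
    let short: BitVec<u8, Lsb0> = bitvec::bitvec![u8, Lsb0; 0];
    assert!(!feature_enabled(&short, FeatureIndex::ElasticScalingMVP));
}
```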
@@ -585,25 +588,39 @@ impl Pallet { let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); >::free_cores_and_fill_claimqueue(freed, now); - let scheduled = >::scheduled_paras() - .map(|(core_idx, para_id)| (para_id, core_idx)) - .collect(); METRICS.on_candidates_processed_total(backed_candidates.len() as u64); - let SanitizedBackedCandidates { backed_candidates, votes_from_disabled_were_dropped } = - sanitize_backed_candidates::( - backed_candidates, - &allowed_relay_parents, - |candidate_idx: usize, - backed_candidate: &BackedCandidate<::Hash>| - -> bool { - let para_id = backed_candidate.descriptor().para_id; - let prev_context = >::para_most_recent_context(para_id); - let check_ctx = CandidateCheckContext::::new(prev_context); - - // never include a concluded-invalid candidate - current_concluded_invalid_disputes.contains(&backed_candidate.hash()) || + let core_index_enabled = configuration::Pallet::::config() + .node_features + .get(FeatureIndex::ElasticScalingMVP as usize) + .map(|b| *b) + .unwrap_or(false); + + let mut scheduled: BTreeMap> = BTreeMap::new(); + let mut total_scheduled_cores = 0; + + for (core_idx, para_id) in >::scheduled_paras() { + total_scheduled_cores += 1; + scheduled.entry(para_id).or_default().insert(core_idx); + } + + let SanitizedBackedCandidates { + backed_candidates_with_core, + votes_from_disabled_were_dropped, + dropped_unscheduled_candidates, + } = sanitize_backed_candidates::( + backed_candidates, + &allowed_relay_parents, + |candidate_idx: usize, + backed_candidate: &BackedCandidate<::Hash>| + -> bool { + let para_id = backed_candidate.descriptor().para_id; + let prev_context = >::para_most_recent_context(para_id); + let check_ctx = CandidateCheckContext::::new(prev_context); + + // never include a concluded-invalid candidate + current_concluded_invalid_disputes.contains(&backed_candidate.hash()) || // Instead of checking the candidates with code upgrades twice // move the checking up here and skip it in the training wheels fallback. // That way we avoid possible duplicate checks while assuring all @@ -611,13 +628,19 @@ impl Pallet { // // NOTE: this is the only place where we check the relay-parent. check_ctx - .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate) + .verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate.candidate()) .is_err() - }, - &scheduled, - ); + }, + scheduled, + core_index_enabled, + ); + + ensure!( + backed_candidates_with_core.len() <= total_scheduled_cores, + Error::::UnscheduledCandidate + ); - METRICS.on_candidates_sanitized(backed_candidates.len() as u64); + METRICS.on_candidates_sanitized(backed_candidates_with_core.len() as u64); // In `Enter` context (invoked during execution) there should be no backing votes from // disabled validators because they should have been filtered out during inherent data @@ -626,15 +649,22 @@ impl Pallet { ensure!(!votes_from_disabled_were_dropped, Error::::BackedByDisabled); } + // In `Enter` context (invoked during execution) we shouldn't have filtered any candidates + // due to a para not being scheduled. They have been filtered during inherent data + // preparation (`ProvideInherent` context). Abort in such cases. + if context == ProcessInherentDataContext::Enter { + ensure!(!dropped_unscheduled_candidates, Error::::BackedOnUnscheduledCore); + } + // Process backed candidates according to scheduled cores. 
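Because a para may now hold several cores, the scheduler snapshot built above becomes a multimap keyed by para. A trivial sketch of that shape, with plain integers standing in for `ParaId` and `CoreIndex`:

```rust
use std::collections::{BTreeMap, BTreeSet};

fn main() {
    // (core, para) pairs as the scheduler would yield them.
    let claims = [(0u32, 1u32), (1, 1), (2, 2)];

    let mut scheduled: BTreeMap<u32, BTreeSet<u32>> = BTreeMap::new();
    let mut total_scheduled_cores = 0;
    for (core, para) in claims {
        total_scheduled_cores += 1;
        scheduled.entry(para).or_default().insert(core);
    }

    // Para 1 holds two cores, para 2 holds one; three cores total, which is
    // the bound later enforced against the number of sanitized candidates.
    assert_eq!(total_scheduled_cores, 3);
    assert_eq!(scheduled[&1], BTreeSet::from([0, 1]));
    assert_eq!(scheduled[&2], BTreeSet::from([2]));
}
```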
let inclusion::ProcessedCandidates::< as HeaderT>::Hash> { core_indices: occupied, candidate_receipt_with_backing_validator_indices, } = >::process_candidates( &allowed_relay_parents, - backed_candidates.clone(), - &scheduled, + backed_candidates_with_core.clone(), >::group_validators, + core_index_enabled, )?; // Note which of the scheduled cores were actually occupied by a backed candidate. >::occupied(occupied.into_iter().map(|e| (e.0, e.1)).collect()); @@ -651,8 +681,15 @@ impl Pallet { let bitfields = bitfields.into_iter().map(|v| v.into_unchecked()).collect(); - let processed = - ParachainsInherentData { bitfields, backed_candidates, disputes, parent_header }; + let processed = ParachainsInherentData { + bitfields, + backed_candidates: backed_candidates_with_core + .into_iter() + .map(|(candidate, _)| candidate) + .collect(), + disputes, + parent_header, + }; Ok((processed, Some(all_weight_after).into())) } } @@ -774,7 +811,7 @@ fn apply_weight_limit( .iter() .enumerate() .filter_map(|(idx, candidate)| { - candidate.candidate.commitments.new_validation_code.as_ref().map(|_code| idx) + candidate.candidate().commitments.new_validation_code.as_ref().map(|_code| idx) }) .collect::>(); @@ -916,16 +953,22 @@ pub(crate) fn sanitize_bitfields( // Result from `sanitize_backed_candidates` #[derive(Debug, PartialEq)] struct SanitizedBackedCandidates { - // Sanitized backed candidates. The `Vec` is sorted according to the occupied core index. - backed_candidates: Vec>, + // Sanitized backed candidates along with the assigned core. The `Vec` is sorted according to + // the occupied core index. + backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>, // Set to true if any votes from disabled validators were dropped from the input. votes_from_disabled_were_dropped: bool, + // Set to true if any candidates were dropped due to filtering done in + // `map_candidates_to_cores` + dropped_unscheduled_candidates: bool, } /// Filter out: /// 1. any candidates that have a concluded invalid dispute -/// 2. all backing votes from disabled validators -/// 3. any candidates that end up with less than `effective_minimum_backing_votes` backing votes +/// 2. any unscheduled candidates, as well as candidates whose paraid has multiple cores assigned +/// but have no injected core index. +/// 3. all backing votes from disabled validators +/// 4. any candidates that end up with less than `effective_minimum_backing_votes` backing votes /// /// `scheduled` follows the same naming scheme as provided in the /// guide: Currently `free` but might become `occupied`. @@ -944,7 +987,8 @@ fn sanitize_backed_candidates< mut backed_candidates: Vec>, allowed_relay_parents: &AllowedRelayParentsTracker>, mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, - scheduled: &BTreeMap, + scheduled: BTreeMap>, + core_index_enabled: bool, ) -> SanitizedBackedCandidates { // Remove any candidates that were concluded invalid. // This does not assume sorting. @@ -952,22 +996,23 @@ fn sanitize_backed_candidates< !candidate_has_concluded_invalid_dispute_or_is_invalid(candidate_idx, backed_candidate) }); - // Assure the backed candidate's `ParaId`'s core is free. - // This holds under the assumption that `Scheduler::schedule` is called _before_. - // We don't check the relay-parent because this is done in the closure when - // constructing the inherent and during actual processing otherwise. 
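The filtering rules listed above reduce to one small decision per candidate. A simplified sketch under assumed plain types, not the pallet code itself:

```rust
use std::collections::{BTreeMap, BTreeSet};

type ParaId = u32;
type CoreIndex = u32;

fn map_to_cores(
    mut scheduled: BTreeMap<ParaId, BTreeSet<CoreIndex>>,
    candidates: Vec<(ParaId, Option<CoreIndex>)>, // (para, injected core index)
) -> Vec<(ParaId, CoreIndex)> {
    let mut picked = Vec::new();
    for (para, injected) in candidates {
        // Candidates of unscheduled paras are silently dropped.
        let Some(cores) = scheduled.get_mut(&para) else { continue };
        match injected {
            // An injected index must name a core this para actually holds.
            Some(core) =>
                if cores.remove(&core) {
                    picked.push((para, core));
                },
            // Without an injected index, only a single remaining core is unambiguous.
            None =>
                if cores.len() == 1 {
                    picked.push((para, cores.pop_first().expect("length is 1")));
                },
        }
    }
    picked
}

fn main() {
    let scheduled = BTreeMap::from([(1, BTreeSet::from([0, 1])), (2, BTreeSet::from([2]))]);
    let candidates = vec![(1, None), (1, Some(0)), (2, None), (3, Some(5))];
    // The first para-1 candidate carries no index while the para still holds
    // two cores, and para 3 is unscheduled; both are dropped.
    assert_eq!(map_to_cores(scheduled, candidates), vec![(1, 0), (2, 2)]);
}
```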
- - backed_candidates.retain(|backed_candidate| { - let desc = backed_candidate.descriptor(); + let initial_candidate_count = backed_candidates.len(); + // Map candidates to scheduled cores. Filter out any unscheduled candidates. + let mut backed_candidates_with_core = map_candidates_to_cores::( + &allowed_relay_parents, + scheduled, + core_index_enabled, + backed_candidates, + ); - scheduled.get(&desc.para_id).is_some() - }); + let dropped_unscheduled_candidates = + initial_candidate_count != backed_candidates_with_core.len(); // Filter out backing statements from disabled validators - let dropped_disabled = filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + let votes_from_disabled_were_dropped = filter_backed_statements_from_disabled_validators::( + &mut backed_candidates_with_core, &allowed_relay_parents, - scheduled, + core_index_enabled, ); // Sort the `Vec` last, once there is a guarantee that these @@ -975,14 +1020,12 @@ fn sanitize_backed_candidates< // but more importantly are scheduled for a free core. // This both avoids extra work for obviously invalid candidates, // but also allows this to be done in place. - backed_candidates.sort_by(|x, y| { - // Never panics, since we filtered all panic arguments out in the previous `fn retain`. - scheduled[&x.descriptor().para_id].cmp(&scheduled[&y.descriptor().para_id]) - }); + backed_candidates_with_core.sort_by(|(_x, core_x), (_y, core_y)| core_x.cmp(&core_y)); SanitizedBackedCandidates { - backed_candidates, - votes_from_disabled_were_dropped: dropped_disabled, + dropped_unscheduled_candidates, + votes_from_disabled_were_dropped, + backed_candidates_with_core, } } @@ -1071,9 +1114,12 @@ fn limit_and_sanitize_disputes< // few more sanity checks. Returns `true` if at least one statement is removed and `false` // otherwise. fn filter_backed_statements_from_disabled_validators( - backed_candidates: &mut Vec::Hash>>, + backed_candidates_with_core: &mut Vec<( + BackedCandidate<::Hash>, + CoreIndex, + )>, allowed_relay_parents: &AllowedRelayParentsTracker>, - scheduled: &BTreeMap, + core_index_enabled: bool, ) -> bool { let disabled_validators = BTreeSet::<_>::from_iter(shared::Pallet::::disabled_validators().into_iter()); @@ -1083,7 +1129,7 @@ fn filter_backed_statements_from_disabled_validators *core_idx, - None => { - log::debug!(target: LOG_TARGET, "Can't get core idx of a backed candidate for para id {:?}. Dropping the candidate.", bc.descriptor().para_id); - return false - } - }; + backed_candidates_with_core.retain_mut(|(bc, core_idx)| { + let (validator_indices, maybe_core_index) = bc.validator_indices_and_core_index(core_index_enabled); + let mut validator_indices = BitVec::<_>::from(validator_indices); // Get relay parent block number of the candidate. We need this to get the group index assigned to this core at this block number let relay_parent_block_number = match allowed_relay_parents @@ -1116,7 +1156,7 @@ fn filter_backed_statements_from_disabled_validators>::group_assigned_to_core( - core_idx, + *core_idx, relay_parent_block_number + One::one(), ) { Some(group_idx) => group_idx, @@ -1138,12 +1178,15 @@ fn filter_backed_statements_from_disabled_validators::from_iter(validator_group.iter().map(|idx| disabled_validators.contains(idx))); // The indices of statements from disabled validators in `BackedCandidate`. We have to drop these. 
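The mask arithmetic that follows is compact, so here is a standalone sketch of what it computes, with assumed shapes and strings standing in for votes:

```rust
use bitvec::{order::Lsb0, vec::BitVec};

fn main() {
    // Backing bits of one candidate and the disabled mask for its group.
    let mut validator_indices: BitVec<u8, Lsb0> = bitvec::bitvec![u8, Lsb0; 1, 1, 0, 1];
    let disabled: BitVec<u8, Lsb0> = bitvec::bitvec![u8, Lsb0; 0, 1, 0, 0];
    let mut votes = vec!["vote_0", "vote_1", "vote_3"]; // one vote per set bit

    // Bits to drop are the intersection; the kept bits are the complement.
    let indices_to_drop = disabled.clone() & &validator_indices;
    validator_indices &= !disabled;
    assert_eq!(validator_indices, bitvec::bitvec![u8, Lsb0; 1, 0, 0, 1]);

    // Walk dropped positions right to left so earlier removals cannot shift
    // positions that are still pending removal.
    for idx in indices_to_drop.iter_ones().rev() {
        votes.remove(idx);
    }
    assert_eq!(votes, vec!["vote_0", "vote_3"]);
}
```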
- let indices_to_drop = disabled_indices.clone() & &bc.validator_indices; + let indices_to_drop = disabled_indices.clone() & &validator_indices; // Apply the bitmask to drop the disabled validator from `validator_indices` - bc.validator_indices &= !disabled_indices; + validator_indices &= !disabled_indices; + // Update the backed candidate + bc.set_validator_indices_and_core_index(validator_indices, maybe_core_index); + // Remove the corresponding votes from `validity_votes` for idx in indices_to_drop.iter_ones().rev() { - bc.validity_votes.remove(idx); + bc.validity_votes_mut().remove(idx); } // If at least one statement was dropped we need to return `true` @@ -1154,10 +1197,9 @@ fn filter_backed_statements_from_disabled_validators( + allowed_relay_parents: &AllowedRelayParentsTracker>, + mut scheduled: BTreeMap>, + core_index_enabled: bool, + candidates: Vec>, +) -> Vec<(BackedCandidate, CoreIndex)> { + let mut backed_candidates_with_core = Vec::with_capacity(candidates.len()); + + // We keep a candidate if the parachain has only one core assigned or if + // a core index is provided by block author and it's indeed scheduled. + for backed_candidate in candidates { + let maybe_injected_core_index = get_injected_core_index::( + allowed_relay_parents, + &backed_candidate, + core_index_enabled, + ); + + let scheduled_cores = scheduled.get_mut(&backed_candidate.descriptor().para_id); + // Candidates without scheduled cores are silently filtered out. + if let Some(scheduled_cores) = scheduled_cores { + if let Some(core_idx) = maybe_injected_core_index { + if scheduled_cores.contains(&core_idx) { + scheduled_cores.remove(&core_idx); + backed_candidates_with_core.push((backed_candidate, core_idx)); + } + } else if scheduled_cores.len() == 1 { + backed_candidates_with_core + .push((backed_candidate, scheduled_cores.pop_first().expect("Length is 1"))); + } + } + } + + backed_candidates_with_core +} + +fn get_injected_core_index( + allowed_relay_parents: &AllowedRelayParentsTracker>, + candidate: &BackedCandidate, + core_index_enabled: bool, +) -> Option { + // After stripping the 8 bit extensions, the `validator_indices` field length is expected + // to be equal to backing group size. If these don't match, the `CoreIndex` is badly encoded, + // or not supported. + let (validator_indices, maybe_core_idx) = + candidate.validator_indices_and_core_index(core_index_enabled); + + let Some(core_idx) = maybe_core_idx else { return None }; + + let relay_parent_block_number = + match allowed_relay_parents.acquire_info(candidate.descriptor().relay_parent, None) { + Some((_, block_num)) => block_num, + None => { + log::debug!( + target: LOG_TARGET, + "Relay parent {:?} for candidate {:?} is not in the allowed relay parents. Dropping the candidate.", + candidate.descriptor().relay_parent, + candidate.candidate().hash(), + ); + return None + }, + }; + + // Get the backing group of the candidate backed at `core_idx`. + let group_idx = match >::group_assigned_to_core( + core_idx, + relay_parent_block_number + One::one(), + ) { + Some(group_idx) => group_idx, + None => { + log::debug!( + target: LOG_TARGET, + "Can't get the group index for core idx {:?}. 
Dropping the candidate {:?}.",
+ core_idx,
+ candidate.candidate().hash(),
+ );
+ return None
+ },
+ };
+
+ let group_validators = match >::group_validators(group_idx) {
+ Some(validators) => validators,
+ None => return None,
+ };
+
+ if group_validators.len() == validator_indices.len() {
+ Some(core_idx)
+ } else {
+ None
+ }
}
diff --git a/polkadot/runtime/parachains/src/paras_inherent/tests.rs b/polkadot/runtime/parachains/src/paras_inherent/tests.rs
index 6f3eac35685a..defb2f4404f5 100644
--- a/polkadot/runtime/parachains/src/paras_inherent/tests.rs
+++ b/polkadot/runtime/parachains/src/paras_inherent/tests.rs
@@ -26,7 +26,10 @@ mod enter {
use crate::{
builder::{Bench, BenchBuilder},
mock::{mock_assigner, new_test_ext, BlockLength, BlockWeights, MockGenesisConfig, Test},
- scheduler::common::Assignment,
+ scheduler::{
+ common::{Assignment, AssignmentProvider, AssignmentProviderConfig},
+ ParasEntry,
+ },
};
use assert_matches::assert_matches;
use frame_support::assert_ok;
@@ -697,6 +700,25 @@ mod enter {
2
);

+ // One core was scheduled. We should put the assignment back before calling enter().
+ let now = >::block_number() + 1;
+ let used_cores = 5;
+ let cores = (0..used_cores)
+ .into_iter()
+ .map(|i| {
+ let AssignmentProviderConfig { ttl, .. } =
+ scheduler::Pallet::::assignment_provider_config(CoreIndex(i));
+ // Load an assignment into the provider so that one is present to pop
+ let assignment =
+ ::AssignmentProvider::get_mock_assignment(
+ CoreIndex(i),
+ ParaId::from(i),
+ );
+ (CoreIndex(i), [ParasEntry::new(assignment, now + ttl)].into())
+ })
+ .collect();
+ scheduler::ClaimQueue::::set(cores);
+
assert_ok!(Pallet::::enter(
frame_system::RawOrigin::None.into(),
limit_inherent_data,
@@ -980,6 +1002,7 @@ mod sanitizers {
AvailabilityBitfield, GroupIndex, Hash, Id as ParaId, SignedAvailabilityBitfield,
ValidatorIndex,
};
+ use rstest::rstest;
use sp_core::crypto::UncheckedFrom;

use crate::mock::Test;
@@ -1238,12 +1261,13 @@ mod sanitizers {
// Backed candidates and scheduled parachains used for `sanitize_backed_candidates` testing
struct TestData {
backed_candidates: Vec,
- scheduled_paras: BTreeMap,
+ all_backed_candidates_with_core: Vec<(BackedCandidate, CoreIndex)>,
+ scheduled_paras: BTreeMap>,
}

// Generate test data for the candidates and assert that the environment is set as expected
// (check the comments for details)
- fn get_test_data() -> TestData {
+ fn get_test_data(core_index_enabled: bool) -> TestData {
const RELAY_PARENT_NUM: u32 = 3;

// Add the relay parent to `shared` pallet. Otherwise some code (e.g. filtering backing
// votes) won't behave correctly
shared::Pallet::::add_allowed_relay_parent(
default_header().hash(),
Default::default(),
RELAY_PARENT_NUM,
1,
);

let header = default_header();
let relay_parent = header.hash();
let session_index = SessionIndex::from(0_u32);

let keystore = LocalKeystore::in_memory();
let keystore = Arc::new(keystore) as KeystorePtr;
let signing_context = SigningContext { parent_hash: relay_parent, session_index };

let validators = vec![
keyring::Sr25519Keyring::Alice,
keyring::Sr25519Keyring::Bob,
keyring::Sr25519Keyring::Charlie,
keyring::Sr25519Keyring::Dave,
];
for validator in validators.iter() {
Keystore::sr25519_generate_new(
&*keystore,
PARACHAIN_KEY_TYPE_ID,
Some(&validator.to_seed()),
)
.unwrap();
}

// Set active validators in `shared` pallet
let validator_ids =
validators.iter().map(|v| v.public().into()).collect::>();
shared::Pallet::::set_active_validators_ascending(validator_ids);

// Two scheduled parachains - ParaId(1) on CoreIndex(0) and ParaId(2) on CoreIndex(1)
- let scheduled = (0_usize..2)
+ let scheduled: BTreeMap> = (0_usize..2)
.into_iter()
- .map(|idx| (ParaId::from(1_u32 + idx as u32), CoreIndex::from(idx as u32)))
+ .map(|idx| {
+ (
+ ParaId::from(1_u32 + idx as u32),
+ [CoreIndex::from(idx as u32)].into_iter().collect(),
+ )
+ })
.collect::>();

// Set the validator groups in `scheduler`
@@ -1301,7 +1330,7 @@ mod sanitizers {
(
CoreIndex::from(0),
VecDeque::from([ParasEntry::new(
- Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) },
+ Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) },
RELAY_PARENT_NUM,
)]),
),
@@ -1319,12 +1348,12 @@ mod sanitizers {
match group_index {
group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]),
group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]),
- _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"),
+ _ => panic!("Group index out of bounds"),
}
.map(|m| m.into_iter().map(ValidatorIndex).collect::>())
};

- // Two backed candidates from each parachain
+ // One backed candidate from each parachain
let backed_candidates = (0_usize..2)
.into_iter()
.map(|idx0| {
@@ -1348,6 +1377,7 @@ mod sanitizers {
&keystore,
&signing_context,
BackingKind::Threshold,
+ core_index_enabled.then_some(CoreIndex(idx0 as u32)),
);
backed
})
.collect::>();
@@ -1369,13 +1399,373 @@ mod sanitizers {
]
);

- TestData { backed_candidates, scheduled_paras: scheduled }
+ let all_backed_candidates_with_core = backed_candidates
+ .iter()
+ .map(|candidate| {
+ // Only one entry for this test data.
+ (
+ candidate.clone(),
+ scheduled
+ .get(&candidate.descriptor().para_id)
+ .unwrap()
+ .first()
+ .copied()
+ .unwrap(),
+ )
+ })
+ .collect();
+
+ TestData {
+ backed_candidates,
+ scheduled_paras: scheduled,
+ all_backed_candidates_with_core,
+ }
}

- #[test]
- fn happy_path() {
+ // Generate test data for the candidates and assert that the environment is set as expected
+ // (check the comments for details)
+ // Para 1 scheduled on core 0 and core 1. Two candidates are supplied.
+ // Para 2 scheduled on cores 2 and 3. One candidate supplied.
+ // Para 3 scheduled on core 4. One candidate supplied.
+ // Para 4 scheduled on core 5. Two candidates supplied.
+ // Para 5 scheduled on core 6. No candidates supplied.
+ fn get_test_data_multiple_cores_per_para(core_index_enabled: bool) -> TestData {
+ const RELAY_PARENT_NUM: u32 = 3;
+
+ // Add the relay parent to `shared` pallet. Otherwise some code (e.g.
filtering backing + // votes) won't behave correctly + shared::Pallet::::add_allowed_relay_parent( + default_header().hash(), + Default::default(), + RELAY_PARENT_NUM, + 1, + ); + + let header = default_header(); + let relay_parent = header.hash(); + let session_index = SessionIndex::from(0_u32); + + let keystore = LocalKeystore::in_memory(); + let keystore = Arc::new(keystore) as KeystorePtr; + let signing_context = SigningContext { parent_hash: relay_parent, session_index }; + + let validators = vec![ + keyring::Sr25519Keyring::Alice, + keyring::Sr25519Keyring::Bob, + keyring::Sr25519Keyring::Charlie, + keyring::Sr25519Keyring::Dave, + keyring::Sr25519Keyring::Eve, + keyring::Sr25519Keyring::Ferdie, + keyring::Sr25519Keyring::One, + ]; + for validator in validators.iter() { + Keystore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + + // Set active validators in `shared` pallet + let validator_ids = + validators.iter().map(|v| v.public().into()).collect::>(); + shared::Pallet::::set_active_validators_ascending(validator_ids); + + // Set the validator groups in `scheduler` + scheduler::Pallet::::set_validator_groups(vec![ + vec![ValidatorIndex(0)], + vec![ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3)], + vec![ValidatorIndex(4)], + vec![ValidatorIndex(5)], + vec![ValidatorIndex(6)], + ]); + + // Update scheduler's claimqueue with the parachains + scheduler::Pallet::::set_claimqueue(BTreeMap::from([ + ( + CoreIndex::from(0), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(0) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(1), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 1.into(), core_index: CoreIndex(1) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(2), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(2) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(3), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 2.into(), core_index: CoreIndex(3) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(4), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 3.into(), core_index: CoreIndex(4) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(5), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 4.into(), core_index: CoreIndex(5) }, + RELAY_PARENT_NUM, + )]), + ), + ( + CoreIndex::from(6), + VecDeque::from([ParasEntry::new( + Assignment::Pool { para_id: 5.into(), core_index: CoreIndex(6) }, + RELAY_PARENT_NUM, + )]), + ), + ])); + + // Callback used for backing candidates + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0]), + group_index if group_index == GroupIndex::from(1) => Some(vec![1]), + group_index if group_index == GroupIndex::from(2) => Some(vec![2]), + group_index if group_index == GroupIndex::from(3) => Some(vec![3]), + group_index if group_index == GroupIndex::from(4) => Some(vec![4]), + group_index if group_index == GroupIndex::from(5) => Some(vec![5]), + group_index if group_index == GroupIndex::from(6) => Some(vec![6]), + + _ => panic!("Group index out of bounds"), + } + .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) + }; + + let mut backed_candidates = vec![]; + let mut all_backed_candidates_with_core = vec![]; + + // Para 1 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), 
+ relay_parent, + pov_hash: Hash::repeat_byte(1 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed: BackedCandidate = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(0 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(0 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core.push((backed, CoreIndex(0))); + } + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(1), + relay_parent, + pov_hash: Hash::repeat_byte(2 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(1 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(1 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core.push((backed, CoreIndex(1))); + } + } + + // Para 2 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(2), + relay_parent, + pov_hash: Hash::repeat_byte(3 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(2 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(2 as u32)), + ); + backed_candidates.push(backed.clone()); + if core_index_enabled { + all_backed_candidates_with_core.push((backed, CoreIndex(2))); + } + } + + // Para 3 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(3), + relay_parent, + pov_hash: Hash::repeat_byte(4 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(4 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(4 as u32)), + ); + backed_candidates.push(backed.clone()); + all_backed_candidates_with_core.push((backed, CoreIndex(4))); + } + + // Para 4 + { + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(4), + relay_parent, + pov_hash: Hash::repeat_byte(5 as u8), + persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(5 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + None, + ); + backed_candidates.push(backed.clone()); + all_backed_candidates_with_core.push((backed, CoreIndex(5))); + + let mut candidate = TestCandidateBuilder { + para_id: ParaId::from(4), + relay_parent, + pov_hash: Hash::repeat_byte(6 as u8), + 
persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + let backed = back_candidate( + candidate, + &validators, + group_validators(GroupIndex::from(5 as u32)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + core_index_enabled.then_some(CoreIndex(5 as u32)), + ); + backed_candidates.push(backed.clone()); + } + + // No candidate for para 5. + + // State sanity checks + assert_eq!( + >::scheduled_paras().collect::>(), + vec![ + (CoreIndex(0), ParaId::from(1)), + (CoreIndex(1), ParaId::from(1)), + (CoreIndex(2), ParaId::from(2)), + (CoreIndex(3), ParaId::from(2)), + (CoreIndex(4), ParaId::from(3)), + (CoreIndex(5), ParaId::from(4)), + (CoreIndex(6), ParaId::from(5)), + ] + ); + let mut scheduled: BTreeMap> = BTreeMap::new(); + for (core_idx, para_id) in >::scheduled_paras() { + scheduled.entry(para_id).or_default().insert(core_idx); + } + + assert_eq!( + shared::Pallet::::active_validator_indices(), + vec![ + ValidatorIndex(0), + ValidatorIndex(1), + ValidatorIndex(2), + ValidatorIndex(3), + ValidatorIndex(4), + ValidatorIndex(5), + ValidatorIndex(6), + ] + ); + + TestData { + backed_candidates, + scheduled_paras: scheduled, + all_backed_candidates_with_core, + } + } + + #[rstest] + #[case(false)] + #[case(true)] + fn happy_path(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { backed_candidates, scheduled_paras: scheduled } = get_test_data(); + let TestData { + backed_candidates, + all_backed_candidates_with_core, + scheduled_paras: scheduled, + } = get_test_data(core_index_enabled); let has_concluded_invalid = |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; @@ -1385,47 +1775,95 @@ mod sanitizers { backed_candidates.clone(), &>::allowed_relay_parents(), has_concluded_invalid, - &scheduled + scheduled, + core_index_enabled ), SanitizedBackedCandidates { - backed_candidates, - votes_from_disabled_were_dropped: false + backed_candidates_with_core: all_backed_candidates_with_core, + votes_from_disabled_were_dropped: false, + dropped_unscheduled_candidates: false } ); + }); + } + + #[rstest] + #[case(false)] + #[case(true)] + fn test_with_multiple_cores_per_para(#[case] core_index_enabled: bool) { + new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let TestData { + backed_candidates, + all_backed_candidates_with_core: expected_all_backed_candidates_with_core, + scheduled_paras: scheduled, + } = get_test_data_multiple_cores_per_para(core_index_enabled); + + let has_concluded_invalid = + |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; - {} + assert_eq!( + sanitize_backed_candidates::( + backed_candidates.clone(), + &>::allowed_relay_parents(), + has_concluded_invalid, + scheduled, + core_index_enabled + ), + SanitizedBackedCandidates { + backed_candidates_with_core: expected_all_backed_candidates_with_core, + votes_from_disabled_were_dropped: false, + dropped_unscheduled_candidates: true + } + ); }); } // nothing is scheduled, so no paraids match, thus all backed candidates are skipped - #[test] - fn nothing_scheduled() { + #[rstest] + #[case(false, false)] + #[case(true, true)] + #[case(false, true)] + #[case(true, false)] + fn nothing_scheduled( + #[case] core_index_enabled: bool, + #[case] multiple_cores_per_para: bool, + ) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { 
backed_candidates, scheduled_paras: _ } = get_test_data(); - let scheduled = &BTreeMap::new(); + let TestData { backed_candidates, .. } = if multiple_cores_per_para { + get_test_data_multiple_cores_per_para(core_index_enabled) + } else { + get_test_data(core_index_enabled) + }; + let scheduled = BTreeMap::new(); let has_concluded_invalid = |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; let SanitizedBackedCandidates { - backed_candidates: sanitized_backed_candidates, + backed_candidates_with_core: sanitized_backed_candidates, votes_from_disabled_were_dropped, + dropped_unscheduled_candidates, } = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), has_concluded_invalid, - &scheduled, + scheduled, + core_index_enabled, ); assert!(sanitized_backed_candidates.is_empty()); assert!(!votes_from_disabled_were_dropped); + assert!(dropped_unscheduled_candidates); }); } // candidates that have concluded as invalid are filtered out - #[test] - fn invalid_are_filtered_out() { + #[rstest] + #[case(false)] + #[case(true)] + fn invalid_are_filtered_out(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { backed_candidates, scheduled_paras: scheduled } = get_test_data(); + let TestData { backed_candidates, scheduled_paras: scheduled, .. } = + get_test_data(core_index_enabled); // mark every second one as concluded invalid let set = { @@ -1440,45 +1878,55 @@ mod sanitizers { let has_concluded_invalid = |_idx: usize, candidate: &BackedCandidate| set.contains(&candidate.hash()); let SanitizedBackedCandidates { - backed_candidates: sanitized_backed_candidates, + backed_candidates_with_core: sanitized_backed_candidates, votes_from_disabled_were_dropped, + dropped_unscheduled_candidates, } = sanitize_backed_candidates::( backed_candidates.clone(), &>::allowed_relay_parents(), has_concluded_invalid, - &scheduled, + scheduled, + core_index_enabled, ); assert_eq!(sanitized_backed_candidates.len(), backed_candidates.len() / 2); assert!(!votes_from_disabled_were_dropped); + assert!(!dropped_unscheduled_candidates); }); } - #[test] - fn disabled_non_signing_validator_doesnt_get_filtered() { + #[rstest] + #[case(false)] + #[case(true)] + fn disabled_non_signing_validator_doesnt_get_filtered(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut backed_candidates, scheduled_paras } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. 
} = + get_test_data(core_index_enabled); // Disable Eve set_disabled_validators(vec![4]); - let before = backed_candidates.clone(); + let before = all_backed_candidates_with_core.clone(); // Eve is disabled but no backing statement is signed by it so nothing should be // filtered assert!(!filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - &scheduled_paras + core_index_enabled )); - assert_eq!(backed_candidates, before); + assert_eq!(all_backed_candidates_with_core, before); }); } - - #[test] - fn drop_statements_from_disabled_without_dropping_candidate() { + #[rstest] + #[case(false)] + #[case(true)] + fn drop_statements_from_disabled_without_dropping_candidate( + #[case] core_index_enabled: bool, + ) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut backed_candidates, scheduled_paras } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. } = + get_test_data(core_index_enabled); // Disable Alice set_disabled_validators(vec![0]); @@ -1491,61 +1939,83 @@ mod sanitizers { configuration::Pallet::::force_set_active_config(hc); // Verify the initial state is as expected - assert_eq!(backed_candidates.get(0).unwrap().validity_votes.len(), 2); assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(0).unwrap(), - true + all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + 2 ); - assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(1).unwrap(), - true - ); - let untouched = backed_candidates.get(1).unwrap().clone(); + let (validator_indices, maybe_core_index) = all_backed_candidates_with_core + .get(0) + .unwrap() + .0 + .validator_indices_and_core_index(core_index_enabled); + if core_index_enabled { + assert!(maybe_core_index.is_some()); + } else { + assert!(maybe_core_index.is_none()); + } + + assert_eq!(validator_indices.get(0).unwrap(), true); + assert_eq!(validator_indices.get(1).unwrap(), true); + let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); assert!(filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - &scheduled_paras + core_index_enabled )); + let (validator_indices, maybe_core_index) = all_backed_candidates_with_core + .get(0) + .unwrap() + .0 + .validator_indices_and_core_index(core_index_enabled); + if core_index_enabled { + assert!(maybe_core_index.is_some()); + } else { + assert!(maybe_core_index.is_none()); + } + // there should still be two backed candidates - assert_eq!(backed_candidates.len(), 2); + assert_eq!(all_backed_candidates_with_core.len(), 2); // but the first one should have only one validity vote - assert_eq!(backed_candidates.get(0).unwrap().validity_votes.len(), 1); - // Validator 0 vote should be dropped, validator 1 - retained assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(0).unwrap(), - false - ); - assert_eq!( - backed_candidates.get(0).unwrap().validator_indices.get(1).unwrap(), - true + all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + 1 ); + // Validator 0 vote should be dropped, validator 1 - retained + assert_eq!(validator_indices.get(0).unwrap(), false); + assert_eq!(validator_indices.get(1).unwrap(), true); // the second candidate shouldn't be modified - assert_eq!(*backed_candidates.get(1).unwrap(), untouched); + assert_eq!(all_backed_candidates_with_core.get(1).unwrap().0, 
untouched); }); } - #[test] - fn drop_candidate_if_all_statements_are_from_disabled() { + #[rstest] + #[case(false)] + #[case(true)] + fn drop_candidate_if_all_statements_are_from_disabled(#[case] core_index_enabled: bool) { new_test_ext(MockGenesisConfig::default()).execute_with(|| { - let TestData { mut backed_candidates, scheduled_paras } = get_test_data(); + let TestData { mut all_backed_candidates_with_core, .. } = + get_test_data(core_index_enabled); // Disable Alice and Bob set_disabled_validators(vec![0, 1]); // Verify the initial state is as expected - assert_eq!(backed_candidates.get(0).unwrap().validity_votes.len(), 2); - let untouched = backed_candidates.get(1).unwrap().clone(); + assert_eq!( + all_backed_candidates_with_core.get(0).unwrap().0.validity_votes().len(), + 2 + ); + let untouched = all_backed_candidates_with_core.get(1).unwrap().0.clone(); assert!(filter_backed_statements_from_disabled_validators::( - &mut backed_candidates, + &mut all_backed_candidates_with_core, &>::allowed_relay_parents(), - &scheduled_paras + core_index_enabled )); - assert_eq!(backed_candidates.len(), 1); - assert_eq!(*backed_candidates.get(0).unwrap(), untouched); + assert_eq!(all_backed_candidates_with_core.len(), 1); + assert_eq!(all_backed_candidates_with_core.get(0).unwrap().0, untouched); }); } } diff --git a/polkadot/runtime/parachains/src/paras_inherent/weights.rs b/polkadot/runtime/parachains/src/paras_inherent/weights.rs index 05cc53fae046..0f4e5be572a6 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/weights.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/weights.rs @@ -149,11 +149,11 @@ pub fn backed_candidate_weight( candidate: &BackedCandidate, ) -> Weight { set_proof_size_to_tx_size( - if candidate.candidate.commitments.new_validation_code.is_some() { + if candidate.candidate().commitments.new_validation_code.is_some() { <::WeightInfo as WeightInfo>::enter_backed_candidate_code_upgrade() } else { <::WeightInfo as WeightInfo>::enter_backed_candidates_variable( - candidate.validity_votes.len() as u32, + candidate.validity_votes().len() as u32, ) }, candidate, diff --git a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml new file mode 100644 index 000000000000..0dfd814e10a5 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.toml @@ -0,0 +1,38 @@ +[settings] +timeout = 1000 +bootnode = true + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + max_validators_per_core = 2 + needed_approvals = 4 + coretime_cores = 2 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.nodes]] + name = "alice" + validator = "true" + + [[relaychain.node_groups]] + name = "validator" + count = 3 + args = [ "-lparachain=debug,runtime=debug"] + +[[parachains]] +id = 2000 +default_command = "polkadot-parachain" +add_to_genesis = false +register_para = true +onboard_as_parachain = false + + [parachains.collator] + name = "collator2000" + command = "polkadot-parachain" + args = [ "-lparachain=debug" ] diff --git a/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl new file mode 100644 index 000000000000..a7193c9282b9 --- /dev/null +++ 
b/polkadot/zombienet_tests/functional/0012-elastic-scaling-mvp.zndsl @@ -0,0 +1,28 @@ +Description: Test that a paraid acquiring multiple cores does not brick itself if ElasticScalingMVP feature is enabled +Network: ./0012-elastic-scaling-mvp.toml +Creds: config + +# Check authority status. +validator: reports node_roles is 4 + +validator: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds + +# Ensure parachain was able to make progress. +validator: parachain 2000 block height is at least 10 within 200 seconds + +# Register the second core assigned to this parachain. +alice: js-script ./0012-register-para.js return is 0 within 600 seconds + +validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds + +# Parachain will now be stalled +validator: parachain 2000 block height is lower than 20 within 300 seconds + +# Enable the ElasticScalingMVP node feature. +alice: js-script ./0012-enable-node-feature.js with "1" return is 0 within 600 seconds + +# Wait two sessions for the config to be updated. +sleep 120 seconds + +# Ensure parachain is now making progress. +validator: parachain 2000 block height is at least 30 within 200 seconds diff --git a/polkadot/zombienet_tests/functional/0012-enable-node-feature.js b/polkadot/zombienet_tests/functional/0012-enable-node-feature.js new file mode 100644 index 000000000000..4822e1f66447 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0012-enable-node-feature.js @@ -0,0 +1,37 @@ +async function run(nodeName, networkInfo, index) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + await zombie.util.cryptoWaitReady(); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + await new Promise(async (resolve, reject) => { + const unsub = await api.tx.sudo + .sudo(api.tx.configuration.setNodeFeature(Number(index), true)) + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash ${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + + + return 0; +} + +module.exports = { run }; diff --git a/polkadot/zombienet_tests/functional/0012-register-para.js b/polkadot/zombienet_tests/functional/0012-register-para.js new file mode 100644 index 000000000000..25c7e4f5ffdd --- /dev/null +++ b/polkadot/zombienet_tests/functional/0012-register-para.js @@ -0,0 +1,37 @@ +async function run(nodeName, networkInfo, _jsArgs) { + const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + await zombie.util.cryptoWaitReady(); + + // account to submit tx + const keyring = new zombie.Keyring({ type: "sr25519" }); + const alice = keyring.addFromUri("//Alice"); + + await new Promise(async (resolve, reject) => { + const unsub = await api.tx.sudo + .sudo(api.tx.coretime.assignCore(0, 35, [[{ task: 2000 }, 57600]], null)) + .signAndSend(alice, ({ status, isError }) => { + if (status.isInBlock) { + console.log( + `Transaction included at blockhash ${status.asInBlock}`, + ); + } else if (status.isFinalized) { + console.log( + `Transaction finalized at blockHash 
${status.asFinalized}`, + ); + unsub(); + return resolve(); + } else if (isError) { + console.log(`Transaction error`); + reject(`Transaction error`); + } + }); + }); + + + + return 0; +} + +module.exports = { run }; diff --git a/prdoc/pr_3231.prdoc b/prdoc/pr_3231.prdoc new file mode 100644 index 000000000000..26e96d3635b1 --- /dev/null +++ b/prdoc/pr_3231.prdoc @@ -0,0 +1,11 @@ +title: Allow parachain which acquires multiple coretime cores to make progress + +doc: + - audience: Node Operator + description: | + Adds the needed changes so that parachains which acquire multiple coretime cores can still make progress. + Only one of the cores will be able to be occupied at a time. + Only works if the ElasticScalingMVP node feature is enabled in the runtime and the block author validator is + updated to include this change. + +crates: [ ] From de6d02591b57d03f70ed8db0c674f045ad2ea029 Mon Sep 17 00:00:00 2001 From: tmpolaczyk <44604217+tmpolaczyk@users.noreply.github.com> Date: Sat, 24 Feb 2024 11:34:05 +0100 Subject: [PATCH 34/51] Use generic hash for runtime wasm in resolve_state_version_from_wasm (#3447) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changes the runtime hash algorithm used in `resolve_state_version_from_wasm` from `DefaultHasher` to a caller-provided one (usually `HashingFor`), to match the one used elsewhere. This fixes an issue where the runtime wasm is compiled 3 times when starting the `tanssi-node` with `--dev`. With this fix, the runtime wasm is only compiled 2 times. The other redundant compilation is caused by the `GenesisConfigBuilderRuntimeCaller` struct, which ignores the runtime cache. --------- Co-authored-by: Bastian Köcher --- prdoc/pr_3447.prdoc | 13 +++++++++++++ .../client/chain-spec/src/genesis_block.rs | 18 ++++++++---------- substrate/client/service/src/client/client.rs | 6 ++++-- 3 files changed, 25 insertions(+), 12 deletions(-) create mode 100644 prdoc/pr_3447.prdoc diff --git a/prdoc/pr_3447.prdoc b/prdoc/pr_3447.prdoc new file mode 100644 index 000000000000..1d8d4f409f77 --- /dev/null +++ b/prdoc/pr_3447.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Use generic hash for runtime wasm in resolve_state_version_from_wasm + +doc: + - audience: Node Dev + description: | + Changes the runtime hash algorithm used in resolve_state_version_from_wasm from DefaultHasher to a caller-provided + one (usually HashingFor). Fixes a bug where the runtime wasm was being compiled again when it was not + needed, because the hash did not match +crates: + - name: sc-chain-spec diff --git a/substrate/client/chain-spec/src/genesis_block.rs b/substrate/client/chain-spec/src/genesis_block.rs index 6aa156a620a7..3c7b9f64dcd6 100644 --- a/substrate/client/chain-spec/src/genesis_block.rs +++ b/substrate/client/chain-spec/src/genesis_block.rs @@ -18,23 +18,25 @@ //! Tool for creating the genesis block. 
-use std::{collections::hash_map::DefaultHasher, marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc}; +use codec::Encode; use sc_client_api::{backend::Backend, BlockImportOperation}; use sc_executor::RuntimeVersionOf; use sp_core::storage::{well_known_keys, StateVersion, Storage}; use sp_runtime::{ - traits::{Block as BlockT, Hash as HashT, Header as HeaderT, Zero}, + traits::{Block as BlockT, Hash as HashT, HashingFor, Header as HeaderT, Zero}, BuildStorage, }; /// Return the state version given the genesis storage and executor. -pub fn resolve_state_version_from_wasm( +pub fn resolve_state_version_from_wasm( storage: &Storage, executor: &E, ) -> sp_blockchain::Result where E: RuntimeVersionOf, + H: HashT, { if let Some(wasm) = storage.top.get(well_known_keys::CODE) { let mut ext = sp_state_machine::BasicExternalities::new_empty(); // just to read runtime version. @@ -43,12 +45,7 @@ where let runtime_code = sp_core::traits::RuntimeCode { code_fetcher: &code_fetcher, heap_pages: None, - hash: { - use std::hash::{Hash, Hasher}; - let mut state = DefaultHasher::new(); - wasm.hash(&mut state); - state.finish().to_le_bytes().to_vec() - }, + hash: ::hash(wasm).encode(), }; let runtime_version = RuntimeVersionOf::runtime_version(executor, &mut ext, &runtime_code) .map_err(|e| sp_blockchain::Error::VersionInvalid(e.to_string()))?; @@ -129,7 +126,8 @@ impl, E: RuntimeVersionOf> BuildGenesisBlock sp_blockchain::Result<(Block, Self::BlockImportOperation)> { let Self { genesis_storage, commit_genesis_state, backend, executor, _phantom } = self; - let genesis_state_version = resolve_state_version_from_wasm(&genesis_storage, &executor)?; + let genesis_state_version = + resolve_state_version_from_wasm::<_, HashingFor>(&genesis_storage, &executor)?; let mut op = backend.begin_operation()?; let state_root = op.set_genesis_state(genesis_storage, commit_genesis_state, genesis_state_version)?; diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index aa9c1b80a29a..108c258595a7 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -675,8 +675,10 @@ where // This is use by fast sync for runtime version to be resolvable from // changes. - let state_version = - resolve_state_version_from_wasm(&storage, &self.executor)?; + let state_version = resolve_state_version_from_wasm::<_, HashingFor>( + &storage, + &self.executor, + )?; let state_root = operation.op.reset_storage(storage, state_version)?; if state_root != *import_headers.post().state_root() { // State root mismatch when importing state. This should not happen in From ce5de99540eb1ceaed4b0a5d085983c4a273fdda Mon Sep 17 00:00:00 2001 From: eskimor Date: Mon, 26 Feb 2024 07:23:28 +0100 Subject: [PATCH 35/51] Remove redundant parachains assigner pallet. (#3457) from Westend and Rococo. 
--------- Co-authored-by: eskimor Co-authored-by: command-bot <> --- polkadot/runtime/rococo/src/lib.rs | 8 ++------ polkadot/runtime/westend/src/lib.rs | 8 ++------ 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 03e96ab388b7..1566911fc228 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -46,9 +46,8 @@ use sp_std::{cmp::Ordering, collections::btree_map::BTreeMap, prelude::*}; use runtime_parachains::{ assigner_coretime as parachains_assigner_coretime, - assigner_on_demand as parachains_assigner_on_demand, - assigner_parachains as parachains_assigner_parachains, - configuration as parachains_configuration, coretime, disputes as parachains_disputes, + assigner_on_demand as parachains_assigner_on_demand, configuration as parachains_configuration, + coretime, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, inclusion::{AggregateMessageOrigin, UmpQueueId}, @@ -1038,8 +1037,6 @@ impl parachains_assigner_on_demand::Config for Runtime { type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; } -impl parachains_assigner_parachains::Config for Runtime {} - impl parachains_assigner_coretime::Config for Runtime {} impl parachains_initializer::Config for Runtime { @@ -1401,7 +1398,6 @@ construct_runtime! { ParasSlashing: parachains_slashing = 63, MessageQueue: pallet_message_queue = 64, OnDemandAssignmentProvider: parachains_assigner_on_demand = 66, - ParachainsAssignmentProvider: parachains_assigner_parachains = 67, CoretimeAssignmentProvider: parachains_assigner_coretime = 68, // Parachain Onboarding Pallets. Start indices at 70 to leave room. diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index bbb010f60bff..b0b70bff8de9 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -68,9 +68,8 @@ use runtime_common::{ }; use runtime_parachains::{ assigner_coretime as parachains_assigner_coretime, - assigner_on_demand as parachains_assigner_on_demand, - assigner_parachains as parachains_assigner_parachains, - configuration as parachains_configuration, coretime, disputes as parachains_disputes, + assigner_on_demand as parachains_assigner_on_demand, configuration as parachains_configuration, + coretime, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, inclusion::{AggregateMessageOrigin, UmpQueueId}, @@ -1243,8 +1242,6 @@ impl parachains_assigner_on_demand::Config for Runtime { type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; } -impl parachains_assigner_parachains::Config for Runtime {} - impl parachains_assigner_coretime::Config for Runtime {} impl parachains_initializer::Config for Runtime { @@ -1505,7 +1502,6 @@ construct_runtime! 
{
 		ParaSessionInfo: parachains_session_info = 52,
 		ParasDisputes: parachains_disputes = 53,
 		ParasSlashing: parachains_slashing = 54,
-		ParachainsAssignmentProvider: parachains_assigner_parachains = 55,
 		OnDemandAssignmentProvider: parachains_assigner_on_demand = 56,
 		CoretimeAssignmentProvider: parachains_assigner_coretime = 57,

From b9c792a4b8adb4617cafe7e0b8949b88f5a9d247 Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Mon, 26 Feb 2024 10:06:21 +0200
Subject: [PATCH 36/51] Add more debug logs to understand if
 statement-distribution is misbehaving (#3419)

Add more debug logs to understand if statement-distribution is in a bad
state; this should be useful for debugging
https://github.com/paritytech/polkadot-sdk/issues/3314 on production
networks.

Additionally, increase the number of parallel requests a node may have in
flight: requests take around 100ms on kusama, and the previous limit of 5
parallel requests was picked mostly at random, so there is no reason we
can't do more than that.

---------

Signed-off-by: Alexandru Gheorghe
Co-authored-by: ordian
---
 .../statement-distribution/src/v2/cluster.rs  | 25 ++++++++++++++++++-
 .../statement-distribution/src/v2/mod.rs      | 17 ++++++++++++-
 .../statement-distribution/src/v2/requests.rs | 14 ++++++++++-
 3 files changed, 53 insertions(+), 3 deletions(-)

diff --git a/polkadot/node/network/statement-distribution/src/v2/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/cluster.rs
index 619114de9670..c09916e56201 100644
--- a/polkadot/node/network/statement-distribution/src/v2/cluster.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/cluster.rs
@@ -55,8 +55,9 @@
 //! and to keep track of what we have sent to other validators in the group and what we may
 //! continue to send them.
 
-use polkadot_primitives::{CandidateHash, CompactStatement, ValidatorIndex};
+use polkadot_primitives::{CandidateHash, CompactStatement, Hash, ValidatorIndex};
 
+use crate::LOG_TARGET;
 use std::collections::{HashMap, HashSet};
 
 #[derive(Hash, PartialEq, Eq)]
@@ -424,6 +425,28 @@ impl ClusterTracker {
 	fn is_in_group(&self, validator: ValidatorIndex) -> bool {
 		self.validators.contains(&validator)
 	}
+
+	/// Dumps pending statements for this cluster.
+	///
+	/// Normally we should not have pending statements to validators in our cluster,
+	/// but if we do for all validators in our cluster, then we don't participate
+	/// in backing. Occasional pending statements are expected if two authorities
+	/// can't detect each other or after a restart, where it takes a while to discover
+	/// the whole network.
+
+	pub fn warn_if_too_many_pending_statements(&self, parent_hash: Hash) {
+		if self.pending.iter().filter(|pending| !pending.1.is_empty()).count() >=
+			self.validators.len()
+		{
+			gum::warn!(
+				target: LOG_TARGET,
+				pending_statements = ?self.pending,
+				?parent_hash,
+				"Cluster has too many pending statements, something wrong with our connection to our group peers \n
+				Restart might be needed if validator gets 0 backing rewards for more than 3-4 consecutive sessions"
+			);
+		}
+	}
 }
 
 /// Incoming statement was accepted.
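The warning condition added above fires only when every validator in the cluster still has undelivered statements, since a single fully served peer is enough for backing to proceed. A minimal, runnable sketch of that check, using simplified stand-in types (`should_warn` and its arguments are hypothetical, not the real `ClusterTracker` internals):

```rust
use std::collections::{HashMap, HashSet};

// Hypothetical simplification: map each group validator to the set of
// statement identifiers we still owe them; warn when no peer is caught up.
fn should_warn(pending: &HashMap<u32, HashSet<u64>>, group_size: usize) -> bool {
    pending.values().filter(|p| !p.is_empty()).count() >= group_size
}

fn main() {
    let mut pending = HashMap::new();
    pending.insert(0u32, HashSet::from([7u64]));
    pending.insert(1u32, HashSet::from([9u64]));
    // Every peer in a group of two is blocked, so the node cannot back anything.
    assert!(should_warn(&pending, 2));
    // With a third, fully served peer, an occasional backlog is expected.
    assert!(!should_warn(&pending, 3));
}
```

In the actual code the same counting is done over `self.pending` against `self.validators.len()`.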
diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs
index dc29c19a48e3..2c9cdba4ea8e 100644
--- a/polkadot/node/network/statement-distribution/src/v2/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs
@@ -251,6 +251,13 @@ impl PerSessionState {
 		if local_index.is_some() {
 			self.local_validator.get_or_insert(LocalValidatorIndex::Inactive);
 		}
+
+		gum::info!(
+			target: LOG_TARGET,
+			index_in_gossip_topology = ?local_index,
+			index_in_parachain_authorities = ?self.local_validator,
+			"Node uses the following topology indices"
+		);
 	}
 
 	/// Returns `true` if local is neither active or inactive validator node.
@@ -768,7 +775,15 @@ pub(crate) fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) {
 		let pruned = state.implicit_view.deactivate_leaf(*leaf);
 		for pruned_rp in pruned {
 			// clean up per-relay-parent data based on everything removed.
-			state.per_relay_parent.remove(&pruned_rp);
+			state
+				.per_relay_parent
+				.remove(&pruned_rp)
+				.as_ref()
+				.and_then(|pruned| pruned.active_validator_state())
+				.map(|active_state| {
+					active_state.cluster_tracker.warn_if_too_many_pending_statements(pruned_rp)
+				});
+
 			// clean up requests related to this relay parent.
 			state.request_manager.remove_by_relay_parent(*leaf);
 		}
diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs
index bed3d5c18ae2..bbcb268415e6 100644
--- a/polkadot/node/network/statement-distribution/src/v2/requests.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs
@@ -315,7 +315,16 @@ impl RequestManager {
 		request_props: impl Fn(&CandidateIdentifier) -> Option,
 		peer_advertised: impl Fn(&CandidateIdentifier, &PeerId) -> Option,
 	) -> Option> {
-		if response_manager.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize {
+		// The number of parallel requests a node can answer is limited by
+		// `MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS`, however there is no
+		// need for the current node to limit itself to the same number of
+		// requests, because the requests are going to different nodes anyway.
+		// While looking at https://github.com/paritytech/polkadot-sdk/issues/3314,
+		// we found out that these requests take around 100ms to fulfill, so it
+		// would make sense to try to request things as early as we can, given
+		// we would need to request it for each candidate, around 25 right now
+		// on kusama.
+		if response_manager.len() >= 2 * MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize {
 			return None
 		}
 
@@ -1027,6 +1036,7 @@ mod tests {
 			let peer_advertised = |_identifier: &CandidateIdentifier, _peer: &_| {
 				Some(StatementFilter::full(group_size))
 			};
+
 			let outgoing = request_manager
 				.next_request(&mut response_manager, request_props, peer_advertised)
 				.unwrap();
@@ -1148,6 +1158,7 @@ mod tests {
 		{
 			let request_props =
 				|_identifier: &CandidateIdentifier| Some((&request_properties).clone());
+
 			let outgoing = request_manager
 				.next_request(&mut response_manager, request_props, peer_advertised)
 				.unwrap();
@@ -1230,6 +1241,7 @@ mod tests {
 		{
 			let request_props =
 				|_identifier: &CandidateIdentifier| Some((&request_properties).clone());
+
 			let outgoing = request_manager
 				.next_request(&mut response_manager, request_props, peer_advertised)
 				.unwrap();

From 3d9439f64652e60d1dee85ca98fd865775d9ca45 Mon Sep 17 00:00:00 2001
From: Branislav Kontur
Date: Mon, 26 Feb 2024 09:12:28 +0100
Subject: [PATCH 37/51] [pallet-xcm] Adjust benchmarks
 (teleport_assets/reserve_transfer_assets) not relying on ED (#3464)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Problem

During the bumping of the `polkadot-fellows` repository to `polkadot-sdk@1.6.0`,
I encountered a situation where the benchmarks `teleport_assets` and
`reserve_transfer_assets` in AssetHubKusama started to fail. This issue arose due
to a decreased ED balance for AssetHubs introduced
[here](https://github.com/polkadot-fellows/runtimes/pull/158/files#diff-80668ff8e793b64f36a9a3ec512df5cbca4ad448c157a5d81abda1b15f35f1daR213),
and also because of a
[missing CI pipeline](https://github.com/polkadot-fellows/runtimes/issues/197)
to check the benchmarks, which went unnoticed.

These benchmarks expect the `caller` to have enough:
1. balance to transfer (BTT)
2. balance for paying delivery (BFPD)

So the initial balance was calculated as `ED * 100`, which seems reasonable:
```
const ED_MULTIPLIER: u32 = 100;
let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into());
```

The problem arises when the price for delivery is 100 times higher than the
existential deposit; in other words, when `ED * 100` does not cover
`BTT` + `BFPD`. I checked AHR/AHW/AHK/AHP, and only AssetHubKusama has this
problem:
```
ED: 3333333
calculated price to parent delivery: 1031666634 (from xcm logs from the benchmark)
---
3333333 * 100 - BTT(3333333) - BFPD(1031666634) = −701666667
```
which results in the error:
```
2024-02-23 09:19:42 Unable to charge fee with error Module(ModuleError { index: 31, error: [17, 0, 0, 0], message: Some("FeesNotMet") })
Error: Input("Benchmark pallet_xcm::reserve_transfer_assets failed: FeesNotMet")
```

## Solution

The benchmarks `teleport_assets` and `reserve_transfer_assets` were fixed by
removing `ED * 100` and replacing it with `DeliveryHelper` logic, which
calculates the (almost real) price for delivery and sets it, along with the
existential deposit, as the initial balance for the account used in the
benchmark.
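To make the arithmetic above concrete, here is a minimal, runnable Rust sketch of why `ED * 100` underfunds the benchmark account on AssetHubKusama, using the numbers quoted above; the variable names are illustrative stand-ins, not pallet-xcm APIs:

```rust
fn main() {
    let ed: u128 = 3_333_333; // AssetHubKusama existential deposit
    let btt: u128 = ed; // balance to transfer
    let bfpd: u128 = 1_031_666_634; // price of parent delivery, from the logs above

    // Old scheme: `ED * 100` falls short by exactly the amount in the error above.
    let old_balance = ed * 100;
    assert!(old_balance < btt + bfpd); // hence `FeesNotMet`
    assert_eq!(old_balance as i128 - btt as i128 - bfpd as i128, -701_666_667);

    // New scheme: fund the account with the existential deposit plus the
    // delivery fee estimated up front, which is roughly what the
    // `DeliveryHelper`-based setup provides.
    let new_balance = ed + btt + bfpd;
    assert!(new_balance >= btt + bfpd);
}
```

Funding the account this way keeps the benchmarks independent of the particular ED-to-delivery-fee ratio of any given runtime.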
## TODO - [ ] patch for 1.6 - https://github.com/paritytech/polkadot-sdk/pull/3466 - [ ] patch for 1.7 - https://github.com/paritytech/polkadot-sdk/pull/3465 - [ ] patch for 1.8 - TODO: PR --------- Co-authored-by: Francisco Aguirre --- Cargo.lock | 3 +- cumulus/pallets/parachain-system/Cargo.toml | 4 + cumulus/pallets/parachain-system/src/lib.rs | 9 +++ .../assets/asset-hub-rococo/src/lib.rs | 42 +++++++---- .../assets/asset-hub-westend/src/lib.rs | 42 +++++++---- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 8 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 8 +- .../collectives-westend/src/lib.rs | 15 +++- .../contracts/contracts-rococo/src/lib.rs | 23 +++++- .../coretime/coretime-rococo/src/lib.rs | 8 +- .../coretime/coretime-westend/src/lib.rs | 8 +- .../runtimes/people/people-rococo/src/lib.rs | 8 +- .../runtimes/people/people-westend/src/lib.rs | 8 +- cumulus/primitives/utility/Cargo.toml | 3 - cumulus/primitives/utility/src/lib.rs | 11 ++- polkadot/runtime/common/Cargo.toml | 4 - polkadot/runtime/common/src/xcm_sender.rs | 15 +++- polkadot/runtime/rococo/src/lib.rs | 28 +++++-- polkadot/runtime/westend/src/lib.rs | 46 ++++++++---- polkadot/runtime/westend/src/xcm_config.rs | 2 +- polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs | 32 +------- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 73 ++++++++++++------- polkadot/xcm/pallet-xcm/src/mock.rs | 22 ++++++ .../xcm/xcm-builder/src/currency_adapter.rs | 2 +- .../xcm/xcm-builder/src/fungible_adapter.rs | 2 +- .../xcm/xcm-builder/src/fungibles_adapter.rs | 2 +- polkadot/xcm/xcm-builder/src/lib.rs | 2 +- polkadot/xcm/xcm-builder/src/routing.rs | 35 +++++++++ 28 files changed, 329 insertions(+), 136 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a23be53b34fd..fae9eacdab9c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3874,6 +3874,7 @@ dependencies = [ "pallet-message-queue", "parity-scale-codec", "polkadot-parachain-primitives", + "polkadot-runtime-common", "polkadot-runtime-parachains", "rand", "sc-client-api", @@ -4086,7 +4087,6 @@ dependencies = [ "frame-support", "log", "pallet-asset-conversion", - "pallet-xcm-benchmarks", "parity-scale-codec", "polkadot-runtime-common", "polkadot-runtime-parachains", @@ -13277,7 +13277,6 @@ dependencies = [ "pallet-transaction-payment", "pallet-treasury", "pallet-vesting", - "pallet-xcm-benchmarks", "parity-scale-codec", "polkadot-primitives", "polkadot-primitives-test-helpers", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 86647290563c..7e0442f0b585 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -36,6 +36,7 @@ sp-version = { path = "../../../substrate/primitives/version", default-features # Polkadot polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false, features = ["wasm-api"] } polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } +polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false, optional = true } xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } # Cumulus @@ -79,6 +80,7 @@ std = [ "log/std", "pallet-message-queue/std", "polkadot-parachain-primitives/std", + "polkadot-runtime-common/std", "polkadot-runtime-parachains/std", "scale-info/std", "sp-core/std", @@ -102,6 +104,7 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-message-queue/runtime-benchmarks", 
"polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] @@ -110,6 +113,7 @@ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", "pallet-message-queue/try-runtime", + "polkadot-runtime-common?/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 5a0fa57fb171..d86a67e58f44 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -1610,6 +1610,15 @@ impl UpwardMessageSender for Pallet { } } +#[cfg(feature = "runtime-benchmarks")] +impl polkadot_runtime_common::xcm_sender::EnsureForParachain for Pallet { + fn ensure(para_id: ParaId) { + if let ChannelStatus::Closed = Self::get_channel_status(para_id) { + Self::open_outbound_hrmp_channel_for_benchmarks_or_tests(para_id) + } + } +} + /// Something that can check the inherents of a block. #[cfg_attr( feature = "parameterized-consensus-hook", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 7e550c329177..81bb66a56a29 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1355,8 +1355,31 @@ impl_runtime_apis! { Config as XcmBridgeHubRouterConfig, }; + parameter_types! { + pub ExistentialDepositAsset: Option = Some(( + TokenLocation::get(), + ExistentialDeposit::get() + ).into()); + pub const RandomParaId: ParaId = ParaId::new(43211234); + } + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -1365,7 +1388,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between AH and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), @@ -1373,17 +1396,13 @@ impl_runtime_apis! { } fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { - // AH can reserve transfer native token to some random parachain. - let random_para_id = 43211234; - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( - random_para_id.into() - ); Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, - ParentThen(Parachain(random_para_id).into()).into(), + // AH can reserve transfer native token to some random parachain. + ParentThen(Parachain(RandomParaId::get().into()).into()).into(), )) } @@ -1469,13 +1488,6 @@ impl_runtime_apis! { use xcm_config::{TokenLocation, MaxAssetsIntoHolding}; use pallet_xcm_benchmarks::asset_instance_from; - parameter_types! 
{ - pub ExistentialDepositAsset: Option = Some(( - TokenLocation::get(), - ExistentialDeposit::get() - ).into()); - } - impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationToAccountId; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index a3166106073e..3f58c679eeed 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1427,8 +1427,31 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + parameter_types! { + pub ExistentialDepositAsset: Option = Some(( + WestendLocation::get(), + ExistentialDeposit::get() + ).into()); + pub const RandomParaId: ParaId = ParaId::new(43211234); + } + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -1437,7 +1460,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between AH and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), @@ -1445,17 +1468,13 @@ impl_runtime_apis! { } fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { - // AH can reserve transfer native token to some random parachain. - let random_para_id = 43211234; - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( - random_para_id.into() - ); Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, - ParentThen(Parachain(random_para_id).into()).into(), + // AH can reserve transfer native token to some random parachain. + ParentThen(Parachain(RandomParaId::get().into()).into()).into(), )) } @@ -1546,13 +1565,6 @@ impl_runtime_apis! { use xcm_config::{MaxAssetsIntoHolding, WestendLocation}; use pallet_xcm_benchmarks::asset_instance_from; - parameter_types! { - pub ExistentialDepositAsset: Option = Some(( - WestendLocation::get(), - ExistentialDeposit::get() - ).into()); - } - impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; type AccountIdConverter = xcm_config::LocationToAccountId; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index ae50d2a93cb9..15cf045a9956 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -1109,6 +1109,12 @@ impl_runtime_apis! 
{ use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -1117,7 +1123,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between BH and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 997320ffcf26..bd42a33370dc 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -806,6 +806,12 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -814,7 +820,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between BH and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 4299e8bb3ec0..36eb0e07f87c 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -986,8 +986,21 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + parameter_types! { + pub ExistentialDepositAsset: Option = Some(( + xcm_config::WndLocation::get(), + ExistentialDeposit::get() + ).into()); + } + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -996,7 +1009,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between Collectives and Relay. 
Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }.into(), Parent.into(), diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 94cb0e502e4c..541978098caa 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -50,7 +50,7 @@ use frame_support::{ dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ConstBool, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8}, + traits::{ConstBool, ConstU16, ConstU32, ConstU64, ConstU8}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -205,6 +205,10 @@ impl pallet_authorship::Config for Runtime { type EventHandler = (CollatorSelection,); } +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + impl pallet_balances::Config for Runtime { type MaxLocks = ConstU32<50>; /// The type for recording an account's balance. @@ -212,7 +216,7 @@ impl pallet_balances::Config for Runtime { /// The ubiquitous event type. type RuntimeEvent = RuntimeEvent; type DustRemoval = (); - type ExistentialDeposit = ConstU128; + type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight; type MaxReserves = ConstU32<50>; @@ -713,9 +717,22 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + parameter_types! { + pub ExistentialDepositAsset: Option = Some(( + xcm_config::RelayLocation::get(), + ExistentialDeposit::get() + ).into()); + } + use xcm::latest::prelude::*; use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -724,7 +741,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between Contracts-System-Para and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index f07bac8b2ef0..fcd7576b1e17 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -715,6 +715,12 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -723,7 +729,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between AH and Relay. 
Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 85d70d2f17b6..80eb7863803b 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -706,6 +706,12 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -714,7 +720,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between AH and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 570dc0fa12c3..90c398917695 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -686,6 +686,12 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -694,7 +700,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between People and Relay. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index a47df66f5039..a904f7c3521b 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -686,6 +686,12 @@ impl_runtime_apis! { use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn reachable_dest() -> Option { Some(Parent.into()) } @@ -694,7 +700,7 @@ impl_runtime_apis! { // Relay/native token can be teleported between People and Relay. 
Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Parent.into()) }, Parent.into(), diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 45c0e6670942..1e2c300b9ba2 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -26,7 +26,6 @@ polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", d xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } -pallet-xcm-benchmarks = { path = "../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false } # Cumulus cumulus-primitives-core = { path = "../core", default-features = false } @@ -39,7 +38,6 @@ std = [ "frame-support/std", "log/std", "pallet-asset-conversion/std", - "pallet-xcm-benchmarks/std", "polkadot-runtime-common/std", "polkadot-runtime-parachains/std", "sp-io/std", @@ -54,7 +52,6 @@ runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", "frame-support/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", - "pallet-xcm-benchmarks/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs index 0d8921227429..abc391bdcb8e 100644 --- a/cumulus/primitives/utility/src/lib.rs +++ b/cumulus/primitives/utility/src/lib.rs @@ -760,7 +760,7 @@ mod test_trader { } } -/// Implementation of `pallet_xcm_benchmarks::EnsureDelivery` which helps to ensure delivery to the +/// Implementation of `xcm_builder::EnsureDelivery` which helps to ensure delivery to the /// parent relay chain. Deposits existential deposit for origin (if needed). /// Deposits estimated fee to the origin account (if needed). /// Allows triggering additional logic for specific `ParaId` (e.g. open HRMP channel) (if needed).
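To make the helper's role concrete, here is a minimal usage sketch, assuming only the `ensure_successful_delivery` signature shown above (the surrounding variable names are illustrative; the later `pallet-xcm` benchmarking hunks of this patch apply the same pattern):

```rust
// Prime the origin account and the transport channel before the benchmarked
// send, so that the call cannot fail on missing delivery fees.
let (fees_mode, assets) = T::DeliveryHelper::ensure_successful_delivery(
	&origin_location,
	&destination,
	xcm_executor::traits::FeeReason::ChargeFees,
);
// Both return values may be `None`, e.g. when fees are waived or when the
// destination is not the one this helper is configured for.
```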
@@ -774,17 +774,22 @@ impl< XcmConfig: xcm_executor::Config, ExistentialDeposit: Get>, PriceForDelivery: PriceForMessageDelivery, - > pallet_xcm_benchmarks::EnsureDelivery + > xcm_builder::EnsureDelivery for ToParentDeliveryHelper { fn ensure_successful_delivery( origin_ref: &Location, - _dest: &Location, + dest: &Location, fee_reason: xcm_executor::traits::FeeReason, ) -> (Option, Option) { use xcm::latest::{MAX_INSTRUCTIONS_TO_DECODE, MAX_ITEMS_IN_ASSETS}; use xcm_executor::{traits::FeeManager, FeesMode}; + // check if the destination is relay/parent + if dest.ne(&Location::parent()) { + return (None, None); + } + let mut fees_mode = None; if !XcmConfig::FeeManager::is_waived(Some(origin_ref), fee_reason) { // if not waived, we need to set up accounts for paying and receiving fees diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 2467d123f02f..eae5d4fb2ef9 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -58,8 +58,6 @@ runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parac slot-range-helper = { path = "slot_range_helper", default-features = false } xcm = { package = "staging-xcm", path = "../../xcm", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false, optional = true } - -pallet-xcm-benchmarks = { path = "../../xcm/pallet-xcm-benchmarks", default-features = false, optional = true } xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } [dev-dependencies] @@ -99,7 +97,6 @@ std = [ "pallet-transaction-payment/std", "pallet-treasury/std", "pallet-vesting/std", - "pallet-xcm-benchmarks/std", "parity-scale-codec/std", "primitives/std", "runtime-parachains/std", @@ -137,7 +134,6 @@ runtime-benchmarks = [ "pallet-timestamp/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", - "pallet-xcm-benchmarks/runtime-benchmarks", "primitives/runtime-benchmarks", "runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index 7f1100a13619..46d09afcfb2c 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -136,7 +136,7 @@ where } } -/// Implementation of `pallet_xcm_benchmarks::EnsureDelivery` which helps to ensure delivery to the +/// Implementation of `xcm_builder::EnsureDelivery` which helps to ensure delivery to the /// `ParaId` parachain (sibling or child). Deposits existential deposit for origin (if needed). /// Deposits estimated fee to the origin account (if needed). /// Allows triggering additional logic for specific `ParaId` (e.g. open HRMP channel) (if needed). @@ -164,7 +164,7 @@ impl< PriceForDelivery: PriceForMessageDelivery, Parachain: Get, ToParachainHelper: EnsureForParachain, - > pallet_xcm_benchmarks::EnsureDelivery + > xcm_builder::EnsureDelivery for ToParachainDeliveryHelper< XcmConfig, ExistentialDeposit, @@ -175,7 +175,7 @@ impl< { fn ensure_successful_delivery( origin_ref: &Location, - _dest: &Location, + dest: &Location, fee_reason: xcm_executor::traits::FeeReason, ) -> (Option, Option) { use xcm_executor::{ @@ -183,6 +183,15 @@ impl< FeesMode, }; + // check if the destination matches the expected `Parachain`.
+ if let Some(Parachain(para_id)) = dest.first_interior() { + if ParaId::from(*para_id) != Parachain::get().into() { + return (None, None) + } + } else { + return (None, None) + } + let mut fees_mode = None; if !XcmConfig::FeeManager::is_waived(Some(origin_ref), fee_reason) { // if not waived, we need to set up accounts for paying and receiving fees diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 1566911fc228..bbc79a13d37f 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -2272,12 +2272,30 @@ sp_api::impl_runtime_apis! { TokenLocation::get(), ExistentialDeposit::get() ).into()); - pub ToParachain: ParaId = rococo_runtime_constants::system_parachain::ASSET_HUB_ID.into(); + pub AssetHubParaId: ParaId = rococo_runtime_constants::system_parachain::ASSET_HUB_ID.into(); + pub const RandomParaId: ParaId = ParaId::new(43211234); } impl frame_system_benchmarking::Config for Runtime {} impl frame_benchmarking::baseline::Config for Runtime {} impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = ( + runtime_common::xcm_sender::ToParachainDeliveryHelper< + XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForChildParachainDelivery, + AssetHubParaId, + (), + >, + runtime_common::xcm_sender::ToParachainDeliveryHelper< + XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForChildParachainDelivery, + RandomParaId, + (), + > + ); + fn reachable_dest() -> Option { Some(crate::xcm_config::AssetHub::get()) } @@ -2286,7 +2304,7 @@ sp_api::impl_runtime_apis! { // Relay/native token can be teleported to/from AH. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Here.into()) }, crate::xcm_config::AssetHub::get(), @@ -2297,10 +2315,10 @@ sp_api::impl_runtime_apis! { // Relay can reserve transfer native token to some random parachain. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Here.into()) }, - Parachain(43211234).into(), + Parachain(RandomParaId::get().into()).into(), )) } @@ -2325,7 +2343,7 @@ sp_api::impl_runtime_apis! { XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, - ToParachain, + AssetHubParaId, (), >; fn valid_destination() -> Result { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index b0b70bff8de9..574710b3110d 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2344,7 +2344,36 @@ sp_api::impl_runtime_apis! { impl pallet_session_benchmarking::Config for Runtime {} impl pallet_offences_benchmarking::Config for Runtime {} impl pallet_election_provider_support_benchmarking::Config for Runtime {} + + use xcm_config::{AssetHub, TokenLocation}; + + parameter_types! 
{ + pub ExistentialDepositAsset: Option = Some(( + TokenLocation::get(), + ExistentialDeposit::get() + ).into()); + pub AssetHubParaId: ParaId = westend_runtime_constants::system_parachain::ASSET_HUB_ID.into(); + pub const RandomParaId: ParaId = ParaId::new(43211234); + } + impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = ( + runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForChildParachainDelivery, + AssetHubParaId, + (), + >, + runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForChildParachainDelivery, + RandomParaId, + (), + > + ); + fn reachable_dest() -> Option { Some(crate::xcm_config::AssetHub::get()) } @@ -2352,7 +2381,7 @@ sp_api::impl_runtime_apis! { fn teleportable_asset_and_dest() -> Option<(Asset, Location)> { // Relay/native token can be teleported to/from AH. Some(( - Asset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: AssetId(Here.into()) }, + Asset { fun: Fungible(ExistentialDeposit::get()), id: AssetId(Here.into()) }, crate::xcm_config::AssetHub::get(), )) } @@ -2361,10 +2390,10 @@ sp_api::impl_runtime_apis! { // Relay can reserve transfer native token to some random parachain. Some(( Asset { - fun: Fungible(EXISTENTIAL_DEPOSIT), + fun: Fungible(ExistentialDeposit::get()), id: AssetId(Here.into()) }, - crate::Junction::Parachain(43211234).into(), + crate::Junction::Parachain(RandomParaId::get().into()).into(), )) } @@ -2391,15 +2420,6 @@ sp_api::impl_runtime_apis! { AssetId, Fungibility::*, InteriorLocation, Junction, Junctions::*, Asset, Assets, Location, NetworkId, Response, }; - use xcm_config::{AssetHub, TokenLocation}; - - parameter_types! { - pub ExistentialDepositAsset: Option = Some(( - TokenLocation::get(), - ExistentialDeposit::get() - ).into()); - pub ToParachain: ParaId = westend_runtime_constants::system_parachain::ASSET_HUB_ID.into(); - } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = xcm_config::XcmConfig; @@ -2408,7 +2428,7 @@ sp_api::impl_runtime_apis! { xcm_config::XcmConfig, ExistentialDepositAsset, xcm_config::PriceForChildParachainDelivery, - ToParachain, + AssetHubParaId, (), >; fn valid_destination() -> Result { diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 06124d896fbb..66c56171af01 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -262,7 +262,7 @@ pub type LocalPalletOriginToLocation = ( StakingAdminToPlurality, // FellowshipAdmin origin to be used in XCM as a corresponding Plurality `Location` value. FellowshipAdminToPlurality, - // `Treasurer` origin to be used in XCM as a corresponding Plurality `MultiLocation` value. + // `Treasurer` origin to be used in XCM as a corresponding Plurality `Location` value. 
TreasurerToPlurality, ); diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs index 6ce8d3e99e8e..63ed0ac0ca73 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs @@ -22,10 +22,8 @@ use codec::Encode; use frame_benchmarking::{account, BenchmarkError}; use sp_std::prelude::*; use xcm::latest::prelude::*; -use xcm_executor::{ - traits::{ConvertLocation, FeeReason}, - Config as XcmConfig, FeesMode, -}; +use xcm_builder::EnsureDelivery; +use xcm_executor::{traits::ConvertLocation, Config as XcmConfig}; pub mod fungible; pub mod generic; @@ -114,29 +112,3 @@ pub fn account_and_location(index: u32) -> (T::AccountId, Location) { (account, location) } - -/// Trait for a type which ensures all requirements for successful delivery with XCM transport -/// layers. -pub trait EnsureDelivery { - /// Prepare all requirements for successful `XcmSender: SendXcm` passing (accounts, balances, - /// channels ...). Returns: - /// - possible `FeesMode` which is expected to be set to executor - /// - possible `Assets` which are expected to be subsume to the Holding Register - fn ensure_successful_delivery( - origin_ref: &Location, - dest: &Location, - fee_reason: FeeReason, - ) -> (Option, Option); -} - -/// `()` implementation does nothing which means no special requirements for environment. -impl EnsureDelivery for () { - fn ensure_successful_delivery( - _origin_ref: &Location, - _dest: &Location, - _fee_reason: FeeReason, - ) -> (Option, Option) { - // doing nothing - (None, None) - } -} diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index c7d8fb24e9df..e3ea2fb8c06d 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -17,21 +17,26 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; -use frame_support::{traits::Currency, weights::Weight}; +use frame_support::{ + traits::fungible::{Inspect, Mutate}, + weights::Weight, +}; use frame_system::RawOrigin; use sp_std::prelude::*; use xcm::{latest::prelude::*, v2}; +use xcm_builder::EnsureDelivery; +use xcm_executor::traits::FeeReason; type RuntimeOrigin = ::RuntimeOrigin; -// existential deposit multiplier -const ED_MULTIPLIER: u32 = 100; - /// Pallet we're benchmarking here. pub struct Pallet(crate::Pallet); /// Trait that must be implemented by runtime to be able to benchmark pallet properly. pub trait Config: crate::Config { + /// Helper that ensures successful delivery for extrinsics/benchmarks which need `SendXcm`. + type DeliveryHelper: EnsureDelivery; + /// A `Location` that can be reached via `XcmRouter`. Used only in benchmarks. /// /// If `None`, the benchmarks that depend on a reachable destination will be skipped. @@ -107,23 +112,29 @@ benchmarks! 
{ }.into(); let assets: Assets = asset.into(); - let existential_deposit = T::ExistentialDeposit::get(); - let caller = whitelisted_caller(); - - // Give some multiple of the existential deposit - let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - assert!(balance >= transferred_amount); - let _ = as Currency<_>>::make_free_balance_be(&caller, balance); - // verify initial balance - assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); - + let caller: T::AccountId = whitelisted_caller(); let send_origin = RawOrigin::Signed(caller.clone()); let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmTeleportFilter::contains(&(origin_location, assets.clone().into_inner())) { + if !T::XcmTeleportFilter::contains(&(origin_location.clone(), assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &origin_location, + &destination, + FeeReason::ChargeFees, + ); + + // Actual balance (e.g. `ensure_successful_delivery` could drip delivery fees, ...) + let balance = as Inspect<_>>::balance(&caller); + // Add transferred_amount to origin + as Mutate<_>>::mint_into(&caller, transferred_amount)?; + // verify initial balance + let balance = balance + transferred_amount; + assert_eq!( as Inspect<_>>::balance(&caller), balance); + let recipient = [0u8; 32]; let versioned_dest: VersionedLocation = destination.into(); let versioned_beneficiary: VersionedLocation = @@ -132,7 +143,7 @@ benchmarks! { }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) verify { // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) - assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + assert!( as Inspect<_>>::balance(&caller) <= balance - transferred_amount); } reserve_transfer_assets { @@ -146,23 +157,29 @@ benchmarks! { }.into(); let assets: Assets = asset.into(); - let existential_deposit = T::ExistentialDeposit::get(); - let caller = whitelisted_caller(); - - // Give some multiple of the existential deposit - let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - assert!(balance >= transferred_amount); - let _ = as Currency<_>>::make_free_balance_be(&caller, balance); - // verify initial balance - assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); - + let caller: T::AccountId = whitelisted_caller(); let send_origin = RawOrigin::Signed(caller.clone()); let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmReserveTransferFilter::contains(&(origin_location, assets.clone().into_inner())) { + if !T::XcmReserveTransferFilter::contains(&(origin_location.clone(), assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } + // Ensure that origin can send to destination (e.g. setup delivery fees, ensure router setup, ...) + let (_, _) = T::DeliveryHelper::ensure_successful_delivery( + &origin_location, + &destination, + FeeReason::ChargeFees, + ); + + // Actual balance (e.g. 
`ensure_successful_delivery` could drip delivery fees, ...) + let balance = as Inspect<_>>::balance(&caller); + // Add transferred_amount to origin + as Mutate<_>>::mint_into(&caller, transferred_amount)?; + // verify initial balance + let balance = balance + transferred_amount; + assert_eq!( as Inspect<_>>::balance(&caller), balance); + let recipient = [0u8; 32]; let versioned_dest: VersionedLocation = destination.into(); let versioned_beneficiary: VersionedLocation = @@ -171,7 +188,7 @@ benchmarks! { }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) verify { // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) - assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + assert!( as Inspect<_>>::balance(&caller) <= balance - transferred_amount); } transfer_assets { diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 004a73ca6ab2..17c181387dde 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -563,8 +563,30 @@ impl pallet_test_notifier::Config for Test { type RuntimeCall = RuntimeCall; } +#[cfg(feature = "runtime-benchmarks")] +pub struct TestDeliveryHelper; +#[cfg(feature = "runtime-benchmarks")] +impl xcm_builder::EnsureDelivery for TestDeliveryHelper { + fn ensure_successful_delivery( + origin_ref: &Location, + _dest: &Location, + _fee_reason: xcm_executor::traits::FeeReason, + ) -> (Option, Option) { + use xcm_executor::traits::ConvertLocation; + let account = SovereignAccountOf::convert_location(origin_ref).expect("Valid location"); + // Give the existential deposit at least + let balance = ExistentialDeposit::get(); + let _ = >::make_free_balance_be( + &account, balance, + ); + (None, None) + } +} + #[cfg(feature = "runtime-benchmarks")] impl super::benchmarking::Config for Test { + type DeliveryHelper = TestDeliveryHelper; + fn reachable_dest() -> Option { Some(Parachain(1000).into()) } diff --git a/polkadot/xcm/xcm-builder/src/currency_adapter.rs b/polkadot/xcm/xcm-builder/src/currency_adapter.rs index fe26b7319bb1..24261ac06583 100644 --- a/polkadot/xcm/xcm-builder/src/currency_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/currency_adapter.rs @@ -170,7 +170,7 @@ impl< } fn can_check_out(_dest: &Location, what: &Asset, _context: &XcmContext) -> Result { - log::trace!(target: "xcm::currency_adapter", "check_out dest: {:?}, what: {:?}", _dest, what); + log::trace!(target: "xcm::currency_adapter", "can_check_out dest: {:?}, what: {:?}", _dest, what); let amount = Matcher::matches_fungible(what).ok_or(Error::AssetNotHandled)?; match CheckedAccount::get() { Some((checked_account, MintLocation::Local)) => diff --git a/polkadot/xcm/xcm-builder/src/fungible_adapter.rs b/polkadot/xcm/xcm-builder/src/fungible_adapter.rs index 33b766c8560c..21c828922b33 100644 --- a/polkadot/xcm/xcm-builder/src/fungible_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/fungible_adapter.rs @@ -151,7 +151,7 @@ impl< fn can_check_out(_dest: &Location, what: &Asset, _context: &XcmContext) -> XcmResult { log::trace!( target: "xcm::fungible_adapter", - "check_out dest: {:?}, what: {:?}", + "can_check_out dest: {:?}, what: {:?}", _dest, what ); diff --git a/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs b/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs index 4574d5ed4c68..b4c418ebf1c9 100644 --- a/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs +++ 
b/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs @@ -235,7 +235,7 @@ impl< fn can_check_out(_origin: &Location, what: &Asset, _context: &XcmContext) -> XcmResult { log::trace!( target: "xcm::fungibles_adapter", - "can_check_in origin: {:?}, what: {:?}", + "can_check_out origin: {:?}, what: {:?}", _origin, what ); // Check we handle this asset. diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 42522c64d8a4..e2af8187136e 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -117,7 +117,7 @@ mod process_xcm_message; pub use process_xcm_message::ProcessXcmMessage; mod routing; -pub use routing::{WithTopicSource, WithUniqueTopic}; +pub use routing::{EnsureDelivery, WithTopicSource, WithUniqueTopic}; mod transactional; pub use transactional::FrameTransactionalProcessor; diff --git a/polkadot/xcm/xcm-builder/src/routing.rs b/polkadot/xcm/xcm-builder/src/routing.rs index 9c0302baee06..529ef80c15ff 100644 --- a/polkadot/xcm/xcm-builder/src/routing.rs +++ b/polkadot/xcm/xcm-builder/src/routing.rs @@ -20,6 +20,7 @@ use frame_system::unique; use parity_scale_codec::Encode; use sp_std::{marker::PhantomData, result::Result}; use xcm::prelude::*; +use xcm_executor::{traits::FeeReason, FeesMode}; /// Wrapper router which, if the message does not already end with a `SetTopic` instruction, /// appends one to the message filled with a universally unique ID. This ID is returned from a @@ -104,3 +105,37 @@ impl SendXcm for WithTopicSource (Option, Option); +} + +/// Tuple implementation for `EnsureDelivery`. +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl EnsureDelivery for Tuple { + fn ensure_successful_delivery( + origin_ref: &Location, + dest: &Location, + fee_reason: FeeReason, + ) -> (Option, Option) { + for_tuples!( #( + // If the implementation returns something, we're done; if not, let others try. + match Tuple::ensure_successful_delivery(origin_ref, dest, fee_reason.clone()) { + r @ (Some(_), Some(_)) | r @ (Some(_), None) | r @ (None, Some(_)) => return r, + (None, None) => (), + } + )* ); + // doing nothing + (None, None) + } +} From 49266f102e5b5c07fbc3ef45db29c2fd053a0555 Mon Sep 17 00:00:00 2001 From: brenzi Date: Mon, 26 Feb 2024 09:15:44 +0100 Subject: [PATCH 38/51] add Encointer as trusted teleporter for Westend (#3411) With the deprecation of Rococo, Encointer needs a new staging environment. Paseo will be Polkadot-focused and Westend Kusama-focused, so we propose to use Westend. --- polkadot/runtime/westend/constants/src/lib.rs | 2 ++ polkadot/runtime/westend/src/xcm_config.rs | 3 +++ prdoc/pr_3411.prdoc | 11 +++++++++++ 3 files changed, 16 insertions(+) create mode 100644 prdoc/pr_3411.prdoc diff --git a/polkadot/runtime/westend/constants/src/lib.rs b/polkadot/runtime/westend/constants/src/lib.rs index 848cccd559dc..3bc27177d7a3 100644 --- a/polkadot/runtime/westend/constants/src/lib.rs +++ b/polkadot/runtime/westend/constants/src/lib.rs @@ -107,6 +107,8 @@ pub mod system_parachain { pub const COLLECTIVES_ID: u32 = 1001; /// BridgeHub parachain ID. pub const BRIDGE_HUB_ID: u32 = 1002; + /// Encointer parachain ID. + pub const ENCOINTER_ID: u32 = 1003; /// People Chain parachain ID. pub const PEOPLE_ID: u32 = 1004; /// Brokerage parachain ID.
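Since the next hunk registers Encointer via `xcm_builder::Case`, a rough re-statement of how that filter behaves may help. This is a hedged approximation for intuition only, not the verbatim upstream implementation:

```rust
use core::marker::PhantomData;
use frame_support::traits::{ContainsPair, Get};
use xcm::latest::prelude::*;

/// Accepts an `(asset, origin)` pair only if the origin equals the
/// configured location and the asset matches the configured filter.
pub struct Case<T>(PhantomData<T>);
impl<T: Get<(AssetFilter, Location)>> ContainsPair<Asset, Location> for Case<T> {
	fn contains(asset: &Asset, origin: &Location) -> bool {
		let (filter, location) = T::get();
		&location == origin && filter.matches(asset)
	}
}
```

With a pair like `(Wnd::get(), Encointer::get())`, such a filter admits only WND-style fungibles moving to or from the Encointer parachain.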
diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 66c56171af01..c3aa75a86a45 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -114,12 +114,14 @@ parameter_types! { pub AssetHub: Location = Parachain(ASSET_HUB_ID).into_location(); pub Collectives: Location = Parachain(COLLECTIVES_ID).into_location(); pub BridgeHub: Location = Parachain(BRIDGE_HUB_ID).into_location(); + pub Encointer: Location = Parachain(ENCOINTER_ID).into_location(); pub People: Location = Parachain(PEOPLE_ID).into_location(); pub Broker: Location = Parachain(BROKER_ID).into_location(); pub Wnd: AssetFilter = Wild(AllOf { fun: WildFungible, id: AssetId(TokenLocation::get()) }); pub WndForAssetHub: (AssetFilter, Location) = (Wnd::get(), AssetHub::get()); pub WndForCollectives: (AssetFilter, Location) = (Wnd::get(), Collectives::get()); pub WndForBridgeHub: (AssetFilter, Location) = (Wnd::get(), BridgeHub::get()); + pub WndForEncointer: (AssetFilter, Location) = (Wnd::get(), Encointer::get()); pub WndForPeople: (AssetFilter, Location) = (Wnd::get(), People::get()); pub WndForBroker: (AssetFilter, Location) = (Wnd::get(), Broker::get()); pub MaxInstructions: u32 = 100; @@ -130,6 +132,7 @@ pub type TrustedTeleporters = ( xcm_builder::Case, xcm_builder::Case, xcm_builder::Case, + xcm_builder::Case, xcm_builder::Case, xcm_builder::Case, ); diff --git a/prdoc/pr_3411.prdoc b/prdoc/pr_3411.prdoc new file mode 100644 index 000000000000..d847d6756ac7 --- /dev/null +++ b/prdoc/pr_3411.prdoc @@ -0,0 +1,11 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: add Encointer as trusted teleporter for Westend + +doc: + - audience: Runtime Dev + description: | + add Encointer as trusted teleporter for Westend with ParaId 1003 + +crates: [ ] From 0893ca1584a41ea632214dbcaf8ae134b990f9e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 26 Feb 2024 11:32:35 +0100 Subject: [PATCH 39/51] frame-support: Improve error reporting when having too many pallets (#3478) Instead of only generating the error, we now generate the actual code and the error. This produces fewer errors in total and helps the user identify the actual problem instead of being confronted with tons of errors.
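The recovery pattern boils down to the following sketch (a minimal sketch assuming `proc-macro2`, `quote`, and `syn`; simplified from the real macro internals shown in the diff below):

```rust
use proc_macro2::TokenStream;
use quote::quote;

/// Emit the check's compile error *and* the expanded code, so the compiler
/// reports one actionable diagnostic instead of an avalanche of follow-up
/// "cannot find type" errors caused by a missing expansion.
fn expand_with_error_recovery(check: syn::Result<()>, expansion: TokenStream) -> TokenStream {
	match check {
		Ok(()) => expansion,
		Err(err) => {
			let error = err.to_compile_error();
			quote! {
				#error
				#expansion
			}
		},
	}
}
```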
--- .../procedural/src/construct_runtime/mod.rs | 43 ++++--- ...umber_of_pallets_exceeds_tuple_size.stderr | 106 ++++-------------- 2 files changed, 49 insertions(+), 100 deletions(-) diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 54ed15f7b1d3..6e95fdf116a6 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -233,25 +233,38 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { let input_copy = input.clone(); let definition = syn::parse_macro_input!(input as RuntimeDeclaration); - let res = match definition { - RuntimeDeclaration::Implicit(implicit_def) => - check_pallet_number(input_copy.clone().into(), implicit_def.pallets.len()).and_then( - |_| construct_runtime_implicit_to_explicit(input_copy.into(), implicit_def), - ), - RuntimeDeclaration::Explicit(explicit_decl) => check_pallet_number( - input_copy.clone().into(), - explicit_decl.pallets.len(), - ) - .and_then(|_| { - construct_runtime_explicit_to_explicit_expanded(input_copy.into(), explicit_decl) - }), - RuntimeDeclaration::ExplicitExpanded(explicit_decl) => - check_pallet_number(input_copy.into(), explicit_decl.pallets.len()) - .and_then(|_| construct_runtime_final_expansion(explicit_decl)), + let (check_pallet_number_res, res) = match definition { + RuntimeDeclaration::Implicit(implicit_def) => ( + check_pallet_number(input_copy.clone().into(), implicit_def.pallets.len()), + construct_runtime_implicit_to_explicit(input_copy.into(), implicit_def), + ), + RuntimeDeclaration::Explicit(explicit_decl) => ( + check_pallet_number(input_copy.clone().into(), explicit_decl.pallets.len()), + construct_runtime_explicit_to_explicit_expanded(input_copy.into(), explicit_decl), + ), + RuntimeDeclaration::ExplicitExpanded(explicit_decl) => ( + check_pallet_number(input_copy.into(), explicit_decl.pallets.len()), + construct_runtime_final_expansion(explicit_decl), + ), }; let res = res.unwrap_or_else(|e| e.to_compile_error()); + // We want to provide better error messages to the user and thus, handle the error here + // separately. If there is an error, we print the error and still generate all of the code to + // produce fewer errors overall for the user. + let res = if let Err(error) = check_pallet_number_res { + let error = error.to_compile_error(); + + quote! { + #error + + #res + } + } else { + res + }; + let res = expander::Expander::new("construct_runtime") .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr index 3b6329c650fa..6160f8234a35 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr
To in 67 | pub struct Runtime | ^^^ -error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:35:64 - | -35 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | ^^^^^^^^^^^ not found in this scope - | -help: you might be missing a type parameter - | -35 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | +++++++++++++ - -error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:37:25 - | -37 | impl pallet::Config for Runtime {} - | ^^^^^^^ not found in this scope - -error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:40:31 - | -40 | impl frame_system::Config for Runtime { - | ^^^^^^^ not found in this scope - -error[E0412]: cannot find type `RuntimeOrigin` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:42:23 - | -42 | type RuntimeOrigin = RuntimeOrigin; - | ^^^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -42 | type RuntimeOrigin = Self::RuntimeOrigin; - | ++++++ - -error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:44:21 - | -44 | type RuntimeCall = RuntimeCall; - | ^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -44 | type RuntimeCall = Self::RuntimeCall; - | ++++++ - -error[E0412]: cannot find type `RuntimeEvent` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:50:22 - | -50 | type RuntimeEvent = RuntimeEvent; - | ^^^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -50 | type RuntimeEvent = Self::RuntimeEvent; - | ++++++ - -error[E0412]: cannot find type `PalletInfo` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:56:20 - | -56 | type PalletInfo = PalletInfo; - | ^^^^^^^^^^ - | -help: you might have meant to use the associated type - | -56 | type PalletInfo = Self::PalletInfo; - | ++++++ -help: consider importing one of these items - | -18 + use frame_benchmarking::__private::traits::PalletInfo; - | -18 + use frame_support::traits::PalletInfo; - | - -error[E0412]: cannot find type `RuntimeTask` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:39:1 - | -39 | #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: this error originates in the macro `frame_system::config_preludes::TestDefaultConfig` which comes from the expansion of the macro `frame_support::macro_magic::forward_tokens_verbatim` (in Nightly builds, run with -Z macro-backtrace for more info) -help: you might have meant to use the associated type - --> $WORKSPACE/substrate/frame/system/src/lib.rs - | - | type Self::RuntimeTask = (); - | ++++++ +error: recursion limit reached while expanding `frame_support::__private::tt_return!` + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:22:1 + | +22 | / #[frame_support::pallet] +23 | | mod pallet { +24 | | #[pallet::config] +25 | | pub trait Config: frame_system::Config {} +... | +66 | |/ construct_runtime! { +67 | || pub struct Runtime +68 | || { +69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +... 
|| +180 | || } +181 | || } + | ||_^ + | |_| + | in this macro invocation + | + = help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`$CRATE`) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) From 6c5a42a690f96d59dbdf94640188f5d5b5cc47e2 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Mon, 26 Feb 2024 12:45:30 +0100 Subject: [PATCH 40/51] Introduce Notification block pinning limit (#2935) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While investigating some pruning issues I found some room for improvement in the notification pin handling. **Problem:** It was not possible to define an upper limit on notification pins. The block pinning cache has a limit, but only handles bodies and justifications. After this PR, bookkeeping for notifications is managed in the pinning worker. A limit can be defined in the worker. If that limit is crossed, blocks that were pinned for that notification are unpinned, which now affects the state as well as bodies and justifications. The pinned blocks cache still has a limit, but should never be hit. closes #19 --------- Co-authored-by: Bastian Köcher Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> --- Cargo.lock | 1 + substrate/client/api/src/client.rs | 27 +- substrate/client/db/src/lib.rs | 2 +- .../client/db/src/pinned_blocks_cache.rs | 2 +- substrate/client/service/Cargo.toml | 1 + substrate/client/service/src/client/client.rs | 53 +-- substrate/client/service/src/client/mod.rs | 1 + .../src/client/notification_pinning.rs | 353 ++++++++++++++++++ 8 files changed, 406 insertions(+), 34 deletions(-) create mode 100644 substrate/client/service/src/client/notification_pinning.rs diff --git a/Cargo.lock b/Cargo.lock index fae9eacdab9c..410b45d00183 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16729,6 +16729,7 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", + "schnellru", "serde", "serde_json", "sp-api", diff --git a/substrate/client/api/src/client.rs b/substrate/client/api/src/client.rs index 46232c74539c..2de09840e4df 100644 --- a/substrate/client/api/src/client.rs +++ b/substrate/client/api/src/client.rs @@ -278,7 +278,7 @@ impl fmt::Display for UsageInfo { pub struct UnpinHandleInner { /// Hash of the block pinned by this handle hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, } impl Debug for UnpinHandleInner { @@ -291,7 +291,7 @@ impl UnpinHandleInner { /// Create a new [`UnpinHandleInner`] pub fn new( hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> Self { Self { hash, unpin_worker_sender } } @@ -299,12 +299,25 @@ impl UnpinHandleInner { impl Drop for UnpinHandleInner { fn drop(&mut self) { - if let Err(err) = self.unpin_worker_sender.unbounded_send(self.hash) { + if let Err(err) = + self.unpin_worker_sender.unbounded_send(UnpinWorkerMessage::Unpin(self.hash)) + { log::debug!(target: "db", "Unable to unpin block with hash: {}, error: {:?}", self.hash, err); }; } } +/// Message that signals notification-based pinning actions to the pinning-worker. +/// +/// When the notification is dropped, an `Unpin` message should be sent to the worker. +#[derive(Debug)] +pub enum UnpinWorkerMessage { + /// Should be sent when an import or finality notification is created.
+ AnnouncePin(Block::Hash), + /// Should be sent when an import or finality notification is dropped. + Unpin(Block::Hash), +} + /// Keeps a specific block pinned while the handle is alive. /// Once the last handle instance for a given block is dropped, the /// block is unpinned in the [`Backend`](crate::backend::Backend::unpin_block). @@ -315,7 +328,7 @@ impl UnpinHandle { /// Create a new [`UnpinHandle`] pub fn new( hash: Block::Hash, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> UnpinHandle { UnpinHandle(Arc::new(UnpinHandleInner::new(hash, unpin_worker_sender))) } @@ -353,7 +366,7 @@ impl BlockImportNotification { header: Block::Header, is_new_best: bool, tree_route: Option>>, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> Self { Self { hash, @@ -412,7 +425,7 @@ impl FinalityNotification { /// Create finality notification from finality summary. pub fn from_summary( mut summary: FinalizeSummary, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> FinalityNotification { let hash = summary.finalized.pop().unwrap_or_default(); FinalityNotification { @@ -436,7 +449,7 @@ impl BlockImportNotification { /// Create finality notification from finality summary. pub fn from_summary( summary: ImportSummary, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, ) -> BlockImportNotification { let hash = summary.hash; BlockImportNotification { diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 870b4150b9df..0faa90dfc4f9 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -2531,7 +2531,7 @@ impl sc_client_api::backend::Backend for Backend { self.storage.state_db.pin(&hash, number.saturated_into::(), hint).map_err( |_| { sp_blockchain::Error::UnknownBlock(format!( - "State already discarded for `{:?}`", + "Unable to pin: state already discarded for `{:?}`", hash )) }, diff --git a/substrate/client/db/src/pinned_blocks_cache.rs b/substrate/client/db/src/pinned_blocks_cache.rs index 46c9287fb19a..ac4aad07765c 100644 --- a/substrate/client/db/src/pinned_blocks_cache.rs +++ b/substrate/client/db/src/pinned_blocks_cache.rs @@ -20,7 +20,7 @@ use schnellru::{Limiter, LruMap}; use sp_runtime::{traits::Block as BlockT, Justifications}; const LOG_TARGET: &str = "db::pin"; -const PINNING_CACHE_SIZE: usize = 1024; +const PINNING_CACHE_SIZE: usize = 2048; /// Entry for pinned blocks cache. struct PinnedBlockCacheEntry { diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index 73edceb2ef36..bbf67d1fbd0a 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -84,6 +84,7 @@ tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "tim tempfile = "3.1.0" directories = "5.0.1" static_init = "1.0.3" +schnellru = "0.2.1" [dev-dependencies] substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 108c258595a7..35e8b53a09cf 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -19,8 +19,8 @@ //!
Substrate Client use super::block_rules::{BlockRules, LookupResult as BlockLookupResult}; -use futures::{FutureExt, StreamExt}; -use log::{error, info, trace, warn}; +use crate::client::notification_pinning::NotificationPinningWorker; +use log::{debug, info, trace, warn}; use parking_lot::{Mutex, RwLock}; use prometheus_endpoint::Registry; use rand::Rng; @@ -38,7 +38,7 @@ use sc_client_api::{ execution_extensions::ExecutionExtensions, notifications::{StorageEventStream, StorageNotifications}, CallExecutor, ExecutorProvider, KeysIter, OnFinalityAction, OnImportAction, PairsIter, - ProofProvider, UsageProvider, + ProofProvider, UnpinWorkerMessage, UsageProvider, }; use sc_consensus::{ BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, @@ -114,7 +114,7 @@ where block_rules: BlockRules, config: ClientConfig, telemetry: Option, - unpin_worker_sender: TracingUnboundedSender, + unpin_worker_sender: TracingUnboundedSender>, _phantom: PhantomData, } @@ -326,19 +326,35 @@ where // dropped, the block will be unpinned automatically. if let Some(ref notification) = finality_notification { if let Err(err) = self.backend.pin_block(notification.hash) { - error!( + debug!( "Unable to pin block for finality notification. hash: {}, Error: {}", notification.hash, err ); - }; + } else { + let _ = self + .unpin_worker_sender + .unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash)) + .map_err(|e| { + log::error!( + "Unable to send AnnouncePin worker message for finality: {e}" + ) + }); + } } if let Some(ref notification) = import_notification { if let Err(err) = self.backend.pin_block(notification.hash) { - error!( + debug!( "Unable to pin block for import notification. hash: {}, Error: {}", notification.hash, err ); + } else { + let _ = self + .unpin_worker_sender + .unbounded_send(UnpinWorkerMessage::AnnouncePin(notification.hash)) + .map_err(|e| { + log::error!("Unable to send AnnouncePin worker message for import: {e}") + }); }; } @@ -416,25 +432,12 @@ where backend.commit_operation(op)?; } - let (unpin_worker_sender, mut rx) = - tracing_unbounded::("unpin-worker-channel", 10_000); - let task_backend = Arc::downgrade(&backend); - spawn_handle.spawn( - "unpin-worker", - None, - async move { - while let Some(message) = rx.next().await { - if let Some(backend) = task_backend.upgrade() { - backend.unpin_block(message); - } else { - log::debug!("Terminating unpin-worker, backend reference was dropped."); - return - } - } - log::debug!("Terminating unpin-worker, stream terminated.") - } - .boxed(), + let (unpin_worker_sender, rx) = tracing_unbounded::>( + "notification-pinning-worker-channel", + 10_000, ); + let unpin_worker = NotificationPinningWorker::new(rx, backend.clone()); + spawn_handle.spawn("notification-pinning-worker", None, Box::pin(unpin_worker.run())); Ok(Client { backend, diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index a13fd4317e15..0703cc2b47d1 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -47,6 +47,7 @@ mod block_rules; mod call_executor; mod client; +mod notification_pinning; mod wasm_override; mod wasm_substitutes; diff --git a/substrate/client/service/src/client/notification_pinning.rs b/substrate/client/service/src/client/notification_pinning.rs new file mode 100644 index 000000000000..80de91c02f1a --- /dev/null +++ b/substrate/client/service/src/client/notification_pinning.rs @@ -0,0 +1,353 @@ +// This file is part of Substrate. 
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Notification pinning related logic. +//! +//! This file contains a worker that should be started when a new client instance is created. +//! The goal is to avoid pruning of blocks that have active notifications in the node. Every +//! recipient of notifications should receive the chance to act upon them. In addition, notification +//! listeners can hold onto a [`sc_client_api::UnpinHandle`] to keep a block pinned. Once the handle +//! is dropped, a message is sent and the worker unpins the respective block. +use std::{ + marker::PhantomData, + sync::{Arc, Weak}, +}; + +use futures::StreamExt; +use sc_client_api::{Backend, UnpinWorkerMessage}; + +use sc_utils::mpsc::TracingUnboundedReceiver; +use schnellru::Limiter; +use sp_runtime::traits::Block as BlockT; + +const LOG_TARGET: &str = "db::notification_pinning"; +const NOTIFICATION_PINNING_LIMIT: usize = 1024; + +/// A limiter which automatically unpins blocks that leave the data structure. +#[derive(Clone, Debug)] +struct UnpinningByLengthLimiter> { + max_length: usize, + backend: Weak, + _phantom: PhantomData, +} + +impl> UnpinningByLengthLimiter { + /// Creates a new length limiter with a given `max_length`. + pub fn new(max_length: usize, backend: Weak) -> UnpinningByLengthLimiter { + UnpinningByLengthLimiter { max_length, backend, _phantom: PhantomData::::default() } + } +} + +impl> Limiter + for UnpinningByLengthLimiter +{ + type KeyToInsert<'a> = Block::Hash; + type LinkType = usize; + + fn is_over_the_limit(&self, length: usize) -> bool { + length > self.max_length + } + + fn on_insert( + &mut self, + _length: usize, + key: Self::KeyToInsert<'_>, + value: u32, + ) -> Option<(Block::Hash, u32)> { + log::debug!(target: LOG_TARGET, "Pinning block based on notification. hash = {key}"); + if self.max_length > 0 { + Some((key, value)) + } else { + None + } + } + + fn on_replace( + &mut self, + _length: usize, + _old_key: &mut Block::Hash, + _new_key: Block::Hash, + _old_value: &mut u32, + _new_value: &mut u32, + ) -> bool { + true + } + + fn on_removed(&mut self, key: &mut Block::Hash, references: &mut u32) { + // If reference count was larger than 0 on removal, + // the item was removed due to capacity limitations. + // Since the cache should be large enough for pinned items, + // we want to know about these evictions. + if *references > 0 { + log::warn!( + target: LOG_TARGET, + "Notification block pinning limit reached. Unpinning block with hash = {key:?}" + ); + if let Some(backend) = self.backend.upgrade() { + (0..*references).for_each(|_| backend.unpin_block(*key)); + } + } else { + log::trace!( + target: LOG_TARGET, + "Unpinned block. 
hash = {key:?}", + ) + } + } + + fn on_cleared(&mut self) {} + + fn on_grow(&mut self, _new_memory_usage: usize) -> bool { + true + } +} + +/// Worker for the handling of notification pinning. +/// +/// It receives messages from a receiver and pins/unpins based on the incoming messages. +/// All notification related unpinning should go through this worker. If the maximum number of +/// notification pins is reached, the block from the oldest notification is unpinned. +pub struct NotificationPinningWorker> { + unpin_message_rx: TracingUnboundedReceiver>, + task_backend: Weak, + pinned_blocks: schnellru::LruMap>, +} + +impl> NotificationPinningWorker { + /// Creates a new `NotificationPinningWorker`. + pub fn new( + unpin_message_rx: TracingUnboundedReceiver>, + task_backend: Arc, + ) -> Self { + let pinned_blocks = + schnellru::LruMap::>::new( + UnpinningByLengthLimiter::new( + NOTIFICATION_PINNING_LIMIT, + Arc::downgrade(&task_backend), + ), + ); + Self { unpin_message_rx, task_backend: Arc::downgrade(&task_backend), pinned_blocks } + } + + fn handle_announce_message(&mut self, hash: Block::Hash) { + if let Some(entry) = self.pinned_blocks.get_or_insert(hash, Default::default) { + *entry = *entry + 1; + } + } + + fn handle_unpin_message(&mut self, hash: Block::Hash) -> Result<(), ()> { + if let Some(refcount) = self.pinned_blocks.peek_mut(&hash) { + *refcount = *refcount - 1; + if *refcount == 0 { + self.pinned_blocks.remove(&hash); + } + if let Some(backend) = self.task_backend.upgrade() { + log::debug!(target: LOG_TARGET, "Reducing pinning refcount for block hash = {hash:?}"); + backend.unpin_block(hash); + } else { + log::debug!(target: LOG_TARGET, "Terminating unpin-worker, backend reference was dropped."); + return Err(()) + } + } else { + log::debug!(target: LOG_TARGET, "Received unpin message for already unpinned block. hash = {hash:?}"); + } + Ok(()) + } + + /// Start working on the received messages. + /// + /// The worker maintains a map which keeps track of the pinned blocks and their reference count. + /// Depending upon the received message, it acts to pin/unpin the block. 
+ pub async fn run(mut self) { + while let Some(message) = self.unpin_message_rx.next().await { + match message { + UnpinWorkerMessage::AnnouncePin(hash) => self.handle_announce_message(hash), + UnpinWorkerMessage::Unpin(hash) => + if self.handle_unpin_message(hash).is_err() { + return + }, + } + } + log::debug!(target: LOG_TARGET, "Terminating unpin-worker, stream terminated.") + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use sc_client_api::{Backend, UnpinWorkerMessage}; + use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; + use sp_core::H256; + use sp_runtime::traits::Block as BlockT; + + type Block = substrate_test_runtime_client::runtime::Block; + + use super::{NotificationPinningWorker, UnpinningByLengthLimiter}; + + impl> NotificationPinningWorker { + fn new_with_limit( + unpin_message_rx: TracingUnboundedReceiver>, + task_backend: Arc, + limit: usize, + ) -> Self { + let pinned_blocks = + schnellru::LruMap::>::new( + UnpinningByLengthLimiter::new(limit, Arc::downgrade(&task_backend)), + ); + Self { unpin_message_rx, task_backend: Arc::downgrade(&task_backend), pinned_blocks } + } + + fn lru( + &self, + ) -> &schnellru::LruMap> { + &self.pinned_blocks + } + } + + #[test] + fn pinning_worker_handles_base_case() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash = H256::random(); + + let mut worker = NotificationPinningWorker::new(rx, backend.clone()); + + // Block got pinned and unpin message should unpin in the backend. + let _ = backend.pin_block(hash); + assert_eq!(backend.pin_refs(&hash), Some(1)); + + worker.handle_announce_message(hash); + assert_eq!(worker.lru().len(), 1); + + let _ = worker.handle_unpin_message(hash); + + assert_eq!(backend.pin_refs(&hash), Some(0)); + assert!(worker.lru().is_empty()); + } + + #[test] + fn pinning_worker_handles_multiple_pins() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash = H256::random(); + + let mut worker = NotificationPinningWorker::new(rx, backend.clone()); + // Block got pinned multiple times. + let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + assert_eq!(backend.pin_refs(&hash), Some(3)); + + worker.handle_announce_message(hash); + worker.handle_announce_message(hash); + worker.handle_announce_message(hash); + assert_eq!(worker.lru().len(), 1); + + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(2)); + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(1)); + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(0)); + assert!(worker.lru().is_empty()); + + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(0)); + } + + #[test] + fn pinning_worker_handles_too_many_unpins() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash = H256::random(); + let hash2 = H256::random(); + + let mut worker = NotificationPinningWorker::new(rx, backend.clone()); + // Block was announced once but unpinned multiple times. The worker should ignore the + // additional unpins. 
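// Editor's aside, not part of the patch: the eviction semantics exercised by
// these tests come from `schnellru::LruMap`. A self-contained sketch of the
// underlying behavior, assuming schnellru's built-in `ByLength` limiter:
//
//     let mut lru = schnellru::LruMap::new(schnellru::ByLength::new(2));
//     lru.insert(1, "a");
//     lru.insert(2, "b");
//     lru.insert(3, "c"); // evicts key 1, the least recently used entry
//     assert!(lru.get(&1).is_none());
//
// `UnpinningByLengthLimiter::on_removed` hooks exactly this eviction moment
// to call `Backend::unpin_block` for every remaining reference.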
+ let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + let _ = backend.pin_block(hash); + assert_eq!(backend.pin_refs(&hash), Some(3)); + + worker.handle_announce_message(hash); + assert_eq!(worker.lru().len(), 1); + + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(2)); + let _ = worker.handle_unpin_message(hash); + assert_eq!(backend.pin_refs(&hash), Some(2)); + assert!(worker.lru().is_empty()); + + let _ = worker.handle_unpin_message(hash2); + assert!(worker.lru().is_empty()); + assert_eq!(backend.pin_refs(&hash2), None); + } + + #[test] + fn pinning_worker_should_evict_when_limit_reached() { + let (_tx, rx) = tracing_unbounded("testing", 1000); + + let backend = Arc::new(sc_client_api::in_mem::Backend::::new()); + + let hash1 = H256::random(); + let hash2 = H256::random(); + let hash3 = H256::random(); + let hash4 = H256::random(); + + // Only two items fit into the cache. + let mut worker = NotificationPinningWorker::new_with_limit(rx, backend.clone(), 2); + + // Multiple blocks are announced but the cache size is too small. We expect that blocks + // are evicted by the cache and unpinned in the backend. + let _ = backend.pin_block(hash1); + let _ = backend.pin_block(hash2); + let _ = backend.pin_block(hash3); + assert_eq!(backend.pin_refs(&hash1), Some(1)); + assert_eq!(backend.pin_refs(&hash2), Some(1)); + assert_eq!(backend.pin_refs(&hash3), Some(1)); + + worker.handle_announce_message(hash1); + assert!(worker.lru().peek(&hash1).is_some()); + worker.handle_announce_message(hash2); + assert!(worker.lru().peek(&hash2).is_some()); + worker.handle_announce_message(hash3); + assert!(worker.lru().peek(&hash3).is_some()); + assert!(worker.lru().peek(&hash2).is_some()); + assert_eq!(worker.lru().len(), 2); + + // Hash 1 should have gotten unpinned, since it's the oldest. + assert_eq!(backend.pin_refs(&hash1), Some(0)); + assert_eq!(backend.pin_refs(&hash2), Some(1)); + assert_eq!(backend.pin_refs(&hash3), Some(1)); + + // Hash 2 is getting bumped. + worker.handle_announce_message(hash2); + assert_eq!(worker.lru().peek(&hash2), Some(&2)); + + // Since hash 2 was accessed, evict hash 3. + worker.handle_announce_message(hash4); + assert_eq!(worker.lru().peek(&hash3), None); + } +} From d05f8c57fb29b3c156e542c5473cbb09fae359bb Mon Sep 17 00:00:00 2001 From: Vladimir Istyufeev Date: Mon, 26 Feb 2024 21:40:02 +0400 Subject: [PATCH 41/51] Limit max execution time for `test-linux-stable` CI jobs (#3483) Tests run as part of the `test-linux-stable` jobs could hang as shown [here](https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/5341393). This PR adds a failsafe for such cases. --- .gitlab/pipeline/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index b5e26d194896..9f774aab2719 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -48,6 +48,7 @@ test-linux-stable: - target/nextest/default/junit.xml reports: junit: target/nextest/default/junit.xml + timeout: 90m test-linux-oldkernel-stable: extends: test-linux-stable From 4080632ee05fc3d7734c856b3af3a024d63197ef Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 26 Feb 2024 18:23:37 +0100 Subject: [PATCH 42/51] [prdoc] Validate crate names (#3467) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changes: - Add CI script to check that the `crate` names that are mentioned in prdocs are valid.
We can extend it later on to also validate the correct SemVer bumps as introduced in https://github.com/paritytech/polkadot-sdk/pull/3441. Example output: ```pre $ python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/*.prdoc 🔎 Reading workspace polkadot-sdk/Cargo.toml. 📦 Checking 36 prdocs against 494 crates. ✅ All prdocs are valid. ``` Note that not all old prdocs pass the check since crates have been renamed: ```pre $ python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/**/*.prdoc 🔎 Reading workspace polkadot-sdk/Cargo.toml. 📦 Checking 186 prdocs against 494 crates. ❌ Some prdocs are invalid. 💥 prdoc/1.4.0/pr_1926.prdoc lists invalid crate: node-cli 💥 prdoc/1.4.0/pr_2086.prdoc lists invalid crate: xcm-executor 💥 prdoc/1.4.0/pr_2107.prdoc lists invalid crate: xcm 💥 prdoc/1.6.0/pr_2684.prdoc lists invalid crate: xcm-builder ``` --------- Signed-off-by: Oliver Tale-Yazdi --- .github/scripts/check-prdoc.py | 71 +++++++++++++++++++++++++++++++ .github/workflows/check-prdoc.yml | 8 ++++ prdoc/1.4.0/pr_1246.prdoc | 2 +- prdoc/1.6.0/pr_2689.prdoc | 2 +- prdoc/1.6.0/pr_2771.prdoc | 2 +- prdoc/pr_3412.prdoc | 2 +- 6 files changed, 83 insertions(+), 4 deletions(-) create mode 100644 .github/scripts/check-prdoc.py diff --git a/.github/scripts/check-prdoc.py b/.github/scripts/check-prdoc.py new file mode 100644 index 000000000000..42b063f2885d --- /dev/null +++ b/.github/scripts/check-prdoc.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 + +''' +Ensure that the prdoc files are valid. + +# Example + +```sh +python3 -m pip install cargo-workspace +python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/*.prdoc +``` + +Produces example output: +```pre +🔎 Reading workspace polkadot-sdk/Cargo.toml +📦 Checking 32 prdocs against 493 crates. +✅ All prdocs are valid +``` +''' + +import os +import yaml +import argparse +import cargo_workspace + +def check_prdoc_crate_names(root, paths): + ''' + Check that all crates of the `crates` section of each prdoc are present in the workspace.
+ ''' + + print(f'🔎 Reading workspace {root}.') + workspace = cargo_workspace.Workspace.from_path(root) + crate_names = [crate.name for crate in workspace.crates] + + print(f'📦 Checking {len(paths)} prdocs against {len(crate_names)} crates.') + faulty = {} + + for path in paths: + with open(path, 'r') as f: + prdoc = yaml.safe_load(f) + + for crate in prdoc.get('crates', []): + crate = crate['name'] + if crate in crate_names: + continue + + faulty.setdefault(path, []).append(crate) + + if len(faulty) == 0: + print('✅ All prdocs are valid.') + else: + print('❌ Some prdocs are invalid.') + for path, crates in faulty.items(): + print(f'💥 {path} lists invalid crate: {", ".join(crates)}') + exit(1) + +def parse_args(): + parser = argparse.ArgumentParser(description='Check prdoc files') + parser.add_argument('root', help='The cargo workspace manifest', metavar='root', type=str, nargs=1) + parser.add_argument('prdoc', help='The prdoc files', metavar='prdoc', type=str, nargs='*') + args = parser.parse_args() + + if len(args.prdoc) == 0: + print('❌ Need at least one prdoc file as argument.') + exit(1) + + return { 'root': os.path.abspath(args.root[0]), 'prdocs': args.prdoc } + +if __name__ == '__main__': + args = parse_args() + check_prdoc_crate_names(args['root'], args['prdocs']) diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 91701ca036e0..c31dee06ec54 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -57,3 +57,11 @@ jobs: echo "Checking for PR#${GITHUB_PR}" echo "You can find more information about PRDoc at $PRDOC_DOC" $ENGINE run --rm -v $PWD:/repo -e RUST_LOG=info $IMAGE check -n ${GITHUB_PR} + + - name: Validate prdoc for PR#${{ github.event.pull_request.number }} + if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} + run: | + echo "Validating PR#${GITHUB_PR}" + python3 --version + python3 -m pip install cargo-workspace==1.2.1 + python3 .github/scripts/check-prdoc.py Cargo.toml prdoc/pr_${GITHUB_PR}.prdoc diff --git a/prdoc/1.4.0/pr_1246.prdoc b/prdoc/1.4.0/pr_1246.prdoc index a4d270c45cb5..3b5c2017f22a 100644 --- a/prdoc/1.4.0/pr_1246.prdoc +++ b/prdoc/1.4.0/pr_1246.prdoc @@ -11,7 +11,7 @@ migrations: description: "Messages from the DMP dispatch queue will be moved over to the MQ pallet via `on_initialize`. This happens over multiple blocks and emits a `Completed` event at the end. The pallet can be un-deployed and deleted afterwards. Note that the migration reverses the order of messages, which should be acceptable as a one-off." crates: - - name: cumulus_pallet_xcmp_queue + - name: cumulus-pallet-xcmp-queue note: Pallet config must be altered according to the MR description. host_functions: [] diff --git a/prdoc/1.6.0/pr_2689.prdoc b/prdoc/1.6.0/pr_2689.prdoc index 847c3e8026ce..5d3081e3a4ce 100644 --- a/prdoc/1.6.0/pr_2689.prdoc +++ b/prdoc/1.6.0/pr_2689.prdoc @@ -1,7 +1,7 @@ # Schema: Parity PR Documentation Schema (prdoc) # See doc at https://github.com/paritytech/prdoc -title: BEEFY: Support compatibility with Warp Sync - Allow Warp Sync for Validators +title: "BEEFY: Support compatibility with Warp Sync - Allow Warp Sync for Validators" doc: - audience: Node Operator diff --git a/prdoc/1.6.0/pr_2771.prdoc b/prdoc/1.6.0/pr_2771.prdoc index 1b49162e4392..50fb99556ecd 100644 --- a/prdoc/1.6.0/pr_2771.prdoc +++ b/prdoc/1.6.0/pr_2771.prdoc @@ -6,4 +6,4 @@ doc: Enable better req-response protocol versioning, by allowing for fallback requests on different protocols. 
 crates:
- - name: sc_network
+ - name: sc-network
diff --git a/prdoc/pr_3412.prdoc b/prdoc/pr_3412.prdoc
index d68f05e45dcf..1ee6edfeb837 100644
--- a/prdoc/pr_3412.prdoc
+++ b/prdoc/pr_3412.prdoc
@@ -13,5 +13,5 @@ doc:

 crates:
 - name: pallet-babe
- - name: pallet-aura-ext
+ - name: cumulus-pallet-aura-ext
 - name: pallet-session

From 81d447a8feca14222fef57ffa721302a8b808562 Mon Sep 17 00:00:00 2001
From: philoniare
Date: Tue, 27 Feb 2024 04:40:13 +0800
Subject: [PATCH 43/51] Cleanup String::from_utf8 (#3446)

# Description

* Refactors `String::from_utf8` usage in the pallet benchmarking.

Fixes #389

---------

Co-authored-by: command-bot <>
---
 .../benchmarking-cli/src/pallet/command.rs | 50 ++++++++++---------
 .../benchmarking-cli/src/pallet/writer.rs | 46 +++++++----------
 2 files changed, 44 insertions(+), 52 deletions(-)

diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
index d5dc6226ab9f..c6ba28240167 100644
--- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
@@ -298,16 +298,23 @@ impl PalletCmd {
 // Convert `Vec<u8>` to `String` for better readability.
 let benchmarks_to_run: Vec<_> = benchmarks_to_run
 .into_iter()
- .map(|b| {
+ .map(|(pallet, extrinsic, components, pov_modes)| {
+ let pallet_name =
+ String::from_utf8(pallet.clone()).expect("Encoded from String; qed");
+ let extrinsic_name =
+ String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed");
 (
- b.0,
- b.1,
- b.2,
- b.3.into_iter()
+ pallet,
+ extrinsic,
+ components,
+ pov_modes
+ .into_iter()
 .map(|(p, s)| {
 (String::from_utf8(p).unwrap(), String::from_utf8(s).unwrap())
 })
 .collect(),
+ pallet_name,
+ extrinsic_name,
 )
 })
 .collect();
@@ -329,12 +336,12 @@ impl PalletCmd {
 let mut component_ranges = HashMap::<(Vec<u8>, Vec<u8>), Vec<ComponentRange>>::new();
 let pov_modes = Self::parse_pov_modes(&benchmarks_to_run)?;

- for (pallet, extrinsic, components, _) in benchmarks_to_run.clone() {
+ for (pallet, extrinsic, components, _, pallet_name, extrinsic_name) in
+ benchmarks_to_run.clone()
+ {
 log::info!(
 target: LOG_TARGET,
- "Starting benchmark: {}::{}",
- String::from_utf8(pallet.clone()).expect("Encoded from String; qed"),
- String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"),
+ "Starting benchmark: {pallet_name}::{extrinsic_name}"
 );
 let all_components = if components.is_empty() {
 vec![Default::default()]
@@ -415,12 +422,7 @@
 )
 .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))?
 .map_err(|e| {
- format!(
- "Benchmark {}::{} failed: {}",
- String::from_utf8_lossy(&pallet),
- String::from_utf8_lossy(&extrinsic),
- e
- )
+ format!("Benchmark {pallet_name}::{extrinsic_name} failed: {e}",)
 })?;
 }
 // Do one loop of DB tracking.
@@ -494,11 +496,7 @@

 log::info!(
 target: LOG_TARGET,
- "Running benchmark: {}.{}({} args) {}/{} {}/{}",
- String::from_utf8(pallet.clone())
- .expect("Encoded from String; qed"),
- String::from_utf8(extrinsic.clone())
- .expect("Encoded from String; qed"),
+ "Running benchmark: {pallet_name}::{extrinsic_name}({} args) {}/{} {}/{}",
 components.len(),
 s + 1, // s starts at 0.
all_components.len(), @@ -707,12 +705,14 @@ impl PalletCmd { Vec, Vec<(BenchmarkParameter, u32, u32)>, Vec<(String, String)>, + String, + String, )>, ) -> Result { use std::collections::hash_map::Entry; let mut parsed = PovModesMap::new(); - for (pallet, call, _components, pov_modes) in benchmarks { + for (pallet, call, _components, pov_modes, _, _) in benchmarks { for (pallet_storage, mode) in pov_modes { let mode = PovEstimationMode::from_str(&mode)?; let splits = pallet_storage.split("::").collect::>(); @@ -766,6 +766,8 @@ fn list_benchmark( Vec, Vec<(BenchmarkParameter, u32, u32)>, Vec<(String, String)>, + String, + String, )>, list_output: ListOutput, no_csv_header: bool, @@ -773,11 +775,11 @@ fn list_benchmark( let mut benchmarks = BTreeMap::new(); // Sort and de-dub by pallet and function name. - benchmarks_to_run.iter().for_each(|(pallet, extrinsic, _, _)| { + benchmarks_to_run.iter().for_each(|(_, _, _, _, pallet_name, extrinsic_name)| { benchmarks - .entry(String::from_utf8_lossy(pallet).to_string()) + .entry(pallet_name) .or_insert_with(BTreeSet::new) - .insert(String::from_utf8_lossy(extrinsic).to_string()); + .insert(extrinsic_name); }); match list_output { diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs index 9493a693bbed..bd4b65d8a2e3 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -153,8 +153,8 @@ fn map_results( continue } - let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); - let instance_string = String::from_utf8(batch.instance.clone()).unwrap(); + let pallet_name = String::from_utf8(batch.pallet.clone()).unwrap(); + let instance_name = String::from_utf8(batch.instance.clone()).unwrap(); let benchmark_data = get_benchmark_data( batch, storage_info, @@ -166,7 +166,7 @@ fn map_results( worst_case_map_values, additional_trie_layers, ); - let pallet_benchmarks = all_benchmarks.entry((pallet_string, instance_string)).or_default(); + let pallet_benchmarks = all_benchmarks.entry((pallet_name, instance_name)).or_default(); pallet_benchmarks.push(benchmark_data); } Ok(all_benchmarks) @@ -571,19 +571,22 @@ pub(crate) fn process_storage_results( let mut prefix_result = result.clone(); let key_info = storage_info_map.get(&prefix); + let pallet_name = match key_info { + Some(k) => String::from_utf8(k.pallet_name.clone()).expect("encoded from string"), + None => "".to_string(), + }; + let storage_name = match key_info { + Some(k) => String::from_utf8(k.storage_name.clone()).expect("encoded from string"), + None => "".to_string(), + }; let max_size = key_info.and_then(|k| k.max_size); let override_pov_mode = match key_info { - Some(StorageInfo { pallet_name, storage_name, .. }) => { - let pallet_name = - String::from_utf8(pallet_name.clone()).expect("encoded from string"); - let storage_name = - String::from_utf8(storage_name.clone()).expect("encoded from string"); - + Some(_) => { // Is there an override for the storage key? - pov_modes.get(&(pallet_name.clone(), storage_name)).or( + pov_modes.get(&(pallet_name.clone(), storage_name.clone())).or( // .. or for the storage prefix? - pov_modes.get(&(pallet_name, "ALL".to_string())).or( + pov_modes.get(&(pallet_name.clone(), "ALL".to_string())).or( // .. or for the benchmark? pov_modes.get(&("ALL".to_string(), "ALL".to_string())), ), @@ -662,15 +665,10 @@ pub(crate) fn process_storage_results( // writes. 
 if !is_prefix_identified {
 match key_info {
- Some(key_info) => {
+ Some(_) => {
 let comment = format!(
 "Storage: `{}::{}` (r:{} w:{})",
- String::from_utf8(key_info.pallet_name.clone())
- .expect("encoded from string"),
- String::from_utf8(key_info.storage_name.clone())
- .expect("encoded from string"),
- reads,
- writes,
+ pallet_name, storage_name, reads, writes,
 );
 comments.push(comment)
 },
@@ -698,11 +696,7 @@
 ) {
 Some(new_pov) => {
 let comment = format!(
- "Proof: `{}::{}` (`max_values`: {:?}, `max_size`: {:?}, added: {}, mode: `{:?}`)",
- String::from_utf8(key_info.pallet_name.clone())
- .expect("encoded from string"),
- String::from_utf8(key_info.storage_name.clone())
- .expect("encoded from string"),
+ "Proof: `{pallet_name}::{storage_name}` (`max_values`: {:?}, `max_size`: {:?}, added: {}, mode: `{:?}`)",
 key_info.max_values,
 key_info.max_size,
 new_pov,
@@ -711,13 +705,9 @@
 comments.push(comment)
 },
 None => {
- let pallet = String::from_utf8(key_info.pallet_name.clone())
- .expect("encoded from string");
- let item = String::from_utf8(key_info.storage_name.clone())
- .expect("encoded from string");
 let comment = format!(
 "Proof: `{}::{}` (`max_values`: {:?}, `max_size`: {:?}, mode: `{:?}`)",
- pallet, item, key_info.max_values, key_info.max_size,
+ pallet_name, storage_name, key_info.max_values, key_info.max_size,
 used_pov_mode,
 );
 comments.push(comment);

From f5de090aa56d0801f043c2b13892191b3d0c9f50 Mon Sep 17 00:00:00 2001
From: Javier Viola <363911+pepoviola@users.noreply.github.com>
Date: Tue, 27 Feb 2024 08:24:07 -0300
Subject: [PATCH 44/51] fix(zombienet): increase timeout in download artifacts
 (#3376)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix timeouts when downloading the artifacts (e.g.
https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/5246040).

Thx!

---------

Co-authored-by: Bastian Köcher
---
 polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl
index db0a60ac1df6..5fe1b2ad2f1a 100644
--- a/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl
+++ b/polkadot/zombienet_tests/misc/0002-upgrade-node.zndsl
@@ -10,9 +10,8 @@ dave: parachain 2001 block height is at least 10 within 200 seconds
 # POLKADOT_PR_ARTIFACTS_URL=https://gitlab.parity.io/parity/mirrors/polkadot/-/jobs/1842869/artifacts/raw/artifacts
 # with the version of polkadot you want to download.

-# avg 30s in our infra
-alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds
-bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 60 seconds
+alice: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 240 seconds
+bob: run ./0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_ARTIFACTS_URL}}" within 240 seconds

 # update the cmd to add the flag '--insecure-validator-i-know-what-i-do'
 # once the base image include the version with this flag we can remove this logic.
alice: run ./0002-update-cmd.sh within 60 seconds

From 94c54d50322076e78a687785594eb84c78b823d7 Mon Sep 17 00:00:00 2001
From: Clara van Staden
Date: Tue, 27 Feb 2024 14:49:31 +0200
Subject: [PATCH 45/51] Snowbridge benchmark tests fix (#3424)

When running `cargo test -p bridge-hub-rococo-runtime --features
runtime-benchmarks`, two of the Snowbridge benchmark tests fail. The
reason is that when the runtime-benchmarks feature is enabled, the
`NoopMessageProcessor` message processor is used. The Snowbridge tests
rely on the outbound messages being processed by the message queue, so
that we can check the expected nonce and block digest logs.

This PR changes the conditional compilation to only use
`NoopMessageProcessor` when compiling the executable to run benchmarks
against, not when running tests.

---------

Co-authored-by: claravanstaden
---
 .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index 15cf045a9956..f576776696c3 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -102,8 +102,6 @@ use polkadot_runtime_common::prod_or_fast;

 #[cfg(feature = "runtime-benchmarks")]
 use benchmark_helpers::DoNothingRouter;
-#[cfg(not(feature = "runtime-benchmarks"))]
-use bridge_hub_common::BridgeHubMessageRouter;

 /// The address format for describing accounts.
 pub type Address = MultiAddress<AccountId, ()>;
@@ -367,11 +365,15 @@ parameter_types! {
 impl pallet_message_queue::Config for Runtime {
 type RuntimeEvent = RuntimeEvent;
 type WeightInfo = weights::pallet_message_queue::WeightInfo<Runtime>;
- #[cfg(feature = "runtime-benchmarks")]
+ // Use the NoopMessageProcessor exclusively for benchmarks, not for tests with the
+ // runtime-benchmarks feature, as tests require the BridgeHubMessageRouter to process messages.
+ // The "test" feature flag doesn't work, hence the reliance on the "std" feature, which is
+ // enabled during tests.
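+ //
+ // A rough sketch of the resulting selection (assuming `std` is only enabled for
+ // tests and native builds, as described above):
+ // - benchmark executable (no_std + runtime-benchmarks): NoopMessageProcessor
+ // - tests (std + runtime-benchmarks): BridgeHubMessageRouter
+ // - production runtime (no_std, no runtime-benchmarks): BridgeHubMessageRouter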
+ #[cfg(all(not(feature = "std"), feature = "runtime-benchmarks"))] type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = BridgeHubMessageRouter< + #[cfg(not(all(not(feature = "std"), feature = "runtime-benchmarks")))] + type MessageProcessor = bridge_hub_common::BridgeHubMessageRouter< xcm_builder::ProcessXcmMessage< AggregateMessageOrigin, xcm_executor::XcmExecutor, From 2cdda0e62dd3088d2fd09cea627059674070c277 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 27 Feb 2024 14:16:23 +0100 Subject: [PATCH 46/51] Bridge zombienet tests: Check amount received at destination (#3490) Related to https://github.com/paritytech/polkadot-sdk/issues/3475 --- .../rococo-westend/bridges_rococo_westend.sh | 12 ++++++++---- .../js-helpers/native-assets-balance-increased.js | 5 +++-- .../0001-asset-transfer/roc-reaches-westend.zndsl | 8 ++++---- .../0001-asset-transfer/wnd-reaches-rococo.zndsl | 8 ++++---- .../0001-asset-transfer/wroc-reaches-rococo.zndsl | 8 ++++---- .../0001-asset-transfer/wwnd-reaches-westend.zndsl | 8 ++++---- 6 files changed, 27 insertions(+), 22 deletions(-) diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index de5be2e0af88..479ab833abfc 100755 --- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -319,6 +319,7 @@ case "$1" in $XCM_VERSION ;; reserve-transfer-assets-from-asset-hub-rococo-local) + amount=$2 ensure_polkadot_js_api # send ROCs to Alice account on AHW limited_reserve_transfer_assets \ @@ -326,11 +327,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; withdraw-reserve-assets-from-asset-hub-rococo-local) + amount=$2 ensure_polkadot_js_api # send back only 100000000000 wrappedWNDs to Alice account on AHW limited_reserve_transfer_assets \ @@ -338,11 +340,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; reserve-transfer-assets-from-asset-hub-westend-local) + amount=$2 
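+ # The transfer amount is now supplied by the caller instead of being hardcoded;
+ # the zndsl tests below pass e.g. 5000000000000 (5 WND, assuming 12 decimals).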
ensure_polkadot_js_api # send WNDs to Alice account on AHR limited_reserve_transfer_assets \ @@ -350,11 +353,12 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 5000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; withdraw-reserve-assets-from-asset-hub-westend-local) + amount=$2 ensure_polkadot_js_api # send back only 100000000000 wrappedROCs to Alice account on AHR limited_reserve_transfer_assets \ @@ -362,7 +366,7 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": 3000000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": '$amount' } } ] }')" \ 0 \ "Unlimited" ;; diff --git a/bridges/testing/framework/js-helpers/native-assets-balance-increased.js b/bridges/testing/framework/js-helpers/native-assets-balance-increased.js index a35c753d9732..749c3e2fec32 100644 --- a/bridges/testing/framework/js-helpers/native-assets-balance-increased.js +++ b/bridges/testing/framework/js-helpers/native-assets-balance-increased.js @@ -3,12 +3,13 @@ async function run(nodeName, networkInfo, args) { const api = await zombie.connect(wsUri, userDefinedTypes); const accountAddress = args[0]; + const expectedIncrease = BigInt(args[1]); const initialAccountData = await api.query.system.account(accountAddress); const initialAccountBalance = initialAccountData.data['free']; while (true) { const accountData = await api.query.system.account(accountAddress); const accountBalance = accountData.data['free']; - if (accountBalance > initialAccountBalance) { + if (accountBalance > initialAccountBalance + expectedIncrease) { return accountBalance; } @@ -17,4 +18,4 @@ async function run(nodeName, networkInfo, args) { } } -module.exports = { run } +module.exports = {run} diff --git a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl index 4725362ae826..a58520ccea65 100644 --- a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl @@ -2,11 +2,11 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config -# send ROC to //Alice from Rococo AH to Westend AH -asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with 
"reserve-transfer-assets-from-asset-hub-rococo-local" within 120 seconds +# send 5 ROC to //Alice from Rococo AH to Westend AH +asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000" within 120 seconds -# check that //Alice received the ROC on Westend AH -asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 300 seconds +# check that //Alice received at least 4.8 ROC on Westend AH +asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Rococo" within 300 seconds # check that the relayer //Charlie is rewarded by Westend AH bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 30 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl index 77267239c3b1..fedb78cc2103 100644 --- a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl @@ -2,11 +2,11 @@ Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml Creds: config -# send WND to //Alice from Westend AH to Rococo AH -asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 120 seconds +# send 5 WND to //Alice from Westend AH to Rococo AH +asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local 5000000000000" within 120 seconds -# check that //Alice received the WND on Rococo AH -asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 300 seconds +# check that //Alice received at least 4.8 WND on Rococo AH +asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Westend" within 300 seconds # check that the relayer //Charlie is rewarded by Rococo AH bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 30 seconds diff --git a/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl index f72b76e6026b..68b888b6858e 100644 --- a/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl +++ b/bridges/testing/tests/0001-asset-transfer/wroc-reaches-rococo.zndsl @@ -2,9 +2,9 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml Creds: config -# send wROC back to Alice from Westend AH to Rococo AH -asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-westend-local" within 120 seconds +# send 3 wROC back to Alice from Westend AH to Rococo AH +asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-westend-local 3000000000000" within 
120 seconds

-# check that //Alice received the wROC on Rococo AH
+# check that //Alice received at least 2.8 wROC on Rococo AH
 # (we wait until //Alice account increases here - there are no other transactions that may increase it)
-asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds
+asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,2800000000000" within 300 seconds
diff --git a/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl
index 9d893c8d90a8..1a8a16181954 100644
--- a/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl
+++ b/bridges/testing/tests/0001-asset-transfer/wwnd-reaches-westend.zndsl
@@ -2,9 +2,9 @@ Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset
Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml
Creds: config

-# send wWND back to Alice from Rococo AH to Westend AH
-asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local" within 120 seconds
+# send 3 wWND back to Alice from Rococo AH to Westend AH
+asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local 3000000000000" within 120 seconds

-# check that //Alice received the wWND on Westend AH
+# check that //Alice received at least 2.8 wWND on Westend AH
 # (we wait until //Alice account increases here - there are no other transactions that may increase it)
-asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 300 seconds
+asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,2800000000000" within 300 seconds

From 29369a4e7ca92d38a305a3a3115609718785eba9 Mon Sep 17 00:00:00 2001
From: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Date: Tue, 27 Feb 2024 14:50:21 +0000
Subject: [PATCH 47/51] Add documentation around FRAME Origin (#3362)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Does the following:

- Add a reference doc page named `frame_runtime_types`, which explains
what types like `RuntimeOrigin`, `RuntimeCall` etc. are.
- On top of it, it adds a reference doc page called `frame_origin` which
explains a few important patterns that we use around origins.
- And finally brushes up `#[frame::origin]` docs.
- Updates the theme, sidebar and favicon to look like: [screenshot omitted:
"Screenshot 2024-02-20 at 12 16 00"]

All of this was inspired by
https://substrate.stackexchange.com/questions/10992/how-do-you-find-the-public-key-for-the-medium-spender-track-origin/10993

closes https://github.com/paritytech/polkadot-sdk-docs/issues/45
closes https://github.com/paritytech/polkadot-sdk-docs/issues/43
contributes / overlaps with https://github.com/paritytech/polkadot-sdk/pull/2638
cc @liamaharon

deprecation companion: https://github.com/substrate-developer-hub/substrate-docs/pull/2131
pba-content companion: https://github.com/Polkadot-Blockchain-Academy/pba-content/pull/977

---------

Co-authored-by: Radha <86818441+DrW3RK@users.noreply.github.com>
Co-authored-by: Sebastian Kunert
Co-authored-by: Gonçalo Pestana
Co-authored-by: Liam Aharon
---
 .gitlab/pipeline/build.yml | 2 +-
 Cargo.lock | 9 +
 docs/mermaid/IA.mmd | 2 +-
 docs/mermaid/outer_runtime_types.mmd | 3 +
 docs/sdk/Cargo.toml | 20 +-
 docs/sdk/headers/header.html | 139 ++++++++
 docs/sdk/headers/theme.css | 16 +
 docs/sdk/headers/toc.html | 54 ----
 docs/sdk/src/guides/your_first_pallet/mod.rs | 9 +-
 docs/sdk/src/lib.rs | 7 +-
 docs/sdk/src/meta_contributing.rs | 6 +-
 .../src/reference_docs/extrinsic_encoding.rs | 2 +-
 .../reference_docs/frame_composite_enums.rs | 1 -
 docs/sdk/src/reference_docs/frame_origin.rs | 270 +++++++++++++++-
 .../src/reference_docs/frame_runtime_types.rs | 306 ++++++++++++++++++
 docs/sdk/src/reference_docs/mod.rs | 8 +-
 substrate/frame/src/lib.rs | 6 +
 .../src/construct_runtime/expand/origin.rs | 2 +-
 substrate/frame/support/procedural/src/lib.rs | 17 +-
 substrate/frame/support/src/lib.rs | 68 +++-
 substrate/frame/system/src/lib.rs | 1 +
 21 files changed, 839 insertions(+), 109 deletions(-)
 create mode 100644 docs/mermaid/outer_runtime_types.mmd
 create mode 100644 docs/sdk/headers/header.html
 create mode 100644 docs/sdk/headers/theme.css
 delete mode 100644 docs/sdk/headers/toc.html
 delete mode 100644 docs/sdk/src/reference_docs/frame_composite_enums.rs
 create mode 100644 docs/sdk/src/reference_docs/frame_runtime_types.rs

diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml
index 15b4869997be..423587c1fb57 100644
--- a/.gitlab/pipeline/build.yml
+++ b/.gitlab/pipeline/build.yml
@@ -91,7 +91,7 @@ build-rustdoc:
 - .run-immediately
 variables:
 SKIP_WASM_BUILD: 1
- RUSTDOCFLAGS: ""
+ RUSTDOCFLAGS: "--default-theme=ayu --html-in-header ./docs/sdk/headers/header.html --extend-css ./docs/sdk/headers/theme.css"
 artifacts:
 name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc"
 when: on_success

diff --git a/Cargo.lock b/Cargo.lock
index 410b45d00183..c52feafe7a70 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13384,11 +13384,20 @@ dependencies = [
 "cumulus-pallet-parachain-system",
 "docify",
 "frame",
+ "frame-system",
 "kitchensink-runtime",
 "pallet-aura",
+ "pallet-authorship",
+ "pallet-balances",
+ "pallet-collective",
 "pallet-default-config-example",
+ "pallet-democracy",
 "pallet-examples",
+ "pallet-multisig",
+ "pallet-proxy",
 "pallet-timestamp",
+ "pallet-transaction-payment",
+ "pallet-utility",
 "parity-scale-codec",
 "sc-cli",
 "sc-client-db",

diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd
index 93d3e92814cf..4eb50bcf96a8 100644
--- a/docs/mermaid/IA.mmd
+++ b/docs/mermaid/IA.mmd
@@ -3,7 +3,7 @@ flowchart

 devhub --> polkadot_sdk
 devhub --> reference_docs
- devhub --> tutorial
+ devhub --> guides

 polkadot_sdk --> substrate
 polkadot_sdk --> frame
diff --git a/docs/mermaid/outer_runtime_types.mmd b/docs/mermaid/outer_runtime_types.mmd
new
file mode 100644 index 000000000000..c909df16af1f --- /dev/null +++ b/docs/mermaid/outer_runtime_types.mmd @@ -0,0 +1,3 @@ +flowchart LR + RuntimeCall --"TryInto"--> PalletCall + PalletCall --"Into"--> RuntimeCall diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 05aced8751ae..576a81834f8c 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -17,7 +17,10 @@ workspace = true # Needed for all FRAME-based code parity-scale-codec = { version = "3.0.0", default-features = false } scale-info = { version = "2.6.0", default-features = false } -frame = { path = "../../substrate/frame", features = ["experimental", "runtime"] } +frame = { path = "../../substrate/frame", features = [ + "experimental", + "runtime", +] } pallet-examples = { path = "../../substrate/frame/examples" } pallet-default-config-example = { path = "../../substrate/frame/examples/default-config" } @@ -52,7 +55,18 @@ cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-syst ] } parachain-info = { package = "staging-parachain-info", path = "../../cumulus/parachains/pallets/parachain-info" } pallet-aura = { path = "../../substrate/frame/aura", default-features = false } + +# Pallets and FRAME internals pallet-timestamp = { path = "../../substrate/frame/timestamp" } +pallet-balances = { path = "../../substrate/frame/balances" } +pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } +pallet-utility = { path = "../../substrate/frame/utility" } +pallet-multisig = { path = "../../substrate/frame/multisig" } +pallet-proxy = { path = "../../substrate/frame/proxy" } +pallet-authorship = { path = "../../substrate/frame/authorship" } +pallet-collective = { path = "../../substrate/frame/collective" } +pallet-democracy = { path = "../../substrate/frame/democracy" } +frame-system = { path = "../../substrate/frame/system" } # Primitives sp-io = { path = "../../substrate/primitives/io" } @@ -64,9 +78,5 @@ sp-runtime = { path = "../../substrate/primitives/runtime" } # XCM xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } -[dev-dependencies] -parity-scale-codec = "3.6.5" -scale-info = "2.9.0" - [features] experimental = ["pallet-aura/experimental"] diff --git a/docs/sdk/headers/header.html b/docs/sdk/headers/header.html new file mode 100644 index 000000000000..68dc5d5509e6 --- /dev/null +++ b/docs/sdk/headers/header.html @@ -0,0 +1,139 @@ + + + diff --git a/docs/sdk/headers/theme.css b/docs/sdk/headers/theme.css new file mode 100644 index 000000000000..bb9254ec4a82 --- /dev/null +++ b/docs/sdk/headers/theme.css @@ -0,0 +1,16 @@ +:root { + --polkadot-pink: #E6007A ; + --polkadot-green: #56F39A ; + --polkadot-lime: #D3FF33 ; + --polkadot-cyan: #00B2FF ; + --polkadot-purple: #552BBF ; + } + +body > nav.sidebar > div.sidebar-crate > a > img { + /* logo width, given that the sidebar width is 200px; */ + width: 190px; +} + +body nav.sidebar { + flex: 0 0 250px; +} diff --git a/docs/sdk/headers/toc.html b/docs/sdk/headers/toc.html deleted file mode 100644 index a4a074cb4f31..000000000000 --- a/docs/sdk/headers/toc.html +++ /dev/null @@ -1,54 +0,0 @@ - - diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index 29cdda36ed15..20a8639b2700 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -128,8 +128,8 @@ //! //! Recall that within our pallet, (almost) all blocks of code are generic over ``. And, //! 
because `trait Config: frame_system::Config`, we can get access to all items in `Config` (or -//! `frame_system::Config`) using `T::NameOfItem`. This is all within the boundaries of how Rust -//! traits and generics work. If unfamiliar with this pattern, read +//! `frame_system::Config`) using `T::NameOfItem`. This is all within the boundaries of how +//! Rust traits and generics work. If unfamiliar with this pattern, read //! [`crate::reference_docs::trait_based_programming`] before going further. //! //! Crucially, a typical FRAME runtime contains a `struct Runtime`. The main role of this `struct` @@ -270,7 +270,7 @@ #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)] //! //! > These `Runtime*` types are better explained in -//! > [`crate::reference_docs::frame_composite_enums`]. +//! > [`crate::reference_docs::frame_runtime_types`]. //! //! Then, we can rewrite the `transfer` dispatchable as such: #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_v2)] @@ -285,7 +285,6 @@ //! [`crate::guides::your_first_pallet::pallet_v2::tests::runtime_v2::RuntimeEvent`]. //! //! -//! //! ## What Next? //! //! The following topics where used in this guide, but not covered in depth. It is suggested to @@ -293,7 +292,7 @@ //! //! - [`crate::reference_docs::safe_defensive_programming`]. //! - [`crate::reference_docs::frame_origin`]. -//! - [`crate::reference_docs::frame_composite_enums`]. +//! - [`crate::reference_docs::frame_runtime_types`]. //! - The pallet we wrote in this guide was using `dev_mode`, learn more in //! [`frame::pallet_macros::config`]. //! - Learn more about the individual pallet items/macros, such as event and errors and call, in diff --git a/docs/sdk/src/lib.rs b/docs/sdk/src/lib.rs index 075d9ddaffe5..e211476d2514 100644 --- a/docs/sdk/src/lib.rs +++ b/docs/sdk/src/lib.rs @@ -15,7 +15,7 @@ //! - Start by learning about the the [`polkadot_sdk`], its structure and context. //! - Then, head over the [`guides`]. This modules contains in-depth guides about the most important //! user-journeys of the Polkadot SDK. -//! - Whilst reading the guides, you might find back-links to [`crate::reference_docs`]. +//! - Whilst reading the guides, you might find back-links to [`reference_docs`]. //! - Finally, is the parent website of this crate that contains the //! list of further tools related to the Polkadot SDK. //! @@ -25,6 +25,11 @@ #![doc = simple_mermaid::mermaid!("../../mermaid/IA.mmd")] #![warn(rustdoc::broken_intra_doc_links)] #![warn(rustdoc::private_intra_doc_links)] +#![doc(html_favicon_url = "https://polkadot.network/favicon-32x32.png")] +#![doc( + html_logo_url = "https://europe1.discourse-cdn.com/standard21/uploads/polkadot2/original/1X/eb57081e2bb7c39e5fcb1a98b443e423fa4448ae.svg" +)] +#![doc(issue_tracker_base_url = "https://github.com/paritytech/polkadot-sdk/issues")] /// Meta information about this crate, how it is built, what principles dictates its evolution and /// how one can contribute to it. diff --git a/docs/sdk/src/meta_contributing.rs b/docs/sdk/src/meta_contributing.rs index 7ecf8b0adfd3..fcdcea9934bb 100644 --- a/docs/sdk/src/meta_contributing.rs +++ b/docs/sdk/src/meta_contributing.rs @@ -101,7 +101,7 @@ //! * Before even getting started, what is with all of this ``? We link to //! [`crate::reference_docs::trait_based_programming`]. //! * First, the name. Why is this called `pallet::call`? This goes back to `enum Call`, which is -//! explained in [`crate::reference_docs::frame_composite_enums`]. Build on top of this! 
+//! explained in [`crate::reference_docs::frame_runtime_types`]. Build on top of this! //! * Then, what is `origin`? Just an account id? [`crate::reference_docs::frame_origin`]. //! * Then, what is `DispatchResult`? Why is this called *dispatch*? Probably something that can be //! explained in the documentation of [`frame::prelude::DispatchResult`]. @@ -138,7 +138,9 @@ //! injected, run: //! //! ```sh -//! SKIP_WASM_BUILD=1 RUSTDOCFLAGS="--html-in-header $(pwd)/docs/sdk/headers/toc.html" cargo doc -p polkadot-sdk-docs --no-deps --open +//! SKIP_WASM_BUILD=1 \ +//! RUSTDOCFLAGS="--html-in-header $(pwd)/docs/sdk/headers/header.html --extend-css $(pwd)/docs/sdk/headers/theme.css --default-theme=ayu" \ +//! cargo doc -p polkadot-sdk-docs --no-deps --open //! ``` //! //! If even faster build time for docs is needed, you can temporarily remove most of the diff --git a/docs/sdk/src/reference_docs/extrinsic_encoding.rs b/docs/sdk/src/reference_docs/extrinsic_encoding.rs index 9008f8f835f5..8c8568a228fa 100644 --- a/docs/sdk/src/reference_docs/extrinsic_encoding.rs +++ b/docs/sdk/src/reference_docs/extrinsic_encoding.rs @@ -127,7 +127,7 @@ //! runtimes, a call is represented as an enum of enums, where the outer enum represents the FRAME //! pallet being called, and the inner enum represents the call being made within that pallet, and //! any arguments to it. Read more about the call enum -//! [here][crate::reference_docs::frame_composite_enums]. +//! [here][crate::reference_docs::frame_runtime_types]. //! //! FRAME `Call` enums are automatically generated, and end up looking something like this: #![doc = docify::embed!("./src/reference_docs/extrinsic_encoding.rs", call_data)] diff --git a/docs/sdk/src/reference_docs/frame_composite_enums.rs b/docs/sdk/src/reference_docs/frame_composite_enums.rs deleted file mode 100644 index 6051cd534467..000000000000 --- a/docs/sdk/src/reference_docs/frame_composite_enums.rs +++ /dev/null @@ -1 +0,0 @@ -//! # FRAME Composite Enums diff --git a/docs/sdk/src/reference_docs/frame_origin.rs b/docs/sdk/src/reference_docs/frame_origin.rs index a4078377cd77..49533157b014 100644 --- a/docs/sdk/src/reference_docs/frame_origin.rs +++ b/docs/sdk/src/reference_docs/frame_origin.rs @@ -1,14 +1,260 @@ //! # FRAME Origin //! -//! Notes: -//! -//! - Def talk about account abstraction and how it is a solved issue in frame. See Gav's talk in -//! Protocol Berg 2023 -//! - system's raw origin, how it is amalgamated with other origins into one type -//! [`frame_composite_enums`] -//! - signed origin -//! - unsigned origin, link to [`fee_less_runtime`] -//! - Root origin, how no one can obtain it. -//! - Abstract origin: how FRAME allows you to express "origin is 2/3 of the this body or 1/2 of -//! that body or half of the token holders". -//! - `type CustomOrigin: EnsureOrigin<_>` in pallets. +//! Let's start by clarifying a common wrong assumption about Origin: +//! +//! **ORIGIN IS NOT AN ACCOUNT ID**. +//! +//! FRAME's origin abstractions allow you to convey meanings far beyond just an account-id being the +//! caller of an extrinsic. Nonetheless, an account-id having signed an extrinsic is one of the +//! meanings that an origin can convey. This is the commonly used [`frame_system::ensure_signed`], +//! where the return value happens to be an account-id. +//! +//! Instead, let's establish the following as the correct definition of an origin: +//! +//! > The origin type represents the privilege level of the caller of an extrinsic. +//! +//! 
That is, an extrinsic, through checking the origin, can *express what privilege level it wishes +//! to impose on the caller of the extrinsic*. One of those checks can be as simple as "*any account +//! that has signed a statement can pass*". +//! +//! But the origin system can also express more abstract and complicated privilege levels. For +//! example: +//! +//! * If the majority of token holders agreed upon this. This is more or less what the +//! [`pallet_democracy`] does under the hood ([reference](https://github.com/paritytech/polkadot-sdk/blob/edd95b3749754d2ed0c5738588e872c87be91624/substrate/frame/democracy/src/lib.rs#L1603-L1633)). +//! * If a specific ratio of an instance of [`pallet_collective`]/DAO agrees upon this. +//! * If another consensus system, for example a bridged network or a parachain, agrees upon this. +//! * If the majority of validator/authority set agrees upon this[^1]. +//! * If caller holds a particular NFT. +//! +//! and many more. +//! +//! ## Context +//! +//! First, let's look at where the `origin` type is encountered in a typical pallet. The `origin: +//! OriginFor` has to be the first argument of any given callable extrinsic in FRAME: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", call_simple)] +//! +//! Typically, the code of an extrinsic starts with an origin check, such as +//! [`frame_system::ensure_signed`]. +//! +//! Note that [`OriginFor`](frame_system::pallet_prelude::OriginFor) is merely a shorthand for +//! [`frame_system::Config::RuntimeOrigin`]. Given the name prefix `Runtime`, we can learn that +//! `RuntimeOrigin` is similar to `RuntimeCall` and others, a runtime composite enum that is +//! amalgamated at the runtime level. Read [`crate::reference_docs::frame_runtime_types`] to +//! familiarize yourself with these types. +//! +//! To understand this better, we will next create a pallet with a custom origin, which will add a +//! new variant to `RuntimeOrigin`. +//! +//! ## Adding Custom Pallet Origin to the Runtime +//! +//! For example, given a pallet that defines the following custom origin: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin)] +//! +//! And a runtime with the following pallets: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", runtime_exp)] +//! +//! The type [`crate::reference_docs::frame_origin::runtime_for_origin::RuntimeOrigin`] is expanded. +//! This `RuntimeOrigin` contains a variant for the [`frame_system::RawOrigin`] and the custom +//! origin of the pallet. +//! +//! > Notice how the [`frame_system::ensure_signed`] is nothing more than a `match` statement. If +//! > you want to know where the actual origin of an extrinsic is set (and the signature +//! > verification happens, if any), see +//! > [`sp_runtime::generic::CheckedExtrinsic#trait-implementations`], specifically +//! > [`sp_runtime::traits::Applyable`]'s implementation. +//! +//! ## Asserting on a Custom Internal Origin +//! +//! In order to assert on a custom origin that is defined within your pallet, we need a way to first +//! convert the `::RuntimeOrigin` into the local `enum Origin` of the +//! current pallet. This is a common process that is explained in +//! [`crate::reference_docs::frame_runtime_types# +//! adding-further-constraints-to-runtime-composite-enums`]. +//! +//! We use the same process here to express that `RuntimeOrigin` has a number of additional bounds, +//! as follows. +//! +//! 1. Defining a custom `RuntimeOrigin` with further bounds in the pallet. 
+#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin_bound)] +//! +//! 2. Using it in the pallet. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", custom_origin_usage)] +//! +//! ## Asserting on a Custom External Origin +//! +//! Very often, a pallet wants to have a parameterized origin that is **NOT** defined within the +//! pallet. In other words, a pallet wants to delegate an origin check to something that is +//! specified later at the runtime level. Like many other parameterizations in FRAME, this implies +//! adding a new associated type to `trait Config`. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_def)] +//! +//! Then, within the pallet, we can simply use this "unknown" origin check type: +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_usage)] +//! +//! Finally, at the runtime, any implementation of [`frame::traits::EnsureOrigin`] can be passed. +#![doc = docify::embed!("./src/reference_docs/frame_origin.rs", external_origin_provide)] +//! +//! Indeed, some of these implementations of [`frame::traits::EnsureOrigin`] are similar to the ones +//! that we know about: [`frame::runtime::prelude::EnsureSigned`], +//! [`frame::runtime::prelude::EnsureSignedBy`], [`frame::runtime::prelude::EnsureRoot`], +//! [`frame::runtime::prelude::EnsureNone`], etc. But, there are also many more that are not known +//! to us, and are defined in other pallets. +//! +//! For example, [`pallet_collective`] defines [`pallet_collective::EnsureMember`] and +//! [`pallet_collective::EnsureProportionMoreThan`] and many more, which is exactly what we alluded +//! to earlier in this document. +//! +//! Make sure to check the full list of [implementors of +//! `EnsureOrigin`](frame::traits::EnsureOrigin#implementors) for more inspiration. +//! +//! ## Obtaining Abstract Origins +//! +//! So far we have learned that FRAME pallets can assert on custom and abstract origin types, +//! whether they are defined within the pallet or not. But how can we obtain these abstract origins? +//! +//! > All extrinsics that come from the outer world can generally only be obtained as either +//! > `signed` or `none` origin. +//! +//! Generally, these abstract origins are only obtained within the runtime, when a call is +//! dispatched within the runtime. +//! +//! ## Further References +//! +//! - [Gavin Wood's speech about FRAME features at Protocol Berg 2023.](https://youtu.be/j7b8Upipmeg?si=83_XUgYuJxMwWX4g&t=195) +//! - [A related StackExchange question.](https://substrate.stackexchange.com/questions/10992/how-do-you-find-the-public-key-for-the-medium-spender-track-origin) +//! +//! [^1]: Inherents are essentially unsigned extrinsics that need an [`frame_system::ensure_none`] +//! origin check, and through the virtue of being an inherent, are agreed upon by all validators. 
+ +use frame::prelude::*; + +#[frame::pallet(dev_mode)] +pub mod pallet_for_origin { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(call_simple)] + #[pallet::call] + impl Pallet { + pub fn do_something(_origin: OriginFor) -> DispatchResult { + // ^^^^^^^^^^^^^^^^^^^^^ + todo!(); + } + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_custom_origin { + use super::*; + + #[docify::export(custom_origin_bound)] + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeOrigin: From<::RuntimeOrigin> + + Into::RuntimeOrigin>>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(custom_origin)] + /// A dummy custom origin. + #[pallet::origin] + #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + pub enum Origin { + /// If all holders of a particular NFT have agreed upon this. + AllNftHolders, + /// If all validators have agreed upon this. + ValidatorSet, + } + + #[docify::export(custom_origin_usage)] + #[pallet::call] + impl Pallet { + pub fn only_validators(origin: OriginFor) -> DispatchResult { + // first, we convert from `::RuntimeOrigin` to `::RuntimeOrigin` + let local_runtime_origin = <::RuntimeOrigin as From< + ::RuntimeOrigin, + >>::from(origin); + // then we convert to `origin`, if possible + let local_origin = + local_runtime_origin.into().map_err(|_| "invalid origin type provided")?; + ensure!(matches!(local_origin, Origin::ValidatorSet), "Not authorized"); + todo!(); + } + } +} + +pub mod runtime_for_origin { + use super::pallet_with_custom_origin; + use frame::{runtime::prelude::*, testing_prelude::*}; + + #[docify::export(runtime_exp)] + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithCustomOrigin: pallet_with_custom_origin, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_with_custom_origin::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_external_origin { + use super::*; + #[docify::export(external_origin_def)] + #[pallet::config] + pub trait Config: frame_system::Config { + type ExternalOrigin: EnsureOrigin; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export(external_origin_usage)] + #[pallet::call] + impl Pallet { + pub fn externally_checked_ext(origin: OriginFor) -> DispatchResult { + let _ = T::ExternalOrigin::ensure_origin(origin)?; + todo!(); + } + } +} + +pub mod runtime_for_external_origin { + use super::*; + use frame::{runtime::prelude::*, testing_prelude::*}; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithExternalOrigin: pallet_with_external_origin, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + #[docify::export(external_origin_provide)] + impl pallet_with_external_origin::Config for Runtime { + type ExternalOrigin = EnsureSigned<::AccountId>; + } +} diff --git a/docs/sdk/src/reference_docs/frame_runtime_types.rs b/docs/sdk/src/reference_docs/frame_runtime_types.rs new file mode 100644 index 000000000000..b5838b79e518 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_runtime_types.rs @@ -0,0 +1,306 @@ +//! # FRAME Runtime Types +//! +//! 
This reference document briefly explores the idea around types generated at the runtime level by +//! the FRAME macros. +//! +//! > As of now, many of these important types are generated within the internals of +//! > [`construct_runtime`], and there is no easy way for you to visually know they exist. +//! > [#polkadot-sdk#1378](https://github.com/paritytech/polkadot-sdk/pull/1378) is meant to +//! > significantly improve this. Exploring the rust-docs of a runtime, such as [`runtime`] which is +//! > defined in this module is as of now the best way to learn about these types. +//! +//! ## Composite Enums +//! +//! Many types within a FRAME runtime follow the following structure: +//! +//! * Each individual pallet defines a type, for example `Foo`. +//! * At the runtime level, these types are amalgamated into a single type, for example +//! `RuntimeFoo`. +//! +//! As the names suggest, all composite enums in a FRAME runtime start their name with `Runtime`. +//! For example, `RuntimeCall` is a representation of the most high level `Call`-able type in the +//! runtime. +//! +//! Composite enums are generally convertible to their individual parts as such: +#![doc = simple_mermaid::mermaid!("../../../mermaid/outer_runtime_types.mmd")] +//! +//! In that one can always convert from the inner type into the outer type, but not vice versa. This +//! is usually expressed by implementing `From`, `TryFrom`, `From>` and similar traits. +//! +//! ### Example +//! +//! We provide the following two pallets: [`pallet_foo`] and [`pallet_bar`]. Each define a +//! dispatchable, and `Foo` also defines a custom origin. Lastly, `Bar` defines an additional +//! `GenesisConfig`. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_foo)] +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_bar)] +//! +//! Let's explore how each of these affect the [`RuntimeCall`], [`RuntimeOrigin`] and +//! [`RuntimeGenesisConfig`] generated in [`runtime`] by respectively. +//! +//! As observed, [`RuntimeCall`] has 3 variants, one for each pallet and one for `frame_system`. If +//! you explore further, you will soon realize that each variant is merely a pointer to the `Call` +//! type in each pallet, for example [`pallet_foo::Call`]. +//! +//! [`RuntimeOrigin`]'s [`OriginCaller`] has two variants, one for system, and one for `pallet_foo` +//! which utilized [`frame::pallet_macros::origin`]. +//! +//! Finally, [`RuntimeGenesisConfig`] is composed of `frame_system` and a variant for `pallet_bar`'s +//! [`pallet_bar::GenesisConfig`]. +//! +//! You can find other composite enums by scanning [`runtime`] for other types who's name starts +//! with `Runtime`. Some of the more noteworthy ones are: +//! +//! - [`RuntimeEvent`] +//! - [`RuntimeError`] +//! - [`RuntimeHoldReason`] +//! +//! ### Adding Further Constraints to Runtime Composite Enums +//! +//! This section explores a common scenario where a pallet has access to one of these runtime +//! composite enums, but it wishes to further specify it by adding more trait bounds to it. +//! +//! Let's take the example of `RuntimeCall`. This is an associated type in +//! [`frame_system::Config::RuntimeCall`], and all pallets have access to this type, because they +//! have access to [`frame_system::Config`]. Finally, this type is meant to be set to outer call of +//! the entire runtime. +//! +//! But, let's not forget that this is information that *we know*, and the Rust compiler does not. +//! 
All that the rust compiler knows about this type is *ONLY* what the trait bounds of +//! [`frame_system::Config::RuntimeCall`] are specifying: +#![doc = docify::embed!("../../substrate/frame/system/src/lib.rs", system_runtime_call)] +//! +//! So, when at a given pallet, one accesses `::RuntimeCall`, the type is +//! extremely opaque from the perspective of the Rust compiler. +//! +//! How can a pallet access the `RuntimeCall` type with further constraints? For example, each +//! pallet has its own `enum Call`, and knows that its local `Call` is a part of `RuntimeCall`, +//! therefore there should be a `impl From> for RuntimeCall`. +//! +//! The only way to express this using Rust's associated types is for the pallet to **define its own +//! associated type `RuntimeCall`, and further specify what it thinks `RuntimeCall` should be**. +//! +//! In this case, we will want to assert the existence of [`frame::traits::IsSubType`], which is +//! very similar to [`TryFrom`]. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", custom_runtime_call)] +//! +//! And indeed, at the runtime level, this associated type would be the same `RuntimeCall` that is +//! passed to `frame_system`. +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", pallet_with_specific_runtime_call_impl)] +//! +//! > In other words, the degree of specificity that [`frame_system::Config::RuntimeCall`] has is +//! > not enough for the pallet to work with. Therefore, the pallet has to define its own associated +//! > type representing `RuntimeCall`. +//! +//! Another way to look at this is: +//! +//! `pallet_with_specific_runtime_call::Config::RuntimeCall` and `frame_system::Config::RuntimeCall` +//! are two different representations of the same concrete type that is only known when the runtime +//! is being constructed. +//! +//! Now, within this pallet, this new `RuntimeCall` can be used, and it can use its new trait +//! bounds, such as being [`frame::traits::IsSubType`]: +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", custom_runtime_call_usages)] +//! +//! ### Asserting Equality of Multiple Runtime Composite Enums +//! +//! Recall that in the above example, `::RuntimeCall` and `::RuntimeCall` are expected to be equal types, but at the compile-time we +//! have to represent them with two different associated types with different bounds. Would it not +//! be cool if we had a test to make sure they actually resolve to the same concrete type once the +//! runtime is constructed? The following snippet exactly does that: +#![doc = docify::embed!("./src/reference_docs/frame_runtime_types.rs", assert_equality)] +//! +//! We leave it to the reader to further explore what [`frame::traits::Hooks::integrity_test`] is, +//! and what [`core::any::TypeId`] is. Another way to assert this is using +//! [`frame::traits::IsType`]. +//! +//! ## Type Aliases +//! +//! A number of type aliases are generated by the `construct_runtime` which are also noteworthy: +//! +//! * [`runtime::PalletFoo`] is an alias to [`pallet_foo::Pallet`]. Same for `PalletBar`, and +//! `System` +//! * [`runtime::AllPalletsWithSystem`] is an alias for a tuple of all of the above. This type is +//! important to FRAME internals such as `executive`, as it implements traits such as +//! [`frame::traits::Hooks`]. +//! +//! ## Further Details +//! +//! * [`crate::reference_docs::frame_origin`] explores further details about the usage of +//! `RuntimeOrigin`. +//! 
* [`RuntimeCall`] is a particularly interesting composite enum as it dictates the encoding of an +//! extrinsic. See [`crate::reference_docs::signed_extensions`] for more information. +//! * See the documentation of [`construct_runtime`]. +//! * See the corresponding lecture in the [pba-book](https://polkadot-blockchain-academy.github.io/pba-book/frame/outer-enum/page.html). +//! +//! +//! [`construct_runtime`]: frame::runtime::prelude::construct_runtime +//! [`runtime::PalletFoo`]: crate::reference_docs::frame_runtime_types::runtime::PalletFoo +//! [`runtime::AllPalletsWithSystem`]: crate::reference_docs::frame_runtime_types::runtime::AllPalletsWithSystem +//! [`runtime`]: crate::reference_docs::frame_runtime_types::runtime +//! [`pallet_foo`]: crate::reference_docs::frame_runtime_types::pallet_foo +//! [`pallet_foo::Call`]: crate::reference_docs::frame_runtime_types::pallet_foo::Call +//! [`pallet_foo::Pallet`]: crate::reference_docs::frame_runtime_types::pallet_foo::Pallet +//! [`pallet_bar`]: crate::reference_docs::frame_runtime_types::pallet_bar +//! [`pallet_bar::GenesisConfig`]: crate::reference_docs::frame_runtime_types::pallet_bar::GenesisConfig +//! [`RuntimeEvent`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeEvent +//! [`RuntimeGenesisConfig`]: +//! crate::reference_docs::frame_runtime_types::runtime::RuntimeGenesisConfig +//! [`RuntimeOrigin`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeOrigin +//! [`OriginCaller`]: crate::reference_docs::frame_runtime_types::runtime::OriginCaller +//! [`RuntimeError`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeError +//! [`RuntimeCall`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeCall +//! [`RuntimeHoldReason`]: crate::reference_docs::frame_runtime_types::runtime::RuntimeHoldReason + +use frame::prelude::*; + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod pallet_foo { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::origin] + #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + pub enum Origin { + A, + B, + } + + #[pallet::pallet] + pub struct Pallet<T>(_); + + #[pallet::call] + impl<T: Config> Pallet<T> { + pub fn foo(_origin: OriginFor<T>) -> DispatchResult { + todo!(); + } + + pub fn other(_origin: OriginFor<T>) -> DispatchResult { + todo!(); + } + } +} + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod pallet_bar { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet<T>(_); + + #[pallet::genesis_config] + #[derive(DefaultNoBound)] + pub struct GenesisConfig<T: Config> { + pub initial_account: Option<T::AccountId>, + } + + #[pallet::genesis_build] + impl<T: Config> BuildGenesisConfig for GenesisConfig<T> { + fn build(&self) {} + } + + #[pallet::call] + impl<T: Config> Pallet<T> { + pub fn bar(_origin: OriginFor<T>) -> DispatchResult { + todo!(); + } + } +} + +pub mod runtime { + use super::{pallet_bar, pallet_foo}; + use frame::{runtime::prelude::*, testing_prelude::*}; + + #[docify::export(runtime_exp)] + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletFoo: pallet_foo, + PalletBar: pallet_bar, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock<Self>; + } + + impl pallet_foo::Config for Runtime {} + impl pallet_bar::Config for Runtime {} +} + +#[frame::pallet(dev_mode)] +pub mod pallet_with_specific_runtime_call { + use super::*; + use frame::traits::IsSubType; + + #[docify::export(custom_runtime_call)] + /// A pallet that wants to further narrow down what `RuntimeCall` is. + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeCall: IsSubType<Call<Self>>; + } + + #[pallet::pallet] + pub struct Pallet<T>(_); + + // note that this pallet needs some `call` to have an `enum Call`. + #[pallet::call] + impl<T: Config> Pallet<T> { + pub fn foo(_origin: OriginFor<T>) -> DispatchResult { + todo!(); + } + } + + #[docify::export(custom_runtime_call_usages)] + impl<T: Config> Pallet<T> { + fn _do_something_useful_with_runtime_call(call: <T as Config>::RuntimeCall) { + // check if the runtime call given is of this pallet's variant. + let _maybe_my_call: Option<&Call<T>> = call.is_sub_type(); + todo!(); + } + } + + #[docify::export(assert_equality)] + #[pallet::hooks] + impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { + fn integrity_test() { + use core::any::TypeId; + assert_eq!( + TypeId::of::<<T as Config>::RuntimeCall>(), + TypeId::of::<<T as frame_system::Config>::RuntimeCall>() + ); + } + } +} + +pub mod runtime_with_specific_runtime_call { + use super::pallet_with_specific_runtime_call; + use frame::{runtime::prelude::*, testing_prelude::*}; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + PalletWithSpecificRuntimeCall: pallet_with_specific_runtime_call, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock<Self>; + } + + #[docify::export(pallet_with_specific_runtime_call_impl)] + impl pallet_with_specific_runtime_call::Config for Runtime { + // an implementation of `IsSubType` is provided by `construct_runtime`. + type RuntimeCall = RuntimeCall; + } +} diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index c16122ee4287..760bb442c166 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -43,16 +43,16 @@ pub mod extrinsic_encoding; // TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/42 pub mod signed_extensions; -/// Learn about *"Origin"* A topic in FRAME that enables complex account abstractions to be built. -// TODO: @shawntabrizi https://github.com/paritytech/polkadot-sdk-docs/issues/43 +/// Learn about *Origins*, a topic in FRAME that enables complex account abstractions to be built. pub mod frame_origin; /// Learn about how to write safe and defensive code in your FRAME runtime. // TODO: @CrackTheCode016 https://github.com/paritytech/polkadot-sdk-docs/issues/44 pub mod safe_defensive_programming; -/// Learn about composite enums in FRAME-based runtimes, such as "RuntimeEvent" and "RuntimeCall". -pub mod frame_composite_enums; +/// Learn about composite enums and other runtime level types, such as "RuntimeEvent" and +/// "RuntimeCall". +pub mod frame_runtime_types; /// Learn about how to make a pallet/runtime that is fee-less and instead uses another mechanism to /// control usage and sybil attacks. diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index a1715ba49001..a2e5d726fdd0 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -163,6 +163,12 @@ pub mod runtime { ConstU32, ConstU64, ConstU8, }; + /// Primary types used to parameterize `EnsureOrigin` and `EnsureRootWithArg`. + pub use frame_system::{ + EnsureNever, EnsureNone, EnsureRoot, EnsureRootWithSuccess, EnsureSigned, + EnsureSignedBy, + }; + /// Types to define your runtime version.
pub use sp_version::{create_runtime_str, runtime_version, RuntimeVersion}; diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs index b421d2aaffab..83049919d01c 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -104,7 +104,7 @@ pub fn expand_outer_origin( #[doc = #doc_string] #[derive(Clone)] pub struct RuntimeOrigin { - caller: OriginCaller, + pub caller: OriginCaller, filter: #scrate::__private::sp_std::rc::Rc::RuntimeCall) -> bool>>, } diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 20b8d74310f3..441b21e26ff2 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1480,22 +1480,11 @@ pub fn validate_unsigned(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. /// -/// Item must be either a type alias, an enum, or a struct. It needs to be public. -/// -/// E.g.: -/// -/// ```ignore -/// #[pallet::origin] -/// pub struct Origin(PhantomData<(T)>); -/// ``` -/// -/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin -/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care -/// as it might require some migration. +/// --- /// -/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// `frame_support::pallet_macros::origin`. #[proc_macro_attribute] pub fn origin(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index cd12da6de54e..26fc1fe42c11 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -2274,9 +2274,8 @@ pub mod pallet_macros { pub use frame_support_procedural::{ composite_enum, config, disable_frame_system_supertrait_check, error, event, extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, - import_section, inherent, no_default, no_default_bounds, origin, pallet_section, - storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, - whitelist_storage, + import_section, inherent, no_default, no_default_bounds, pallet_section, storage_prefix, + storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, }; /// Allows a pallet to declare a set of functions as a *dispatchable extrinsic*. In @@ -2718,7 +2717,7 @@ pub mod pallet_macros { /// } /// ``` pub use frame_support_procedural::storage; - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the conditions for a /// given work item to be valid. /// @@ -2726,21 +2725,21 @@ pub mod pallet_macros { /// should have the same signature as the function it is attached to, except that it should /// return a `bool` instead. 
pub use frame_support_procedural::task_condition; - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the index of a given /// work item. /// /// It takes an integer literal as input, which is then used to define the index. This /// index should be unique for each function in the `impl` block. pub use frame_support_procedural::task_index; - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define an iterator over the /// available work items for a task. /// /// It takes an iterator as input that yields a tuple with same types as the function /// arguments. pub use frame_support_procedural::task_list; - /// This attribute is attached to a function inside an `impl` block annoated with + /// This attribute is attached to a function inside an `impl` block annotated with /// [`pallet::tasks_experimental`](`tasks_experimental`) to define the weight of a given work /// item. /// @@ -2773,6 +2772,61 @@ pub mod pallet_macros { /// Now, this can be executed as follows: #[doc = docify::embed!("src/tests/tasks.rs", tasks_work)] pub use frame_support_procedural::tasks_experimental; + + /// Allows a pallet to declare a type as an origin. + /// + /// If defined as such, this type will be amalgamated at the runtime level into + /// `RuntimeOrigin`, very similar to [`call`], [`error`] and [`event`]. See + /// [`composite_enum`] for similar cases. + /// + /// Origin is a complex FRAME topic and is further explained in `polkadot_sdk_docs`. + /// + /// ## Syntax Variants + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet<T>(_); + /// /// On the spot declaration. + /// #[pallet::origin] + /// #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + /// pub enum Origin { + /// Foo, + /// Bar, + /// } + /// } + /// ``` + /// + /// Or, more commonly used: + /// + /// ``` + /// #[frame_support::pallet] + /// mod pallet { + /// # use frame_support::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet<T>(_); + /// #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] + /// pub enum RawOrigin { + /// Foo, + /// Bar, + /// } + /// + /// #[pallet::origin] + /// pub type Origin = RawOrigin; + /// } + /// ``` + /// + /// ## Warning + /// + /// Modifying any pallet's origin type will cause the runtime-level origin type to also + /// change in encoding. If stored anywhere on-chain, this will require a data migration. + pub use frame_support_procedural::origin; } #[deprecated(note = "Will be removed after July 2023; Use `sp_runtime::traits` directly instead.")] diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 1405c7303f87..aa6841c008a9 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -452,6 +452,7 @@ pub mod pallet { + Clone + OriginTrait; + #[docify::export(system_runtime_call)] /// The aggregated `RuntimeCall` type.
#[pallet::no_default_bounds] type RuntimeCall: Parameter From 0cc9b9003c019400a8e26c2e1d5679ab654a63dc Mon Sep 17 00:00:00 2001 From: Petr Mensik Date: Tue, 27 Feb 2024 23:50:07 +0100 Subject: [PATCH 48/51] Add Polkadotters bootnodes per IBP application (#3423) Hey everyone, this PR will replace the existing Polkadotters bootnodes for Polkadot, Kusama and Westend, and add a Paseo bootnode to the relay chain suite. At the same time, it will add new bootnodes for all the system parachains, including People on Westend. This PR is a part of our membership in the IBP, meaning that all the bootnodes are hosted on our hardware housed in the data center in Christchurch, New Zealand. All the bootnodes were tested with an empty chain spec file, with each command yielding 1 peer. The test commands used are as follows: ``` ./polkadot --base-path /tmp/node --reserved-only --chain paseo --reserved-nodes "/dns/paseo.bootnodes.polkadotters.com/tcp/30540/wss/p2p/12D3KooWPbbFy4TefEGTRF5eTYhq8LEzc4VAHdNUVCbY4nAnhqPP" ./polkadot --base-path /tmp/node --reserved-only --chain westend --reserved-nodes "/dns/westend.bootnodes.polkadotters.com/tcp/30310/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5" ./polkadot --base-path /tmp/node --reserved-only --chain kusama --reserved-nodes "/dns/kusama.bootnodes.polkadotters.com/tcp/30313/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG" ./polkadot --base-path /tmp/node --no-hardware-benchmarks --reserved-only --chain polkadot --reserved-nodes "/dns/polkadot.bootnodes.polkadotters.com/tcp/30316/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain asset-hub-kusama --reserved-nodes "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain asset-hub-polkadot --reserved-nodes "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain asset-hub-westend --reserved-nodes "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30516/wss/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain bridge-hub-kusama --reserved-nodes "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain bridge-hub-westend --reserved-nodes "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain collectives-polkadot --reserved-nodes "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain collectives-westend --reserved-nodes "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30531/wss/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn" ./polkadot-parachain --base-path /tmp/node --reserved-only --chain people-westend --reserved-nodes
"/dns/identity-westend.bootnodes.polkadotters.com/tcp/30534/wss/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb" ``` Best regards, Petr, Polkadotters --- cumulus/parachains/chain-specs/asset-hub-kusama.json | 4 +++- cumulus/parachains/chain-specs/asset-hub-polkadot.json | 4 +++- cumulus/parachains/chain-specs/asset-hub-westend.json | 4 +++- cumulus/parachains/chain-specs/bridge-hub-kusama.json | 4 +++- cumulus/parachains/chain-specs/bridge-hub-polkadot.json | 4 +++- cumulus/parachains/chain-specs/bridge-hub-westend.json | 4 +++- cumulus/parachains/chain-specs/collectives-polkadot.json | 4 +++- cumulus/parachains/chain-specs/collectives-westend.json | 4 +++- cumulus/parachains/chain-specs/people-westend.json | 6 ++++-- polkadot/node/service/chain-specs/kusama.json | 4 ++-- polkadot/node/service/chain-specs/paseo.json | 4 +++- polkadot/node/service/chain-specs/polkadot.json | 4 ++-- polkadot/node/service/chain-specs/westend.json | 4 ++-- 13 files changed, 37 insertions(+), 17 deletions(-) diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index fba74b17f960..654275eade81 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -25,7 +25,9 @@ "/dns/statemine-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", "/dns/mine14.rotko.net/tcp/33524/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", "/dns/mine14.rotko.net/tcp/34524/ws/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", - "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu" + "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", + "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30511/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU", + "/dns/asset-hub-kusama.bootnodes.polkadotters.com/tcp/30513/wss/p2p/12D3KooWDpk7wVH7RgjErEvbvAZ2kY5VeaAwRJP5ojmn1e8b8UbU" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index 685a00ddc714..454060d2a87a 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -25,7 +25,9 @@ "/dns/statemint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", "/dns/mint14.rotko.net/tcp/33514/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", "/dns/mint14.rotko.net/tcp/34514/ws/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", - "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW" + "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", + "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30508/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R", + "/dns/asset-hub-polkadot.bootnodes.polkadotters.com/tcp/30510/wss/p2p/12D3KooWKbfY9a9oywxMJKiALmt7yhrdQkjXMtvxhhDDN23vG93R" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index 6f42b5f7d8bb..670935c9d247 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -23,7 +23,9 @@ 
"/dns/westmint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", "/dns/wmint14.rotko.net/tcp/33534/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", "/dns/wmint14.rotko.net/tcp/34534/ws/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", - "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN" + "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", + "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30514/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk", + "/dns/asset-hub-westend.bootnodes.polkadotters.com/tcp/30516/wss/p2p/12D3KooWNFYysCqmojxqjjaTfD2VkWBNngfyUKWjcR4WFixfHNTk" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 0ef81806cc5c..90b70b05016d 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -25,7 +25,9 @@ "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", "/dns/kbr13.rotko.net/tcp/33553/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", "/dns/kbr13.rotko.net/tcp/34553/ws/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", - "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66" + "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", + "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30520/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV", + "/dns/bridge-hub-kusama.bootnodes.polkadotters.com/tcp/30522/wss/p2p/12D3KooWH3pucezRRS5esoYyzZsUkKWcPSByQxEvmM819QL1HPLV" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index 130bdf31ef21..a9444b89e1e2 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -21,7 +21,9 @@ "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi", "/dns/pbr13.rotko.net/tcp/33543/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", "/dns/pbr13.rotko.net/tcp/34543/ws/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", - "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw" + "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", + "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30517/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB", + "/dns/bridge-hub-polkadot.bootnodes.polkadotters.com/tcp/30519/wss/p2p/12D3KooWLUNE3LHPDa1WrrZaYT7ArK66CLM1bPv7kKz74UcLnQRB" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index 018ab0ee6fd9..447207a58107 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -19,7 +19,9 @@ "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke", "/dns/wbr13.rotko.net/tcp/33563/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", 
"/dns/wbr13.rotko.net/tcp/34563/ws/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", - "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD" + "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", + "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30523/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj", + "/dns/bridge-hub-westend.bootnodes.polkadotters.com/tcp/30525/wss/p2p/12D3KooWPkwgJofp4GeeRwNgXqkp2aFwdLkCWv3qodpBJLwK43Jj" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index e9f690234e43..259669cf37a0 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -25,7 +25,9 @@ "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", "/dns/pch13.rotko.net/tcp/33573/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", - "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH" + "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", + "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30526/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff", + "/dns/collectives-polkadot.bootnodes.polkadotters.com/tcp/30528/wss/p2p/12D3KooWNohUjvJtGKUa8Vhy8C1ZBB5N8JATB6e7rdLVCioeb3ff" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index ffe73b5a05a3..e459c631f8be 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -25,7 +25,9 @@ "/dns/westend-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m", "/dns/wch13.rotko.net/tcp/33593/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", "/dns/wch13.rotko.net/tcp/34593/ws/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", - "/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr" + "/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", + "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30529/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn", + "/dns/collectives-westend.bootnodes.polkadotters.com/tcp/30531/wss/p2p/12D3KooWAFkXNSBfyPduZVgfS7pj5NuVpbU8Ee5gHeF8wvos7Yqn" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index fa29853c70b0..29fa0c9cde79 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ b/cumulus/parachains/chain-specs/people-westend.json @@ -10,7 +10,9 @@ "/dns/westend-people-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWGVYTVKW7tYe51JvetvGvVLDPXzqQX1mueJgz14FgkmHG", "/dns/westend-people-collator-node-2.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGVYTVKW7tYe51JvetvGvVLDPXzqQX1mueJgz14FgkmHG", "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23", - 
"/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23" + "/dns/westend-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWCF1eA2Gap69zgXD7Df3e9DqDUsGoByocggTGejoHjK23", + "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30532/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb", + "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30534/wss/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb" ], "telemetryEndpoints": null, "protocolId": null, @@ -79,4 +81,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index 979550c75706..fd60cb8b6c1d 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -16,8 +16,8 @@ "/dns/boot-node.helikon.io/tcp/7062/wss/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD", "/dns/kusama.bootnode.amforc.com/tcp/30333/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", "/dns/kusama.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9", - "/dns/kusama-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", - "/dns/kusama-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", + "/dns/kusama.bootnodes.polkadotters.com/tcp/30311/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", + "/dns/kusama.bootnodes.polkadotters.com/tcp/30313/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG", "/dns/boot-cr.gatotech.network/tcp/33200/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", "/dns/boot-cr.gatotech.network/tcp/35200/wss/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD", "/dns/boot-kusama.metaspan.io/tcp/23012/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6", diff --git a/polkadot/node/service/chain-specs/paseo.json b/polkadot/node/service/chain-specs/paseo.json index c8f2b58533c6..8db75ff137b0 100644 --- a/polkadot/node/service/chain-specs/paseo.json +++ b/polkadot/node/service/chain-specs/paseo.json @@ -12,7 +12,9 @@ "/dns/boot-node.helikon.io/tcp/10020/p2p/12D3KooWBetfzZpf6tGihKrqCo5z854Ub4ZNAUUTRT6eYHNh7FYi", "/dns/boot-node.helikon.io/tcp/10022/wss/p2p/12D3KooWBetfzZpf6tGihKrqCo5z854Ub4ZNAUUTRT6eYHNh7FYi", "/dns/pso16.rotko.net/tcp/33246/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", - "/dns/pso16.rotko.net/tcp/35246/wss/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu" + "/dns/pso16.rotko.net/tcp/35246/wss/p2p/12D3KooWRH8eBMhw8c7bucy6pJfy94q4dKpLkF3pmeGohHmemdRu", + "/dns/paseo.bootnodes.polkadotters.com/tcp/30538/p2p/12D3KooWPbbFy4TefEGTRF5eTYhq8LEzc4VAHdNUVCbY4nAnhqPP", + "/dns/paseo.bootnodes.polkadotters.com/tcp/30540/wss/p2p/12D3KooWPbbFy4TefEGTRF5eTYhq8LEzc4VAHdNUVCbY4nAnhqPP" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index 71dbb9004038..c235cdd09d8b 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -17,8 +17,8 @@ "/dns/boot-node.helikon.io/tcp/7072/wss/p2p/12D3KooWS9ZcvRxyzrSf6p63QfTCWs12nLoNKhGux865crgxVA4H", "/dns/polkadot.bootnode.amforc.com/tcp/30333/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", "/dns/polkadot.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWAsuCEVCzUVUrtib8W82Yne3jgVGhQZN3hizko5FTnDg3", 
- "/dns/polkadot-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", - "/dns/polkadot-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", + "/dns/polkadot.bootnodes.polkadotters.com/tcp/30314/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", + "/dns/polkadot.bootnodes.polkadotters.com/tcp/30316/wss/p2p/12D3KooWPAVUgBaBk6n8SztLrMk8ESByncbAfRKUdxY1nygb9zG3", "/dns/boot-cr.gatotech.network/tcp/33100/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", "/dns/boot-cr.gatotech.network/tcp/35100/wss/p2p/12D3KooWK4E16jKk9nRhvC4RfrDVgcZzExg8Q3Q2G7ABUUitks1w", "/dns/boot-polkadot.metaspan.io/tcp/13012/p2p/12D3KooWRjHFApinuqSBjoaDjQHvxwubQSpEVy5hrgC9Smvh92WF", diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json index 697675871fcd..775f3e72ac75 100644 --- a/polkadot/node/service/chain-specs/westend.json +++ b/polkadot/node/service/chain-specs/westend.json @@ -14,8 +14,8 @@ "/dns/boot-node.helikon.io/tcp/7082/wss/p2p/12D3KooWRFDPyT8vA8mLzh6dJoyujn4QNjeqi6Ch79eSMz9beKXC", "/dns/westend.bootnode.amforc.com/tcp/30333/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", "/dns/westend.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWJ5y9ZgVepBQNW4aabrxgmnrApdVnscqgKWiUu4BNJbC8", - "/dns/westend-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", - "/dns/westend-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", + "/dns/westend.bootnodes.polkadotters.com/tcp/30308/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", + "/dns/westend.bootnodes.polkadotters.com/tcp/30310/wss/p2p/12D3KooWHPHb64jXMtSRJDrYFATWeLnvChL8NtWVttY67DCH1eC5", "/dns/boot-cr.gatotech.network/tcp/33300/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", "/dns/boot-cr.gatotech.network/tcp/35300/wss/p2p/12D3KooWQGR1vUhoy6mvQorFp3bZFn6NNezhQZ6NWnVV7tpFgoPd", "/dns/boot-westend.metaspan.io/tcp/33012/p2p/12D3KooWNTau7iG4G9cUJSwwt2QJP1W88pUf2SgqsHjRU2RL8pfa", From 95da6583602691fd0c8403e9eda26db1e10dafc4 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Wed, 28 Feb 2024 13:13:09 +1100 Subject: [PATCH 49/51] Introduce storage attr macro `#[disable_try_decode_storage]` and set it on `System::Events` and `ParachainSystem::HostConfiguration` (#3454) Closes https://github.com/paritytech/polkadot-sdk/issues/2560 Allows marking storage items with `#[disable_try_decode_storage]`, and uses it with `System::Events`. Question: what's the recommended way to write a test for this? I couldn't find a test for similar existing macro `#[whitelist_storage]`. --- cumulus/pallets/parachain-system/src/lib.rs | 1 + prdoc/pr_3454.prdoc | 19 +++++++++++++++ substrate/frame/support/procedural/src/lib.rs | 19 +++++++++++++++ .../procedural/src/pallet/expand/storage.rs | 5 +++- .../procedural/src/pallet/parse/storage.rs | 24 +++++++++++++++++-- substrate/frame/support/src/lib.rs | 17 +++++++++++-- .../storage_invalid_attribute.stderr | 2 +- substrate/frame/system/src/lib.rs | 1 + 8 files changed, 82 insertions(+), 6 deletions(-) create mode 100644 prdoc/pr_3454.prdoc diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index d86a67e58f44..3439227bc5b0 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -840,6 +840,7 @@ pub mod pallet { /// /// This data is also absent from the genesis. 
#[pallet::storage] + #[pallet::disable_try_decode_storage] #[pallet::getter(fn host_configuration)] pub(super) type HostConfiguration<T: Config> = StorageValue<_, AbridgedHostConfiguration>; diff --git a/prdoc/pr_3454.prdoc b/prdoc/pr_3454.prdoc new file mode 100644 index 000000000000..7f564258f23a --- /dev/null +++ b/prdoc/pr_3454.prdoc @@ -0,0 +1,19 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Introduce storage attr macro #[disable_try_decode_storage] and set it on System::Events and ParachainSystem::HostConfiguration" + +doc: + - audience: Runtime Dev + description: | + Allows marking storage items with \#[disable_try_decode_storage], which disables that storage item from being decoded + during try_decode_entire_state calls. + + Applied the attribute to System::Events to close https://github.com/paritytech/polkadot-sdk/issues/2560. + Applied the attribute to ParachainSystem::HostConfiguration to resolve periodic issues with it. + +crates: + - name: frame-support-procedural + - name: frame-system + - name: cumulus-pallet-parachain-system + diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 441b21e26ff2..78729ee3dc3f 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1371,6 +1371,25 @@ pub fn whitelist_storage(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// The optional attribute `#[pallet::disable_try_decode_storage]` will exclude the +/// storage item from decoding during try-runtime checks. This should only be +/// attached to transient storage which cannot be migrated during runtime upgrades. +/// +/// ### Example +/// ```ignore +/// #[pallet::storage] +/// #[pallet::disable_try_decode_storage] +/// pub(super) type Events<T: Config> = StorageValue<_, Vec<Box<EventRecord<T::RuntimeEvent, T::Hash>>>, ValueQuery>; +/// ``` +/// +/// NOTE: As with all `pallet::*` attributes, this one _must_ be written as +/// `#[pallet::disable_try_decode_storage]` and can only be placed inside a `pallet` module in order +/// for it to work properly. +#[proc_macro_attribute] +pub fn disable_try_decode_storage(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// The `#[pallet::type_value]` attribute lets you define a struct implementing the `Get` trait /// to ease the use of storage types. This attribute is meant to be used alongside /// [`#[pallet::storage]`](`macro@storage`) to define a storage's default value. This attribute diff --git a/substrate/frame/support/procedural/src/pallet/expand/storage.rs b/substrate/frame/support/procedural/src/pallet/expand/storage.rs index 96c2c8e3120b..937b068cfabd 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/storage.rs @@ -834,7 +834,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { .storages .iter() .filter_map(|storage| { - if storage.cfg_attrs.is_empty() { + // A little hacky; don't generate for cfg-gated storages, to avoid compile errors + // when building "frame-feature-testing" gated storages in the "frame-support-test" + // crate.
+ if storage.try_decode && storage.cfg_attrs.is_empty() { let ident = &storage.ident; let gen = &def.type_use_generics(storage.attr_span); Some(quote::quote_spanned!(storage.attr_span => #ident<#gen> )) diff --git a/substrate/frame/support/procedural/src/pallet/parse/storage.rs b/substrate/frame/support/procedural/src/pallet/parse/storage.rs index d1c7ba2e5e3c..9d96a18b5694 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/storage.rs @@ -29,6 +29,7 @@ mod keyword { syn::custom_keyword!(storage_prefix); syn::custom_keyword!(unbounded); syn::custom_keyword!(whitelist_storage); + syn::custom_keyword!(disable_try_decode_storage); syn::custom_keyword!(OptionQuery); syn::custom_keyword!(ResultQuery); syn::custom_keyword!(ValueQuery); @@ -39,11 +40,13 @@ mod keyword { /// * `#[pallet::storage_prefix = "CustomName"]` /// * `#[pallet::unbounded]` /// * `#[pallet::whitelist_storage]` +/// * `#[pallet::disable_try_decode_storage]` pub enum PalletStorageAttr { Getter(syn::Ident, proc_macro2::Span), StorageName(syn::LitStr, proc_macro2::Span), Unbounded(proc_macro2::Span), WhitelistStorage(proc_macro2::Span), + DisableTryDecodeStorage(proc_macro2::Span), } impl PalletStorageAttr { @@ -53,6 +56,7 @@ impl PalletStorageAttr { Self::StorageName(_, span) | Self::Unbounded(span) | Self::WhitelistStorage(span) => *span, + Self::DisableTryDecodeStorage(span) => *span, } } } @@ -93,6 +97,9 @@ impl syn::parse::Parse for PalletStorageAttr { } else if lookahead.peek(keyword::whitelist_storage) { content.parse::<keyword::whitelist_storage>()?; Ok(Self::WhitelistStorage(attr_span)) + } else if lookahead.peek(keyword::disable_try_decode_storage) { + content.parse::<keyword::disable_try_decode_storage>()?; + Ok(Self::DisableTryDecodeStorage(attr_span)) } else { Err(lookahead.error()) } @@ -104,6 +111,7 @@ struct PalletStorageAttrInfo { rename_as: Option<syn::LitStr>, unbounded: bool, whitelisted: bool, + try_decode: bool, } impl PalletStorageAttrInfo { @@ -112,6 +120,7 @@ let mut rename_as = None; let mut unbounded = false; let mut whitelisted = false; + let mut disable_try_decode_storage = false; for attr in attrs { match attr { PalletStorageAttr::Getter(ident, ..) if getter.is_none() => getter = Some(ident), @@ -119,6 +128,8 @@ rename_as = Some(name), PalletStorageAttr::Unbounded(..) if !unbounded => unbounded = true, PalletStorageAttr::WhitelistStorage(..) if !whitelisted => whitelisted = true, + PalletStorageAttr::DisableTryDecodeStorage(..) if !disable_try_decode_storage => + disable_try_decode_storage = true, attr => return Err(syn::Error::new( attr.attr_span(), @@ -127,7 +138,13 @@ } } - Ok(PalletStorageAttrInfo { getter, rename_as, unbounded, whitelisted }) + Ok(PalletStorageAttrInfo { + getter, + rename_as, + unbounded, + whitelisted, + try_decode: !disable_try_decode_storage, + }) } } @@ -186,6 +203,8 @@ pub struct StorageDef { pub unbounded: bool, /// Whether or not reads to this storage key will be ignored by benchmarking pub whitelisted: bool, + /// Whether or not to try to decode the storage key when running try-runtime checks.
+ pub try_decode: bool, /// Whether or not a default hasher is allowed to replace `_` pub use_default_hasher: bool, } @@ -775,7 +794,7 @@ impl StorageDef { }; let attrs: Vec<PalletStorageAttr> = helper::take_item_pallet_attrs(&mut item.attrs)?; - let PalletStorageAttrInfo { getter, rename_as, mut unbounded, whitelisted } = + let PalletStorageAttrInfo { getter, rename_as, mut unbounded, whitelisted, try_decode } = PalletStorageAttrInfo::from_attrs(attrs)?; // set all storages to be unbounded if dev_mode is enabled @@ -921,6 +940,7 @@ impl StorageDef { named_generics, unbounded, whitelisted, + try_decode, use_default_hasher, }) } diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 26fc1fe42c11..3e97a3842756 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -982,6 +982,7 @@ pub mod pallet_prelude { /// * [`pallet::storage_prefix = "SomeName"`](#palletstorage_prefix--somename-optional) /// * [`pallet::unbounded`](#palletunbounded-optional) /// * [`pallet::whitelist_storage`](#palletwhitelist_storage-optional) +/// * [`pallet::disable_try_decode_storage`](#palletdisable_try_decode_storage-optional) /// * [`cfg(..)`](#cfg-for-storage) (on storage items) /// * [`pallet::type_value`](#type-value-pallettype_value-optional) /// * [`pallet::genesis_config`](#genesis-config-palletgenesis_config-optional) @@ -1498,6 +1499,15 @@ pub mod pallet_prelude { /// [`pallet::whitelist_storage`](frame_support::pallet_macros::whitelist_storage) /// for more info. /// +/// ## `#[pallet::disable_try_decode_storage]` (optional) +/// +/// The optional attribute `#[pallet::disable_try_decode_storage]` will exclude the storage +/// item from state decoding during try-runtime logic. +/// +/// See +/// [`pallet::disable_try_decode_storage`](frame_support::pallet_macros::disable_try_decode_storage) +/// for more info. +/// /// ## `#[cfg(..)]` (for storage) /// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage. /// @@ -2272,8 +2282,8 @@ pub use frame_support_procedural::pallet; /// Contains macro stubs for all of the pallet:: macros pub mod pallet_macros { pub use frame_support_procedural::{ - composite_enum, config, disable_frame_system_supertrait_check, error, event, - extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, + composite_enum, config, disable_frame_system_supertrait_check, disable_try_decode_storage, + error, event, extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, import_section, inherent, no_default, no_default_bounds, pallet_section, storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, }; @@ -2698,6 +2708,8 @@ pub mod pallet_macros { /// * [`macro@getter`]: Creates a custom getter function. /// * [`macro@storage_prefix`]: Overrides the default prefix of the storage item. /// * [`macro@unbounded`]: Declares the storage item as unbounded. + /// * [`macro@disable_try_decode_storage`]: Declares that try-runtime checks should not + /// attempt to decode the storage item.
/// +/// #### Example /// ``` @@ -2713,6 +2725,7 @@ pub mod pallet_macros { /// #[pallet::getter(fn foo)] /// #[pallet::storage_prefix = "OtherFoo"] /// #[pallet::unbounded] + /// #[pallet::disable_try_decode_storage] /// pub type Foo<T> = StorageValue<_, u32, ValueQuery>; /// } /// ``` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr index 7f125526edf2..519fadaa6049 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr @@ -1,4 +1,4 @@ -error: expected one of: `getter`, `storage_prefix`, `unbounded`, `whitelist_storage` +error: expected one of: `getter`, `storage_prefix`, `unbounded`, `whitelist_storage`, `disable_try_decode_storage` --> tests/pallet_ui/storage_invalid_attribute.rs:33:12 | 33 | #[pallet::generate_store(pub trait Store)] diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index aa6841c008a9..c527a483d880 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -894,6 +894,7 @@ pub mod pallet { /// just in case someone still reads them from within the runtime. #[pallet::storage] #[pallet::whitelist_storage] + #[pallet::disable_try_decode_storage] #[pallet::unbounded] pub(super) type Events<T: Config> = StorageValue<_, Vec<Box<EventRecord<T::RuntimeEvent, T::Hash>>>, ValueQuery>; From 7ec0b8741b4ead5bc85411b30e53b459a03b1938 Mon Sep 17 00:00:00 2001 From: maksimryndin Date: Wed, 28 Feb 2024 05:05:54 +0100 Subject: [PATCH 50/51] Collator overseer builder unification (#3335) Resolves https://github.com/paritytech/polkadot-sdk/issues/3116; a follow-up on https://github.com/paritytech/polkadot-sdk/pull/3061#pullrequestreview-1847530265: - [x] reuse collator overseer builder for polkadot-node and collator - [x] run zombienet test (0001-parachains-smoke-test.toml) - [x] make wasm build errors more user-friendly for easier problem detection when using different Rust toolchains --------- Co-authored-by: ordian Co-authored-by: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> --- Cargo.lock | 1 + .../relay-chain-minimal-node/Cargo.toml | 1 + .../src/collator_overseer.rs | 144 +----------------- .../relay-chain-minimal-node/src/lib.rs | 30 ++-- .../relay-chain-minimal-node/src/network.rs | 8 +- .../src/variants/back_garbage_candidate.rs | 15 +- polkadot/node/malus/src/variants/common.rs | 4 - .../variants/dispute_finalized_candidates.rs | 17 +-- .../src/variants/dispute_valid_candidates.rs | 15 +- .../src/variants/suggest_garbage_candidate.rs | 17 +-- .../malus/src/variants/support_disabled.rs | 15 +- polkadot/node/service/src/lib.rs | 13 +- polkadot/node/service/src/overseer.rs | 76 +++------ .../subsystem-types/src/runtime_client.rs | 64 +++++++- .../utils/wasm-builder/src/prerequisites.rs | 20 ++- 15 files changed, 162 insertions(+), 278 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c52feafe7a70..27ba43780eda 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4166,6 +4166,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-overseer", "polkadot-primitives", + "polkadot-service", "sc-authority-discovery", "sc-client-api", "sc-network", diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml
@@ -24,6 +24,7 @@ polkadot-node-collation-generation = { path = "../../../polkadot/node/collation- polkadot-node-core-runtime-api = { path = "../../../polkadot/node/core/runtime-api" } polkadot-node-core-chain-api = { path = "../../../polkadot/node/core/chain-api" } polkadot-node-core-prospective-parachains = { path = "../../../polkadot/node/core/prospective-parachains" } +polkadot-service = { path = "../../../polkadot/node/service" } # substrate deps sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index 5f5bf338ef99..f01ef8b05ecb 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -15,159 +15,29 @@ // along with Polkadot. If not, see . use futures::{select, StreamExt}; -use parking_lot::Mutex; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; -use polkadot_availability_recovery::AvailabilityRecoverySubsystem; -use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide}; -use polkadot_network_bridge::{ - Metrics as NetworkBridgeMetrics, NetworkBridgeRx as NetworkBridgeRxSubsystem, - NetworkBridgeTx as NetworkBridgeTxSubsystem, -}; -use polkadot_node_collation_generation::CollationGenerationSubsystem; -use polkadot_node_core_chain_api::ChainApiSubsystem; -use polkadot_node_core_prospective_parachains::ProspectiveParachainsSubsystem; -use polkadot_node_core_runtime_api::RuntimeApiSubsystem; -use polkadot_node_network_protocol::{ - peer_set::{PeerSet, PeerSetProtocolNames}, - request_response::{ - v1::{self, AvailableDataFetchingRequest}, - v2, IncomingRequestReceiver, ReqProtocolNames, - }, -}; -use polkadot_node_subsystem_util::metrics::{prometheus::Registry, Metrics}; use polkadot_overseer::{ - BlockInfo, DummySubsystem, Handle, Overseer, OverseerConnector, OverseerHandle, SpawnGlue, - UnpinHandle, + BlockInfo, Handle, Overseer, OverseerConnector, OverseerHandle, SpawnGlue, UnpinHandle, }; -use polkadot_primitives::CollatorPair; +use polkadot_service::overseer::{collator_overseer_builder, OverseerGenArgs}; -use sc_authority_discovery::Service as AuthorityDiscoveryService; -use sc_network::{NetworkStateInfo, NotificationService}; use sc_service::TaskManager; use sc_utils::mpsc::tracing_unbounded; -use cumulus_primitives_core::relay_chain::{Block, Hash as PHash}; use cumulus_relay_chain_interface::RelayChainError; use crate::BlockChainRpcClient; -/// Arguments passed for overseer construction. -pub(crate) struct CollatorOverseerGenArgs<'a> { - /// Runtime client generic, providing the `ProvieRuntimeApi` trait besides others. - pub runtime_client: Arc, - /// Underlying network service implementation. - pub network_service: Arc>, - /// Syncing oracle. - pub sync_oracle: Box, - /// Underlying authority discovery service. - pub authority_discovery_service: AuthorityDiscoveryService, - /// Receiver for collation request protocol v1. - pub collation_req_receiver_v1: IncomingRequestReceiver, - /// Receiver for collation request protocol v2. - pub collation_req_receiver_v2: IncomingRequestReceiver, - /// Receiver for availability request protocol - pub available_data_req_receiver: IncomingRequestReceiver, - /// Prometheus registry, commonly used for production systems, less so for test. 
- pub registry: Option<&'a Registry>, - /// Task spawner to be used throughout the overseer and the APIs it provides. - pub spawner: sc_service::SpawnTaskHandle, - /// Determines the behavior of the collator. - pub collator_pair: CollatorPair, - /// Request response protocols - pub req_protocol_names: ReqProtocolNames, - /// Peerset protocols name mapping - pub peer_set_protocol_names: PeerSetProtocolNames, - /// Notification services for validation/collation protocols. - pub notification_services: HashMap>, -} - fn build_overseer( connector: OverseerConnector, - CollatorOverseerGenArgs { - runtime_client, - network_service, - sync_oracle, - authority_discovery_service, - collation_req_receiver_v1, - collation_req_receiver_v2, - available_data_req_receiver, - registry, - spawner, - collator_pair, - req_protocol_names, - peer_set_protocol_names, - notification_services, - }: CollatorOverseerGenArgs<'_>, + args: OverseerGenArgs, ) -> Result< (Overseer, Arc>, OverseerHandle), RelayChainError, > { - let spawner = SpawnGlue(spawner); - let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let notification_sinks = Arc::new(Mutex::new(HashMap::new())); - - let builder = Overseer::builder() - .availability_distribution(DummySubsystem) - .availability_recovery(AvailabilityRecoverySubsystem::for_collator( - available_data_req_receiver, - Metrics::register(registry)?, - )) - .availability_store(DummySubsystem) - .bitfield_distribution(DummySubsystem) - .bitfield_signing(DummySubsystem) - .candidate_backing(DummySubsystem) - .candidate_validation(DummySubsystem) - .pvf_checker(DummySubsystem) - .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) - .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) - .collator_protocol({ - let side = ProtocolSide::Collator { - peer_id: network_service.local_peer_id(), - collator_pair, - request_receiver_v1: collation_req_receiver_v1, - request_receiver_v2: collation_req_receiver_v2, - metrics: Metrics::register(registry)?, - }; - CollatorProtocolSubsystem::new(side) - }) - .network_bridge_rx(NetworkBridgeRxSubsystem::new( - network_service.clone(), - authority_discovery_service.clone(), - sync_oracle, - network_bridge_metrics.clone(), - peer_set_protocol_names.clone(), - notification_services, - notification_sinks.clone(), - )) - .network_bridge_tx(NetworkBridgeTxSubsystem::new( - network_service, - authority_discovery_service, - network_bridge_metrics, - req_protocol_names, - peer_set_protocol_names, - notification_sinks, - )) - .provisioner(DummySubsystem) - .runtime_api(RuntimeApiSubsystem::new( - runtime_client.clone(), - Metrics::register(registry)?, - spawner.clone(), - )) - .statement_distribution(DummySubsystem) - .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) - .approval_distribution(DummySubsystem) - .approval_voting(DummySubsystem) - .gossip_support(DummySubsystem) - .dispute_coordinator(DummySubsystem) - .dispute_distribution(DummySubsystem) - .chain_selection(DummySubsystem) - .activation_external_listeners(Default::default()) - .span_per_active_leaf(Default::default()) - .active_leaves(Default::default()) - .supports_parachains(runtime_client) - .metrics(Metrics::register(registry)?) 
- .spawner(spawner); + let builder = + collator_overseer_builder(args).map_err(|e| RelayChainError::Application(e.into()))?; builder .build_with_connector(connector) @@ -175,7 +45,7 @@ fn build_overseer( } pub(crate) fn spawn_overseer( - overseer_args: CollatorOverseerGenArgs, + overseer_args: OverseerGenArgs, task_manager: &TaskManager, relay_chain_rpc_client: Arc, ) -> Result { diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index d121d2d33567..4bccca59fe3e 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use collator_overseer::{CollatorOverseerGenArgs, NewMinimalNode}; +use collator_overseer::NewMinimalNode; use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult}; use cumulus_relay_chain_rpc_interface::{RelayChainRpcClient, RelayChainRpcInterface, Url}; @@ -29,6 +29,7 @@ use polkadot_node_network_protocol::{ use polkadot_node_subsystem_util::metrics::prometheus::Registry; use polkadot_primitives::CollatorPair; +use polkadot_service::{overseer::OverseerGenArgs, IsParachainNode}; use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_network::{config::FullNetworkConfiguration, Event, NetworkEventStream, NetworkService}; @@ -172,10 +173,10 @@ async fn new_minimal_relay_chain( } let genesis_hash = relay_chain_rpc_client.block_get_hash(Some(0)).await?.unwrap_or_default(); - let peer_set_protocol_names = + let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; - let notification_services = peer_sets_info(is_authority, &peer_set_protocol_names) + let notification_services = peer_sets_info(is_authority, &peerset_protocol_names) .into_iter() .map(|(config, (peerset, service))| { net_config.add_notification_protocol(config); @@ -184,14 +185,14 @@ async fn new_minimal_relay_chain( .collect::>>(); let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); - let (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) = + let (collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver) = build_request_response_protocol_receivers(&request_protocol_names, &mut net_config); let best_header = relay_chain_rpc_client .chain_get_header(None) .await? 
.ok_or_else(|| RelayChainError::RpcCallError("Unable to fetch best header".to_string()))?; - let (network, network_starter, sync_oracle) = build_collator_network( + let (network, network_starter, sync_service) = build_collator_network( &config, net_config, task_manager.spawn_handle(), @@ -208,19 +209,20 @@ async fn new_minimal_relay_chain( prometheus_registry.cloned(), ); - let overseer_args = CollatorOverseerGenArgs { + let overseer_args = OverseerGenArgs { runtime_client: relay_chain_rpc_client.clone(), network_service: network, - sync_oracle, + sync_service, authority_discovery_service, - collation_req_receiver_v1, - collation_req_receiver_v2, + collation_req_v1_receiver, + collation_req_v2_receiver, available_data_req_receiver, registry: prometheus_registry, spawner: task_manager.spawn_handle(), - collator_pair, + is_parachain_node: IsParachainNode::Collator(collator_pair), + overseer_message_channel_capacity_override: None, req_protocol_names: request_protocol_names, - peer_set_protocol_names, + peerset_protocol_names, notification_services, }; @@ -240,10 +242,10 @@ fn build_request_response_protocol_receivers( IncomingRequestReceiver, IncomingRequestReceiver, ) { - let (collation_req_receiver_v1, cfg) = + let (collation_req_v1_receiver, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); - let (collation_req_receiver_v2, cfg) = + let (collation_req_v2_receiver, cfg) = IncomingRequest::get_config_receiver(request_protocol_names); config.add_request_response_protocol(cfg); let (available_data_req_receiver, cfg) = @@ -251,5 +253,5 @@ fn build_request_response_protocol_receivers( config.add_request_response_protocol(cfg); let cfg = Protocol::ChunkFetchingV1.get_outbound_only_config(request_protocol_names); config.add_request_response_protocol(cfg); - (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) + (collation_req_v1_receiver, collation_req_v2_receiver, available_data_req_receiver) } diff --git a/cumulus/client/relay-chain-minimal-node/src/network.rs b/cumulus/client/relay-chain-minimal-node/src/network.rs index 95785063c1ae..7286fab7907c 100644 --- a/cumulus/client/relay-chain-minimal-node/src/network.rs +++ b/cumulus/client/relay-chain-minimal-node/src/network.rs @@ -40,7 +40,11 @@ pub(crate) fn build_collator_network( genesis_hash: Hash, best_header: Header, ) -> Result< - (Arc>, NetworkStarter, Box), + ( + Arc>, + NetworkStarter, + Arc, + ), Error, > { let protocol_id = config.protocol_id(); @@ -112,7 +116,7 @@ pub(crate) fn build_collator_network( let network_starter = NetworkStarter::new(network_start_tx); - Ok((network_service, network_starter, Box::new(SyncOracle {}))) + Ok((network_service, network_starter, Arc::new(SyncOracle {}))) } fn adjust_network_config_light_in_peers(config: &mut NetworkConfiguration) { diff --git a/polkadot/node/malus/src/variants/back_garbage_candidate.rs b/polkadot/node/malus/src/variants/back_garbage_candidate.rs index 82475d291422..b939a2151e23 100644 --- a/polkadot/node/malus/src/variants/back_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/back_garbage_candidate.rs @@ -20,14 +20,13 @@ use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, 
}, validator_overseer_builder, Cli, }; use polkadot_node_subsystem::SpawnGlue; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use sp_core::traits::SpawnNamed; use crate::{ @@ -63,13 +62,9 @@ impl OverseerGen for BackGarbageCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { let spawner = args.spawner.clone(); diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs index 011fcc80e373..eb6988f81811 100644 --- a/polkadot/node/malus/src/variants/common.rs +++ b/polkadot/node/malus/src/variants/common.rs @@ -23,10 +23,6 @@ use crate::{ use polkadot_node_core_candidate_validation::find_validation_data; use polkadot_node_primitives::{InvalidCandidate, ValidationResult}; -use polkadot_node_subsystem::{ - messages::{CandidateValidationMessage, ValidationFailed}, - overseer, -}; use polkadot_primitives::{ CandidateCommitments, CandidateDescriptor, CandidateReceipt, PersistedValidationData, diff --git a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs index b3555ba2f5be..7a95bdaead26 100644 --- a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs +++ b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs @@ -34,14 +34,13 @@ use futures::channel::oneshot; use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; -use polkadot_node_subsystem::{messages::ApprovalVotingMessage, SpawnGlue}; -use polkadot_node_subsystem_types::{DefaultSubsystemClient, OverseerSignal}; +use polkadot_node_subsystem::SpawnGlue; +use polkadot_node_subsystem_types::{ChainApiBackend, OverseerSignal, RuntimeApiSubsystemClient}; use polkadot_node_subsystem_util::request_candidate_events; use polkadot_primitives::CandidateEvent; use sp_core::traits::SpawnNamed; @@ -237,13 +236,9 @@ impl OverseerGen for DisputeFinalizedCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { gum::info!( diff --git a/polkadot/node/malus/src/variants/dispute_valid_candidates.rs b/polkadot/node/malus/src/variants/dispute_valid_candidates.rs index b9812cbb5012..a50fdce16e4e 100644 --- a/polkadot/node/malus/src/variants/dispute_valid_candidates.rs +++ 
b/polkadot/node/malus/src/variants/dispute_valid_candidates.rs @@ -24,14 +24,13 @@ use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; use polkadot_node_subsystem::SpawnGlue; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use sp_core::traits::SpawnNamed; // Filter wrapping related types. @@ -80,13 +79,9 @@ impl OverseerGen for DisputeValidCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { let spawner = args.spawner.clone(); diff --git a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs index 22b44ddd1dc3..739ed40db362 100644 --- a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs @@ -25,14 +25,13 @@ use futures::channel::oneshot; use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; use polkadot_node_primitives::{AvailableData, BlockData, PoV}; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_primitives::{CandidateDescriptor, CandidateReceipt}; use polkadot_node_subsystem_util::request_validators; @@ -52,7 +51,7 @@ use crate::{ // Import extra types relevant to the particular // subsystem. 
-use polkadot_node_subsystem::{messages::CandidateBackingMessage, SpawnGlue}; +use polkadot_node_subsystem::SpawnGlue; use std::sync::Arc; @@ -296,13 +295,9 @@ impl OverseerGen for SuggestGarbageCandidates { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { gum::info!( diff --git a/polkadot/node/malus/src/variants/support_disabled.rs b/polkadot/node/malus/src/variants/support_disabled.rs index e25df56fd643..169c442db25b 100644 --- a/polkadot/node/malus/src/variants/support_disabled.rs +++ b/polkadot/node/malus/src/variants/support_disabled.rs @@ -19,14 +19,13 @@ use polkadot_cli::{ service::{ - AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, ExtendedOverseerGenArgs, - HeaderBackend, Overseer, OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, - ParachainHost, ProvideRuntimeApi, + AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen, + OverseerGenArgs, OverseerHandle, }, validator_overseer_builder, Cli, }; use polkadot_node_subsystem::SpawnGlue; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use sp_core::traits::SpawnNamed; use crate::interceptor::*; @@ -50,13 +49,9 @@ impl OverseerGen for SupportDisabled { connector: OverseerConnector, args: OverseerGenArgs<'_, Spawner, RuntimeClient>, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { validator_overseer_builder( diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 6dcdec07ca84..83a0afc077e7 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -92,6 +92,7 @@ pub use chain_spec::{GenericChainSpec, RococoChainSpec, WestendChainSpec}; pub use consensus_common::{Proposal, SelectChain}; use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; use mmr_gadget::MmrGadget; +use polkadot_node_subsystem_types::DefaultSubsystemClient; pub use polkadot_primitives::{Block, BlockId, BlockNumber, CollatorPair, Hash, Id as ParaId}; pub use sc_client_api::{Backend, CallExecutor}; pub use sc_consensus::{BlockImport, LongestChain}; @@ -1081,12 +1082,17 @@ pub fn new_full( None }; + let runtime_client = Arc::new(DefaultSubsystemClient::new( + overseer_client.clone(), + OffchainTransactionPoolFactory::new(transaction_pool.clone()), + )); + let overseer_handle = if let Some(authority_discovery_service) = authority_discovery_service { let (overseer, overseer_handle) = overseer_gen - .generate::( + .generate::>( overseer_connector, OverseerGenArgs { - runtime_client: overseer_client.clone(), + runtime_client, network_service: network.clone(), sync_service: sync_service.clone(), authority_discovery_service, @@ -1099,9 +1105,6 @@ pub fn new_full( 
overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory: OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - ), notification_services, }, ext_overseer_args, diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index b2b0786bfc24..a81673704649 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use super::{AuthorityDiscoveryApi, Block, Error, Hash, IsParachainNode, Registry}; -use polkadot_node_subsystem_types::DefaultSubsystemClient; +use super::{Block, Error, Hash, IsParachainNode, Registry}; +use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_overseer::{DummySubsystem, InitializedOverseerBuilder, SubsystemError}; -use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_core::traits::SpawnNamed; use polkadot_availability_distribution::IncomingRequestReceivers; @@ -40,14 +39,10 @@ use polkadot_overseer::{ }; use parking_lot::Mutex; -use polkadot_primitives::runtime_api::ParachainHost; use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_client_api::AuxStore; use sc_keystore::LocalKeystore; use sc_network::{NetworkStateInfo, NotificationService}; -use sp_api::ProvideRuntimeApi; -use sp_blockchain::HeaderBackend; -use sp_consensus_babe::BabeApi; use std::{collections::HashMap, sync::Arc}; pub use polkadot_approval_distribution::ApprovalDistribution as ApprovalDistributionSubsystem; @@ -80,8 +75,6 @@ pub use polkadot_statement_distribution::StatementDistributionSubsystem; /// Arguments passed for overseer construction. pub struct OverseerGenArgs<'a, Spawner, RuntimeClient> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, Spawner: 'static + SpawnNamed + Clone + Unpin, { /// Runtime client generic, providing the `ProvideRuntimeApi` trait besides others. @@ -89,7 +82,7 @@ where /// Underlying network service implementation. pub network_service: Arc>, /// Underlying syncing service implementation. - pub sync_service: Arc>, + pub sync_service: Arc, /// Underlying authority discovery service. pub authority_discovery_service: AuthorityDiscoveryService, /// Collations request receiver for network protocol v1. @@ -111,8 +104,6 @@ where pub req_protocol_names: ReqProtocolNames, /// `PeerSet` protocol names to protocols mapping. pub peerset_protocol_names: PeerSetProtocolNames, - /// The offchain transaction pool factory. - pub offchain_transaction_pool_factory: OffchainTransactionPoolFactory, /// Notification services for validation/collation protocols.
pub notification_services: HashMap>, } @@ -160,7 +151,6 @@ pub fn validator_overseer_builder( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory, notification_services, }: OverseerGenArgs, ExtendedOverseerGenArgs { @@ -180,7 +170,7 @@ pub fn validator_overseer_builder( ) -> Result< InitializedOverseerBuilder< SpawnGlue, - Arc>, + Arc, CandidateValidationSubsystem, PvfCheckerSubsystem, CandidateBackingSubsystem, @@ -190,7 +180,7 @@ pub fn validator_overseer_builder( BitfieldSigningSubsystem, BitfieldDistributionSubsystem, ProvisionerSubsystem, - RuntimeApiSubsystem>, + RuntimeApiSubsystem, AvailabilityStoreSubsystem, NetworkBridgeRxSubsystem< Arc>, @@ -214,8 +204,7 @@ pub fn validator_overseer_builder( Error, > where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { use polkadot_node_subsystem_util::metrics::Metrics; @@ -227,11 +216,6 @@ where let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let runtime_api_client = Arc::new(DefaultSubsystemClient::new( - runtime_client.clone(), - offchain_transaction_pool_factory, - )); - let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service.clone(), @@ -298,7 +282,7 @@ where }) .provisioner(ProvisionerSubsystem::new(Metrics::register(registry)?)) .runtime_api(RuntimeApiSubsystem::new( - runtime_api_client.clone(), + runtime_client.clone(), Metrics::register(registry)?, spawner.clone(), )) @@ -339,7 +323,7 @@ where .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) - .supports_parachains(runtime_api_client) + .supports_parachains(runtime_client) .metrics(metrics) .spawner(spawner); @@ -367,13 +351,12 @@ pub fn collator_overseer_builder( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - offchain_transaction_pool_factory, notification_services, }: OverseerGenArgs, ) -> Result< InitializedOverseerBuilder< SpawnGlue, - Arc>, + Arc, DummySubsystem, DummySubsystem, DummySubsystem, @@ -383,7 +366,7 @@ pub fn collator_overseer_builder( DummySubsystem, DummySubsystem, DummySubsystem, - RuntimeApiSubsystem>, + RuntimeApiSubsystem, DummySubsystem, NetworkBridgeRxSubsystem< Arc>, @@ -407,24 +390,17 @@ pub fn collator_overseer_builder( Error, > where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, Spawner: 'static + SpawnNamed + Clone + Unpin, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, { use polkadot_node_subsystem_util::metrics::Metrics; - let metrics = ::register(registry)?; let notification_sinks = Arc::new(Mutex::new(HashMap::new())); let spawner = SpawnGlue(spawner); let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; - let runtime_api_client = Arc::new(DefaultSubsystemClient::new( - runtime_client.clone(), - offchain_transaction_pool_factory, - )); - let builder = Overseer::builder() .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service.clone(), @@ -475,7 +451,7 @@ where }) .provisioner(DummySubsystem) .runtime_api(RuntimeApiSubsystem::new( - runtime_api_client.clone(), + runtime_client.clone(), 
Metrics::register(registry)?, spawner.clone(), )) @@ -490,8 +466,8 @@ where .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) - .supports_parachains(runtime_api_client) - .metrics(metrics) + .supports_parachains(runtime_client) + .metrics(Metrics::register(registry)?) .spawner(spawner); let builder = if let Some(capacity) = overseer_message_channel_capacity_override { @@ -510,13 +486,9 @@ pub trait OverseerGen { connector: OverseerConnector, args: OverseerGenArgs, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin; // It would be nice to make `create_subsystems` part of this trait, @@ -533,13 +505,9 @@ impl OverseerGen for ValidatorOverseerGen { connector: OverseerConnector, args: OverseerGenArgs, ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { let ext_args = ext_args.ok_or(Error::Overseer(SubsystemError::Context( @@ -561,13 +529,9 @@ impl OverseerGen for CollatorOverseerGen { connector: OverseerConnector, args: OverseerGenArgs, _ext_args: Option, - ) -> Result< - (Overseer, Arc>>, OverseerHandle), - Error, - > + ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where - RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, - RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static, Spawner: 'static + SpawnNamed + Clone + Unpin, { collator_overseer_builder(args)? diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 7f6183076101..4039fc9127da 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -26,13 +26,13 @@ use polkadot_primitives::{ PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; -use sc_client_api::HeaderBackend; +use sc_client_api::{AuxStore, HeaderBackend}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::{ApiError, ApiExt, ProvideRuntimeApi}; use sp_authority_discovery::AuthorityDiscoveryApi; -use sp_blockchain::Info; +use sp_blockchain::{BlockStatus, Info}; use sp_consensus_babe::{BabeApi, Epoch}; -use sp_runtime::traits::{Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::{collections::BTreeMap, sync::Arc}; /// Offers header utilities. 
@@ -595,3 +595,61 @@ where self.client.runtime_api().approval_voting_params(at) } } + +impl HeaderBackend for DefaultSubsystemClient +where + Client: HeaderBackend, + Block: sp_runtime::traits::Block, +{ + fn header( + &self, + hash: Block::Hash, + ) -> sc_client_api::blockchain::Result> { + self.client.header(hash) + } + + fn info(&self) -> Info { + self.client.info() + } + + fn status(&self, hash: Block::Hash) -> sc_client_api::blockchain::Result { + self.client.status(hash) + } + + fn number( + &self, + hash: Block::Hash, + ) -> sc_client_api::blockchain::Result::Header as HeaderT>::Number>> { + self.client.number(hash) + } + + fn hash( + &self, + number: NumberFor, + ) -> sc_client_api::blockchain::Result> { + self.client.hash(number) + } +} + +impl AuxStore for DefaultSubsystemClient +where + Client: AuxStore, +{ + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { + self.client.insert_aux(insert, delete) + } + + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + self.client.get_aux(key) + } +} diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index a601e3210dd0..22caf8950637 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -149,6 +149,14 @@ impl<'a> DummyCrate<'a> { sysroot_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()) } + fn get_toolchain(&self) -> Option { + let sysroot = self.get_sysroot()?; + Path::new(sysroot.trim()) + .file_name() + .and_then(|s| s.to_str()) + .map(|s| s.to_string()) + } + fn try_build(&self) -> Result<(), Option> { let Ok(result) = self.prepare_command("build").output() else { return Err(None) }; if !result.status.success() { @@ -164,14 +172,15 @@ fn check_wasm_toolchain_installed( let dummy_crate = DummyCrate::new(&cargo_command, RuntimeTarget::Wasm); if let Err(error) = dummy_crate.try_build() { + let toolchain = dummy_crate.get_toolchain().unwrap_or("".to_string()); let basic_error_message = colorize_error_message( - "Rust WASM toolchain is not properly installed; please install it!", + &format!("Rust WASM target for toolchain {toolchain} is not properly installed; please install it!") ); return match error { None => Err(basic_error_message), Some(error) if error.contains("the `wasm32-unknown-unknown` target may not be installed") => { - Err(colorize_error_message("Cannot compile the WASM runtime: the `wasm32-unknown-unknown` target is not installed!\n\ - You can install it with `rustup target add wasm32-unknown-unknown` if you're using `rustup`.")) + Err(colorize_error_message(&format!("Cannot compile the WASM runtime: the `wasm32-unknown-unknown` target is not installed!\n\ + You can install it with `rustup target add wasm32-unknown-unknown --toolchain {toolchain}` if you're using `rustup`."))) }, // Apparently this can happen when we're running on a non Tier 1 platform. 
Some(ref error) if error.contains("linker `rust-lld` not found") => @@ -193,9 +202,10 @@ fn check_wasm_toolchain_installed( let src_path = Path::new(sysroot.trim()).join("lib").join("rustlib").join("src").join("rust"); if !src_path.exists() { + let toolchain = dummy_crate.get_toolchain().unwrap_or("".to_string()); return Err(colorize_error_message( - "Cannot compile the WASM runtime: no standard library sources found!\n\ - You can install them with `rustup component add rust-src` if you're using `rustup`.", + &format!("Cannot compile the WASM runtime: no standard library sources found at {}!\n\ + You can install them with `rustup component add rust-src --toolchain {toolchain}` if you're using `rustup`.", src_path.display()), )) } } From 12ce4f7d049b70918cadb658de4bbe8eb6ffc670 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Wed, 28 Feb 2024 18:32:02 +1100 Subject: [PATCH 51/51] Runtime Upgrade ref docs and Single Block Migration example pallet (#1554) Closes https://github.com/paritytech/polkadot-sdk-docs/issues/55 - Changes 'current storage version' terminology to less ambiguous 'in-code storage version' (suggestion by @ggwpez) - Adds a new example pallet `pallet-example-single-block-migrations` - Adds a new reference doc to replace https://docs.substrate.io/maintain/runtime-upgrades/ (temporarily living in the pallet while we wait for developer hub PR to merge) - Adds documentation for the `storage_alias` macro - Improves `trait Hooks` docs - Improves `trait GetStorageVersion` docs - Update the suggested patterns for using `VersionedMigration`, so that version unchecked migrations are never exported - Prevents accidental usage of version unchecked migrations in runtimes https://github.com/paritytech/substrate/pull/14421#discussion_r1255467895 - Unversioned migration code is kept inside `mod version_unchecked`, versioned code is kept in `pub mod versioned` - It is necessary to use modules to limit visibility because the inner migration must be `pub`. See https://github.com/rust-lang/rust/issues/30905 and https://internals.rust-lang.org/t/lang-team-minutes-private-in-public-rules/4504/40 for more. 
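To make the visibility pattern above concrete, here is a minimal, illustrative sketch (not part of this diff; the pallet and migration names such as `MigrateV0ToV1`, `crate::Config` and `crate::Pallet` are hypothetical) of keeping the unchecked migration private while exporting only the versioned wrapper:

```rust
// Hedged sketch only. The inner migration must be `pub` so the alias below can
// reference it, but the private `version_unchecked` module keeps it out of the
// crate's public API, so runtimes cannot import it directly.
mod version_unchecked {
    use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};

    pub struct MigrateV0ToV1<T>(core::marker::PhantomData<T>);

    impl<T: crate::Config> OnRuntimeUpgrade for MigrateV0ToV1<T> {
        fn on_runtime_upgrade() -> Weight {
            // Translate storage from the V0 layout to the V1 layout here.
            Weight::zero()
        }
    }
}

/// Only the version-checked wrapper is exported for use in runtimes.
pub mod versioned {
    /// `VersionedMigration` runs the inner migration only when the pallet's
    /// on-chain storage version is 0, then bumps it to 1.
    pub type MigrateV0ToV1<T> = frame_support::migrations::VersionedMigration<
        0,
        1,
        super::version_unchecked::MigrateV0ToV1<T>,
        crate::Pallet<T>,
        <T as frame_system::Config>::DbWeight,
    >;
}
```

Because `version_unchecked` is private, a runtime can only reference the `versioned` alias, which enforces the storage-version checks.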
### todo - [x] move to reference docs to proper place within sdk-docs (now that https://github.com/paritytech/polkadot-sdk/pull/2102 is merged) - [x] prdoc --------- Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Juan Co-authored-by: Oliver Tale-Yazdi Co-authored-by: command-bot <> Co-authored-by: gupnik --- Cargo.lock | 26 ++ Cargo.toml | 1 + cumulus/pallets/collator-selection/src/lib.rs | 2 +- .../collator-selection/src/migration.rs | 10 +- .../pallets/parachain-system/src/migration.rs | 2 +- cumulus/pallets/xcmp-queue/src/migration.rs | 2 +- .../pallets/collective-content/src/lib.rs | 2 +- .../assets/asset-hub-rococo/src/lib.rs | 14 +- .../assets/asset-hub-westend/src/lib.rs | 6 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 4 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 4 +- docs/sdk/Cargo.toml | 7 +- .../reference_docs/frame_runtime_migration.rs | 9 - .../frame_runtime_upgrades_and_migrations.rs | 138 +++++++++++ docs/sdk/src/reference_docs/mod.rs | 5 +- .../node/service/src/parachains_db/upgrade.rs | 2 +- .../common/src/assigned_slots/migration.rs | 12 +- .../runtime/common/src/assigned_slots/mod.rs | 2 +- .../runtime/common/src/crowdloan/migration.rs | 4 +- polkadot/runtime/common/src/crowdloan/mod.rs | 2 +- .../runtime/common/src/paras_registrar/mod.rs | 2 +- .../runtime/parachains/src/configuration.rs | 2 +- polkadot/runtime/parachains/src/disputes.rs | 2 +- .../parachains/src/session_info/migration.rs | 2 +- prdoc/pr_1554.prdoc | 14 ++ substrate/client/executor/src/executor.rs | 14 +- substrate/frame/alliance/src/migration.rs | 8 +- substrate/frame/assets/src/lib.rs | 2 +- substrate/frame/assets/src/migration.rs | 20 +- substrate/frame/balances/src/lib.rs | 2 +- substrate/frame/balances/src/migration.rs | 8 +- substrate/frame/collective/src/lib.rs | 2 +- substrate/frame/contracts/src/lib.rs | 2 +- substrate/frame/contracts/src/migration.rs | 20 +- .../frame/contracts/src/migration/v09.rs | 14 +- .../frame/contracts/src/migration/v10.rs | 24 +- .../frame/contracts/src/migration/v11.rs | 12 +- .../frame/contracts/src/migration/v12.rs | 40 ++-- .../frame/contracts/src/migration/v13.rs | 10 +- .../frame/contracts/src/migration/v14.rs | 20 +- .../frame/contracts/src/migration/v15.rs | 16 +- substrate/frame/democracy/src/lib.rs | 2 +- .../election-provider-multi-phase/src/lib.rs | 2 +- .../src/migrations.rs | 4 +- substrate/frame/elections-phragmen/src/lib.rs | 2 +- substrate/frame/examples/Cargo.toml | 3 + .../single-block-migrations/Cargo.toml | 61 +++++ .../single-block-migrations/src/lib.rs | 213 +++++++++++++++++ .../src/migrations/mod.rs | 20 ++ .../src/migrations/v1.rs | 222 ++++++++++++++++++ .../single-block-migrations/src/mock.rs | 69 ++++++ substrate/frame/examples/src/lib.rs | 3 + .../frame/fast-unstake/src/migrations.rs | 4 +- substrate/frame/grandpa/src/lib.rs | 2 +- substrate/frame/im-online/src/lib.rs | 2 +- substrate/frame/im-online/src/migration.rs | 2 +- substrate/frame/membership/src/lib.rs | 2 +- substrate/frame/multisig/src/lib.rs | 2 +- substrate/frame/multisig/src/migrations.rs | 2 +- substrate/frame/nfts/src/lib.rs | 2 +- substrate/frame/nfts/src/migration.rs | 16 +- substrate/frame/nomination-pools/src/lib.rs | 2 +- .../frame/nomination-pools/src/migration.rs | 28 +-- substrate/frame/preimage/src/lib.rs | 2 +- substrate/frame/referenda/src/lib.rs | 2 +- substrate/frame/referenda/src/migration.rs | 16 +- substrate/frame/scheduler/src/lib.rs | 2 +- substrate/frame/session/src/historical/mod.rs | 2 +- 
substrate/frame/session/src/lib.rs | 2 +- substrate/frame/society/src/migrations.rs | 54 ++--- substrate/frame/society/src/tests.rs | 44 ++-- substrate/frame/staking/src/migrations.rs | 12 +- substrate/frame/staking/src/pallet/mod.rs | 2 +- substrate/frame/support/procedural/src/lib.rs | 38 ++- .../procedural/src/pallet/expand/hooks.rs | 22 +- .../src/pallet/expand/pallet_struct.rs | 6 +- .../src/pallet/parse/pallet_struct.rs | 2 +- substrate/frame/support/src/lib.rs | 4 +- substrate/frame/support/src/migrations.rs | 55 +++-- substrate/frame/support/src/traits/hooks.rs | 70 +++--- .../frame/support/src/traits/metadata.rs | 71 ++++-- .../support/test/tests/construct_runtime.rs | 2 +- substrate/frame/support/test/tests/pallet.rs | 22 +- .../compare_unset_storage_version.rs | 2 +- .../compare_unset_storage_version.stderr | 2 +- substrate/frame/system/src/lib.rs | 2 +- substrate/frame/tips/src/lib.rs | 2 +- 87 files changed, 1223 insertions(+), 370 deletions(-) delete mode 100644 docs/sdk/src/reference_docs/frame_runtime_migration.rs create mode 100644 docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs create mode 100644 prdoc/pr_1554.prdoc create mode 100644 substrate/frame/examples/single-block-migrations/Cargo.toml create mode 100644 substrate/frame/examples/single-block-migrations/src/lib.rs create mode 100644 substrate/frame/examples/single-block-migrations/src/migrations/mod.rs create mode 100644 substrate/frame/examples/single-block-migrations/src/migrations/v1.rs create mode 100644 substrate/frame/examples/single-block-migrations/src/mock.rs diff --git a/Cargo.lock b/Cargo.lock index 27ba43780eda..026786378d5a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9965,6 +9965,26 @@ dependencies = [ "sp-std 14.0.0", ] +[[package]] +name = "pallet-example-single-block-migrations" +version = "0.0.1" +dependencies = [ + "docify", + "frame-executive", + "frame-support", + "frame-system", + "frame-try-runtime", + "log", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 14.0.0", + "sp-version", +] + [[package]] name = "pallet-example-split" version = "10.0.0" @@ -10006,6 +10026,7 @@ dependencies = [ "pallet-example-frame-crate", "pallet-example-kitchensink", "pallet-example-offchain-worker", + "pallet-example-single-block-migrations", "pallet-example-split", "pallet-example-tasks", ] @@ -13385,6 +13406,8 @@ dependencies = [ "cumulus-pallet-parachain-system", "docify", "frame", + "frame-executive", + "frame-support", "frame-system", "kitchensink-runtime", "pallet-aura", @@ -13393,9 +13416,11 @@ dependencies = [ "pallet-collective", "pallet-default-config-example", "pallet-democracy", + "pallet-example-single-block-migrations", "pallet-examples", "pallet-multisig", "pallet-proxy", + "pallet-scheduler", "pallet-timestamp", "pallet-transaction-payment", "pallet-utility", @@ -13418,6 +13443,7 @@ dependencies = [ "sp-io", "sp-keyring", "sp-runtime", + "sp-version", "staging-chain-spec-builder", "staging-node-cli", "staging-parachain-info", diff --git a/Cargo.toml b/Cargo.toml index 1d27cfe95392..654367cc1e21 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -337,6 +337,7 @@ members = [ "substrate/frame/examples/frame-crate", "substrate/frame/examples/kitchensink", "substrate/frame/examples/offchain-worker", + "substrate/frame/examples/single-block-migrations", "substrate/frame/examples/split", "substrate/frame/examples/tasks", "substrate/frame/executive", diff --git a/cumulus/pallets/collator-selection/src/lib.rs 
b/cumulus/pallets/collator-selection/src/lib.rs index 7449f4d68c7e..fb4c4a445df2 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ -118,7 +118,7 @@ pub mod pallet { use sp_staking::SessionIndex; use sp_std::vec::Vec; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); type BalanceOf = diff --git a/cumulus/pallets/collator-selection/src/migration.rs b/cumulus/pallets/collator-selection/src/migration.rs index 58b4cc5b06a1..f384981dbae8 100644 --- a/cumulus/pallets/collator-selection/src/migration.rs +++ b/cumulus/pallets/collator-selection/src/migration.rs @@ -31,8 +31,8 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 { + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version == 0 { let invulnerables_len = Invulnerables::::get().to_vec().len(); >::mutate(|invulnerables| { invulnerables.sort(); @@ -45,7 +45,7 @@ pub mod v1 { invulnerables_len, ); // Similar complexity to `set_invulnerables` (put storage value) - // Plus 1 read for length, 1 read for `onchain_version`, 1 write to put version + // Plus 1 read for length, 1 read for `on_chain_version`, 1 write to put version T::WeightInfo::set_invulnerables(invulnerables_len as u32) .saturating_add(T::DbWeight::get().reads_writes(2, 1)) } else { @@ -83,8 +83,8 @@ pub mod v1 { "after migration, there should be the same number of invulnerables" ); - let onchain_version = Pallet::::on_chain_storage_version(); - frame_support::ensure!(onchain_version >= 1, "must_upgrade"); + let on_chain_version = Pallet::::on_chain_storage_version(); + frame_support::ensure!(on_chain_version >= 1, "must_upgrade"); Ok(()) } diff --git a/cumulus/pallets/parachain-system/src/migration.rs b/cumulus/pallets/parachain-system/src/migration.rs index a92f85b9cd42..30106aceab5a 100644 --- a/cumulus/pallets/parachain-system/src/migration.rs +++ b/cumulus/pallets/parachain-system/src/migration.rs @@ -21,7 +21,7 @@ use frame_support::{ weights::Weight, }; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); /// Migrates the pallet storage to the most recent version. diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index 6c86c3011d23..c7fa61a3e3f0 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -24,7 +24,7 @@ use frame_support::{ weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight}, }; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); pub const LOG: &str = "runtime::xcmp-queue-migration"; diff --git a/cumulus/parachains/pallets/collective-content/src/lib.rs b/cumulus/parachains/pallets/collective-content/src/lib.rs index 7a685858accb..b1c960ad6a0d 100644 --- a/cumulus/parachains/pallets/collective-content/src/lib.rs +++ b/cumulus/parachains/pallets/collective-content/src/lib.rs @@ -59,7 +59,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; use sp_runtime::{traits::BadOrigin, Saturating}; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); #[pallet::pallet] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 81bb66a56a29..6791dc4064ed 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -986,37 +986,37 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if Multisig::on_chain_storage_version() == StorageVersion::new(0) { - Multisig::current_storage_version().put::(); + Multisig::in_code_storage_version().put::(); writes.saturating_inc(); } if Assets::on_chain_storage_version() == StorageVersion::new(0) { - Assets::current_storage_version().put::(); + Assets::in_code_storage_version().put::(); writes.saturating_inc(); } if Uniques::on_chain_storage_version() == StorageVersion::new(0) { - Uniques::current_storage_version().put::(); + Uniques::in_code_storage_version().put::(); writes.saturating_inc(); } if Nfts::on_chain_storage_version() == StorageVersion::new(0) { - Nfts::current_storage_version().put::(); + Nfts::in_code_storage_version().put::(); writes.saturating_inc(); } if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) { - ForeignAssets::current_storage_version().put::(); + ForeignAssets::in_code_storage_version().put::(); writes.saturating_inc(); } if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) { - PoolAssets::current_storage_version().put::(); + PoolAssets::in_code_storage_version().put::(); writes.saturating_inc(); } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 3f58c679eeed..5352eeb1a1b7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1037,17 +1037,17 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) { - ForeignAssets::current_storage_version().put::(); + ForeignAssets::in_code_storage_version().put::(); writes.saturating_inc(); } if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) { - PoolAssets::current_storage_version().put::(); + PoolAssets::in_code_storage_version().put::(); writes.saturating_inc(); } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index f576776696c3..db1dfbd45d63 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -168,12 +168,12 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if 
Balances::on_chain_storage_version() == StorageVersion::new(0) { - Balances::current_storage_version().put::(); + Balances::in_code_storage_version().put::(); writes.saturating_inc(); } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index bd42a33370dc..1c8c9aa20755 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -142,12 +142,12 @@ impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { let mut writes = 0; if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { - PolkadotXcm::current_storage_version().put::(); + PolkadotXcm::in_code_storage_version().put::(); writes.saturating_inc(); } if Balances::on_chain_storage_version() == StorageVersion::new(0) { - Balances::current_storage_version().put::(); + Balances::in_code_storage_version().put::(); writes.saturating_inc(); } diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 576a81834f8c..b1b60a2d77db 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -33,6 +33,10 @@ node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" kitchensink-runtime = { path = "../../substrate/bin/node/runtime" } chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../substrate/bin/utils/chain-spec-builder" } subkey = { path = "../../substrate/bin/utils/subkey" } +frame-system = { path = "../../substrate/frame/system", default-features = false } +frame-support = { path = "../../substrate/frame/support", default-features = false } +frame-executive = { path = "../../substrate/frame/executive", default-features = false } +pallet-example-single-block-migrations = { path = "../../substrate/frame/examples/single-block-migrations" } # Substrate sc-network = { path = "../../substrate/client/network" } @@ -66,7 +70,7 @@ pallet-proxy = { path = "../../substrate/frame/proxy" } pallet-authorship = { path = "../../substrate/frame/authorship" } pallet-collective = { path = "../../substrate/frame/collective" } pallet-democracy = { path = "../../substrate/frame/democracy" } -frame-system = { path = "../../substrate/frame/system" } +pallet-scheduler = { path = "../../substrate/frame/scheduler" } # Primitives sp-io = { path = "../../substrate/primitives/io" } @@ -74,6 +78,7 @@ sp-api = { path = "../../substrate/primitives/api" } sp-core = { path = "../../substrate/primitives/core" } sp-keyring = { path = "../../substrate/primitives/keyring" } sp-runtime = { path = "../../substrate/primitives/runtime" } +sp-version = { path = "../../substrate/primitives/version" } # XCM xcm = { package = "staging-xcm", path = "../../polkadot/xcm" } diff --git a/docs/sdk/src/reference_docs/frame_runtime_migration.rs b/docs/sdk/src/reference_docs/frame_runtime_migration.rs deleted file mode 100644 index 0616ccbb6f57..000000000000 --- a/docs/sdk/src/reference_docs/frame_runtime_migration.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! # Runtime Runtime Upgrade and Testing -//! -//! -//! Notes: -//! -//! - Flow of things, when does `on_runtime_upgrade` get called. Link to to `Hooks` and its diagram -//! as source of truth. -//! - Data migration and when it is needed. -//! - Look into the pba-lecture. 
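As background for the `current_storage_version` → `in_code_storage_version` rename applied throughout the hunks above, here is a minimal sketch (not part of this diff; `my_pallet` is a hypothetical pallet with a declared storage version) of how the two accessors relate in the `InitStorageVersions`-style pattern:

```rust
// Hedged sketch only; illustrates the distinction the rename makes explicit.
use frame_support::traits::{GetStorageVersion, StorageVersion};

fn init_storage_version<T: my_pallet::Config>() {
    // `on_chain_storage_version()` reads the version last written to chain storage;
    // `in_code_storage_version()` (formerly `current_storage_version()`) is the
    // version declared in the pallet's code via `#[pallet::storage_version]`.
    if my_pallet::Pallet::<T>::on_chain_storage_version() == StorageVersion::new(0) {
        // Seed the on-chain version from the in-code one, as `InitStorageVersions` does.
        my_pallet::Pallet::<T>::in_code_storage_version().put::<my_pallet::Pallet<T>>();
    }
}
```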
diff --git a/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs new file mode 100644 index 000000000000..7d870b432218 --- /dev/null +++ b/docs/sdk/src/reference_docs/frame_runtime_upgrades_and_migrations.rs @@ -0,0 +1,138 @@ +//! # Runtime Upgrades +//! +//! At their core, blockchain logic consists of +//! +//! 1. on-chain state and +//! 2. a state transition function +//! +//! In Substrate-based blockchains, state transition functions are referred to as +//! [runtimes](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/blockchain_state_machines/index.html). +//! +//! Traditionally, before Substrate, upgrading state transition functions required node +//! operators to download new software and restart their nodes in a process called +//! [forking](https://en.wikipedia.org/wiki/Fork_(blockchain)). +//! +//! Substrate-based blockchains do not require forking, and instead upgrade runtimes +//! in a process called "Runtime Upgrades". +//! +//! Forkless runtime upgrades are a defining feature of the Substrate framework. Updating the +//! runtime logic without forking the code base enables your blockchain to seamlessly evolve +//! over time in a deterministic, rules-based manner. It also removes ambiguity for node operators +//! and other participants in the network about what the canonical runtime is. +//! +//! This capability is possible because the blockchain's runtime itself lives in on-chain storage. +//! +//! ## Performing a Runtime Upgrade +//! +//! To upgrade a runtime, an [`Origin`](frame_system::RawOrigin) with the necessary permissions +//! (usually via governance) changes the `:code` storage. Typically, this is performed via a call to +//! [`set_code`] (or [`set_code_without_checks`]) with the desired new runtime blob, scheduled +//! using [`pallet_scheduler`]. +//! +//! Prior to building the new runtime, don't forget to update the +//! [`RuntimeVersion`](sp_version::RuntimeVersion). +//! +//! # Migrations +//! +//! It is often desirable to define logic to execute immediately after runtime upgrades (see +//! [this diagram](frame::traits::Hooks)). +//! +//! Self-contained pieces of logic that execute after a runtime upgrade are called "Migrations". +//! +//! The typical use case of a migration is to 'migrate' pallet storage from one layout to another, +//! for example when the encoding of a storage item is changed. However, they can also execute +//! arbitrary logic such as: +//! +//! - Calling arbitrary pallet methods +//! - Mutating arbitrary on-chain state +//! - Cleaning up some old storage items that are no longer needed +//! +//! ## Single Block Migrations +//! +//! - Execute immediately and entirely at the beginning of the block following +//! a runtime upgrade. +//! - Are suitable for migrations which are guaranteed to not exceed the block weight. +//! - Are simply implementations of [`OnRuntimeUpgrade`]. +//! +//! To learn best practices for writing single block pallet storage migrations, see the +//! [Single Block Migration Example Pallet](pallet_example_single_block_migrations). +//! +//! ### Scheduling Single Block Migrations to Run on the Next Runtime Upgrade +//! +//! Schedule migrations to run on the next runtime upgrade by passing them as a generic parameter to your +//! [`Executive`](frame_executive) pallet: +//! +//! ```ignore +//! /// Tuple of migrations (structs that implement `OnRuntimeUpgrade`) +//! type Migrations = ( +//! pallet_example_storage_migration::migrations::v1::versioned::MigrateV0ToV1, +//! MyCustomMigration, +//! // ...more migrations here +//! ); +//! pub type Executive = frame_executive::Executive< +//! Runtime, +//! Block, +//! frame_system::ChainContext, +//! Runtime, +//! AllPalletsWithSystem, +//! Migrations, // <-- pass your migrations to Executive here +//! >; +//! ``` +//! +//! ### Ensuring Single Block Migration Safety +//! +//! "My migration unit tests pass, so it should be safe to deploy, right?" +//! +//! No! Unit tests execute the migration in a very simple test environment, and cannot account +//! for the complexities of a real runtime or real on-chain state. +//! +//! Prior to deploying migrations, it is critical to perform additional checks to ensure that when +//! run in the real runtime they will not brick the chain due to: +//! - Panicking +//! - Touching too many storage keys and resulting in an excessively large PoV +//! - Taking too long to execute +//! +//! [`try-runtime-cli`](https://github.com/paritytech/try-runtime-cli) has a sub-command +//! [`on-runtime-upgrade`](https://paritytech.github.io/try-runtime-cli/try_runtime_core/commands/enum.Action.html#variant.OnRuntimeUpgrade) +//! which is designed to help with exactly this. +//! +//! Developers MUST run this command before deploying migrations to ensure they will not +//! inadvertently result in a bricked chain. +//! +//! It is recommended to run this check as part of your CI pipeline. See the +//! [polkadot-sdk check-runtime-migration job](https://github.com/paritytech/polkadot-sdk/blob/4a293bc5a25be637c06ce950a34490706597615b/.gitlab/pipeline/check.yml#L103-L124) +//! for an example of how to configure this. +//! +//! ### Note on the Manipulability of PoV Size and Execution Time +//! +//! While [`try-runtime-cli`](https://github.com/paritytech/try-runtime-cli) can help ensure with +//! very high certainty that a migration will succeed given **existing** on-chain state, it cannot +//! prevent a malicious actor from manipulating state in a way that will cause the migration to take +//! longer or produce a PoV much larger than previously measured. +//! +//! Therefore, it is important to write migrations in such a way that the execution time or PoV size +//! they add to the block cannot be easily manipulated, e.g. do not iterate over storage that can +//! quickly or cheaply be bloated. +//! +//! If writing your migration in such a way is not possible, a multi block migration should be used +//! instead. +//! +//! ### Other useful tools +//! +//! [`Chopsticks`](https://github.com/AcalaNetwork/chopsticks) is another tool in the Substrate +//! ecosystem which developers may find useful to use in addition to `try-runtime-cli` when testing +//! their single block migrations. +//! +//! ## Multi Block Migrations +//! +//! Safely and easily execute long-running migrations across multiple blocks. +//! +//! Suitable for migrations which could use arbitrary amounts of block weight. +//! +//! TODO: Link to multi block migration example/s once PR is merged (). +//! +//! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion +//! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade +//! [`StorageVersion`]: frame_support::traits::StorageVersion +//! [`set_code`]: frame_system::Call::set_code +//!
[`set_code_without_checks`]: frame_system::Call::set_code_without_checks diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs index 760bb442c166..b2c751420ce7 100644 --- a/docs/sdk/src/reference_docs/mod.rs +++ b/docs/sdk/src/reference_docs/mod.rs @@ -92,9 +92,8 @@ pub mod cli; // TODO: @JoshOrndorff @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/54 pub mod consensus_swapping; -/// Learn about all the advance ways to test your coordinate a rutnime upgrade and data migration. -// TODO: @liamaharon https://github.com/paritytech/polkadot-sdk-docs/issues/55 -pub mod frame_runtime_migration; +/// Learn about Runtime Upgrades and best practices for writing Migrations. +pub mod frame_runtime_upgrades_and_migrations; /// Learn about light nodes, how they function, and how Substrate-based chains come /// light-node-first out of the box. diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index d22eebb5c8d4..2eceb391b15c 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -93,7 +93,7 @@ pub(crate) fn try_upgrade_db( } /// Try upgrading parachain's database to the next version. -/// If successfull, it returns the current version. +/// If successful, it returns the current version. pub(crate) fn try_upgrade_db_to_next_version( db_path: &Path, db_kind: DatabaseKind, diff --git a/polkadot/runtime/common/src/assigned_slots/migration.rs b/polkadot/runtime/common/src/assigned_slots/migration.rs index ba3108c0aa38..def6bad692a2 100644 --- a/polkadot/runtime/common/src/assigned_slots/migration.rs +++ b/polkadot/runtime/common/src/assigned_slots/migration.rs @@ -29,14 +29,14 @@ pub mod v1 { impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - let onchain_version = Pallet::::on_chain_storage_version(); - ensure!(onchain_version < 1, "assigned_slots::MigrateToV1 migration can be deleted"); + let on_chain_version = Pallet::::on_chain_storage_version(); + ensure!(on_chain_version < 1, "assigned_slots::MigrateToV1 migration can be deleted"); Ok(Default::default()) } fn on_runtime_upgrade() -> frame_support::weights::Weight { - let onchain_version = Pallet::::on_chain_storage_version(); - if onchain_version < 1 { + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version < 1 { const MAX_PERMANENT_SLOTS: u32 = 100; const MAX_TEMPORARY_SLOTS: u32 = 100; @@ -52,8 +52,8 @@ pub mod v1 { #[cfg(feature = "try-runtime")] fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { - let onchain_version = Pallet::::on_chain_storage_version(); - ensure!(onchain_version == 1, "assigned_slots::MigrateToV1 needs to be run"); + let on_chain_version = Pallet::::on_chain_storage_version(); + ensure!(on_chain_version == 1, "assigned_slots::MigrateToV1 needs to be run"); assert_eq!(>::get(), 100); assert_eq!(>::get(), 100); Ok(()) diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 3419e3497f7d..4f032f4dfa3b 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -107,7 +107,7 @@ type LeasePeriodOf = <::Leaser as Leaser>>::Le pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/polkadot/runtime/common/src/crowdloan/migration.rs b/polkadot/runtime/common/src/crowdloan/migration.rs index 5133c14ada92..3afd6b3fbc94 100644 --- a/polkadot/runtime/common/src/crowdloan/migration.rs +++ b/polkadot/runtime/common/src/crowdloan/migration.rs @@ -24,9 +24,9 @@ use frame_support::{ pub struct MigrateToTrackInactiveV2(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToTrackInactiveV2 { fn on_runtime_upgrade() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 1 { + if on_chain_version == 1 { let mut translated = 0u64; for item in Funds::::iter_values() { let b = diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index 7d1b892dfa7f..35d075f2ff6c 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -180,7 +180,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); #[pallet::pallet] diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 5450c4309e40..4541b88ec6fe 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -106,7 +106,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index 7cc5b31fc8fd..b0e9d03df886 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -505,7 +505,7 @@ impl WeightInfo for TestWeightInfo { pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. /// /// v0-v1: /// v1-v2: diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index c2383dad3053..da95b060c14a 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -379,7 +379,7 @@ pub mod pallet { type WeightInfo: WeightInfo; } - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/polkadot/runtime/parachains/src/session_info/migration.rs b/polkadot/runtime/parachains/src/session_info/migration.rs index 228c1e3bb251..ea6f81834b5d 100644 --- a/polkadot/runtime/parachains/src/session_info/migration.rs +++ b/polkadot/runtime/parachains/src/session_info/migration.rs @@ -18,5 +18,5 @@ use frame_support::traits::StorageVersion; -/// The current storage version. +/// The in-code storage version. 
pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); diff --git a/prdoc/pr_1554.prdoc b/prdoc/pr_1554.prdoc new file mode 100644 index 000000000000..bfce7c5edafd --- /dev/null +++ b/prdoc/pr_1554.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Runtime Upgrade ref docs and Single Block Migration example pallet + +doc: + - audience: Runtime Dev + description: | + `frame_support::traits::GetStorageVersion::current_storage_version` has been renamed to `frame_support::traits::GetStorageVersion::in_code_storage_version`. + A simple find-replace is sufficient to handle this change. + +crates: + - name: "frame-support" + diff --git a/substrate/client/executor/src/executor.rs b/substrate/client/executor/src/executor.rs index 499bb704b169..d56a3b389ef4 100644 --- a/substrate/client/executor/src/executor.rs +++ b/substrate/client/executor/src/executor.rs @@ -518,7 +518,7 @@ where runtime_code, ext, heap_alloc_strategy, - |_, mut instance, _onchain_version, mut ext| { + |_, mut instance, _on_chain_version, mut ext| { with_externalities_safe(&mut **ext, move || instance.call_export(method, data)) }, ); @@ -682,18 +682,18 @@ impl CodeExecutor for NativeElseWasmExecutor runtime_code, ext, heap_alloc_strategy, - |_, mut instance, onchain_version, mut ext| { - let onchain_version = - onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; + |_, mut instance, on_chain_version, mut ext| { + let on_chain_version = + on_chain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; let can_call_with = - onchain_version.can_call_with(&self.native_version.runtime_version); + on_chain_version.can_call_with(&self.native_version.runtime_version); if use_native && can_call_with { tracing::trace!( target: "executor", native = %self.native_version.runtime_version, - chain = %onchain_version, + chain = %on_chain_version, "Request for native execution succeeded", ); @@ -705,7 +705,7 @@ impl CodeExecutor for NativeElseWasmExecutor tracing::trace!( target: "executor", native = %self.native_version.runtime_version, - chain = %onchain_version, + chain = %on_chain_version, "Request for native execution failed", ); } diff --git a/substrate/frame/alliance/src/migration.rs b/substrate/frame/alliance/src/migration.rs index e3a44a7887e9..432f09a16f47 100644 --- a/substrate/frame/alliance/src/migration.rs +++ b/substrate/frame/alliance/src/migration.rs @@ -19,19 +19,19 @@ use crate::{Config, Pallet, Weight, LOG_TARGET}; use frame_support::{pallet_prelude::*, storage::migration, traits::OnRuntimeUpgrade}; use log; -/// The current storage version. +/// The in-code storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); /// Wrapper for all migrations of this pallet.
pub fn migrate, I: 'static>() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); let mut weight: Weight = Weight::zero(); - if onchain_version < 1 { + if on_chain_version < 1 { weight = weight.saturating_add(v0_to_v1::migrate::()); } - if onchain_version < 2 { + if on_chain_version < 2 { weight = weight.saturating_add(v1_to_v2::migrate::()); } diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index cafe7bb1a3b5..60316f8cdcf1 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -208,7 +208,7 @@ pub mod pallet { }; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/assets/src/migration.rs b/substrate/frame/assets/src/migration.rs index ff0ffbff0d36..dd7c12293e80 100644 --- a/substrate/frame/assets/src/migration.rs +++ b/substrate/frame/assets/src/migration.rs @@ -67,9 +67,9 @@ pub mod v1 { pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 && current_version == 1 { + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version == 0 && in_code_version == 1 { let mut translated = 0u64; Asset::::translate::< OldAssetDetails>, @@ -78,12 +78,12 @@ pub mod v1 { translated.saturating_inc(); Some(old_value.migrate_to_v1()) }); - current_version.put::>(); + in_code_version.put::>(); log::info!( target: LOG_TARGET, "Upgraded {} pools, storage to version {:?}", translated, - current_version + in_code_version ); T::DbWeight::get().reads_writes(translated + 1, translated + 1) } else { @@ -116,13 +116,13 @@ pub mod v1 { "the asset count before and after the migration should be the same" ); - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - frame_support::ensure!(current_version == 1, "must_upgrade"); + frame_support::ensure!(in_code_version == 1, "must_upgrade"); ensure!( - current_version == onchain_version, - "after migration, the current_version and onchain_version should be the same" + in_code_version == on_chain_version, + "after migration, the in_code_version and on_chain_version should be the same" ); Asset::::iter().try_for_each(|(_id, asset)| -> Result<(), TryRuntimeError> { diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 7dd087eabd63..e87aa6f9311e 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -320,7 +320,7 @@ pub mod pallet { type MaxFreezes: Get; } - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: frame_support::traits::StorageVersion = frame_support::traits::StorageVersion::new(1); diff --git a/substrate/frame/balances/src/migration.rs b/substrate/frame/balances/src/migration.rs index ba6819ec6e81..38d9c07ff7e0 100644 --- a/substrate/frame/balances/src/migration.rs +++ b/substrate/frame/balances/src/migration.rs @@ -22,9 +22,9 @@ use frame_support::{ }; fn migrate_v0_to_v1, I: 'static>(accounts: &[T::AccountId]) -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 0 { + if on_chain_version == 0 { let total = accounts .iter() .map(|a| Pallet::::total_balance(a)) @@ -76,9 +76,9 @@ impl, A: Get>, I: 'static> OnRuntimeUpgrade pub struct ResetInactive(PhantomData<(T, I)>); impl, I: 'static> OnRuntimeUpgrade for ResetInactive { fn on_runtime_upgrade() -> Weight { - let onchain_version = Pallet::::on_chain_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); - if onchain_version == 1 { + if on_chain_version == 1 { // Remove the old `StorageVersion` type. frame_support::storage::unhashed::kill(&frame_support::storage::storage_prefix( Pallet::::name().as_bytes(), diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs index c084784e0a9b..dd481b536814 100644 --- a/substrate/frame/collective/src/lib.rs +++ b/substrate/frame/collective/src/lib.rs @@ -176,7 +176,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index d8939ee88baf..32d789a458f9 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -234,7 +234,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; use sp_runtime::Perbill; - /// The current storage version. + /// The in-code storage version. 
pub(crate) const STORAGE_VERSION: StorageVersion = StorageVersion::new(15); #[pallet::pallet] diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index 6d61cb6b1e1a..f30ae1ebfade 100644 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -263,10 +263,10 @@ impl Migration { impl OnRuntimeUpgrade for Migration { fn on_runtime_upgrade() -> Weight { let name = >::name(); - let current_version = >::current_storage_version(); + let in_code_version = >::in_code_storage_version(); let on_chain_version = >::on_chain_storage_version(); - if on_chain_version == current_version { + if on_chain_version == in_code_version { log::warn!( target: LOG_TARGET, "{name}: No Migration performed storage_version = latest_version = {:?}", @@ -289,7 +289,7 @@ impl OnRuntimeUpgrade for Migration OnRuntimeUpgrade for Migration>::on_chain_storage_version(); - let current_version = >::current_storage_version(); + let in_code_version = >::in_code_storage_version(); - if on_chain_version == current_version { + if on_chain_version == in_code_version { return Ok(Default::default()) } log::debug!( target: LOG_TARGET, - "Requested migration of {} from {:?}(on-chain storage version) to {:?}(current storage version)", - >::name(), on_chain_version, current_version + "Requested migration of {} from {:?}(on-chain storage version) to {:?}(in-code storage version)", + >::name(), on_chain_version, in_code_version ); ensure!( - T::Migrations::is_upgrade_supported(on_chain_version, current_version), - "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, current storage version)" + T::Migrations::is_upgrade_supported(on_chain_version, in_code_version), + "Unsupported upgrade: VERSION_RANGE should be (on-chain storage version + 1, in-code storage version)" ); Ok(Default::default()) @@ -421,7 +421,7 @@ impl Migration { }, StepResult::Completed { steps_done } => { in_progress_version.put::>(); - if >::current_storage_version() != in_progress_version { + if >::in_code_storage_version() != in_progress_version { log::info!( target: LOG_TARGET, "{name}: Next migration is {:?},", diff --git a/substrate/frame/contracts/src/migration/v09.rs b/substrate/frame/contracts/src/migration/v09.rs index 98fcccc2c0be..f19bff9d6747 100644 --- a/substrate/frame/contracts/src/migration/v09.rs +++ b/substrate/frame/contracts/src/migration/v09.rs @@ -28,7 +28,7 @@ use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound, Identity}; use sp_runtime::TryRuntimeError; use sp_std::prelude::*; -mod old { +mod v8 { use super::*; #[derive(Encode, Decode)] @@ -50,14 +50,14 @@ mod old { #[cfg(feature = "runtime-benchmarks")] pub fn store_old_dummy_code(len: usize) { use sp_runtime::traits::Hash; - let module = old::PrefabWasmModule { + let module = v8::PrefabWasmModule { instruction_weights_version: 0, initial: 0, maximum: 0, code: vec![42u8; len], }; let hash = T::Hashing::hash(&module.code); - old::CodeStorage::::insert(hash, module); + v8::CodeStorage::::insert(hash, module); } #[derive(Encode, Decode)] @@ -89,9 +89,9 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_key) = self.last_code_hash.take() { - old::CodeStorage::::iter_from(old::CodeStorage::::hashed_key_for(last_key)) + v8::CodeStorage::::iter_from(v8::CodeStorage::::hashed_key_for(last_key)) } else { - old::CodeStorage::::iter() + v8::CodeStorage::::iter() }; if let Some((key, old)) = iter.next() { @@ -115,7 
+115,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::CodeStorage::::iter().take(100).collect(); + let sample: Vec<_> = v8::CodeStorage::::iter().take(100).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contract codes", sample.len()); Ok(sample.encode()) @@ -123,7 +123,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { - let sample = , old::PrefabWasmModule)> as Decode>::decode(&mut &state[..]) + let sample = , v8::PrefabWasmModule)> as Decode>::decode(&mut &state[..]) .expect("pre_upgrade_step provides a valid state; qed"); log::debug!(target: LOG_TARGET, "Validating sample of {} contract codes", sample.len()); diff --git a/substrate/frame/contracts/src/migration/v10.rs b/substrate/frame/contracts/src/migration/v10.rs index d64673aac7d2..1bee86e6a773 100644 --- a/substrate/frame/contracts/src/migration/v10.rs +++ b/substrate/frame/contracts/src/migration/v10.rs @@ -47,7 +47,7 @@ use sp_runtime::{ }; use sp_std::prelude::*; -mod old { +mod v9 { use super::*; pub type BalanceOf = ( ) where OldCurrency: ReservableCurrency<::AccountId> + 'static, { - let info = old::ContractInfo { + let info = v9::ContractInfo { trie_id: info.trie_id, code_hash: info.code_hash, storage_bytes: Default::default(), @@ -94,7 +94,7 @@ pub fn store_old_contract_info( storage_item_deposit: Default::default(), storage_base_deposit: Default::default(), }; - old::ContractInfoOf::::insert(account, info); + v9::ContractInfoOf::::insert(account, info); } #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen)] @@ -120,9 +120,9 @@ where pub code_hash: CodeHash, storage_bytes: u32, storage_items: u32, - pub storage_byte_deposit: old::BalanceOf, - storage_item_deposit: old::BalanceOf, - storage_base_deposit: old::BalanceOf, + pub storage_byte_deposit: v9::BalanceOf, + storage_item_deposit: v9::BalanceOf, + storage_base_deposit: v9::BalanceOf, } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -152,7 +152,7 @@ fn deposit_address( impl MigrationStep for Migration where OldCurrency: ReservableCurrency<::AccountId> - + Inspect<::AccountId, Balance = old::BalanceOf>, + + Inspect<::AccountId, Balance = v9::BalanceOf>, { const VERSION: u16 = 10; @@ -162,11 +162,11 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from( - old::ContractInfoOf::::hashed_key_for(last_account), + v9::ContractInfoOf::::iter_from( + v9::ContractInfoOf::::hashed_key_for(last_account), ) } else { - old::ContractInfoOf::::iter() + v9::ContractInfoOf::::iter() }; if let Some((account, contract)) = iter.next() { @@ -276,7 +276,7 @@ where #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::ContractInfoOf::::iter().take(10).collect(); + let sample: Vec<_> = v9::ContractInfoOf::::iter().take(10).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contracts", sample.len()); Ok(sample.encode()) @@ -284,7 +284,7 @@ where #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { - let sample = )> as Decode>::decode( + let sample = )> as Decode>::decode( &mut &state[..], ) .expect("pre_upgrade_step provides a valid state; qed"); diff --git a/substrate/frame/contracts/src/migration/v11.rs 
b/substrate/frame/contracts/src/migration/v11.rs index a5b11f6e0897..9bfbb25edfb4 100644 --- a/substrate/frame/contracts/src/migration/v11.rs +++ b/substrate/frame/contracts/src/migration/v11.rs @@ -29,7 +29,7 @@ use sp_runtime::TryRuntimeError; use codec::{Decode, Encode}; use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound}; use sp_std::{marker::PhantomData, prelude::*}; -mod old { +mod v10 { use super::*; #[derive(Encode, Decode, TypeInfo, MaxEncodedLen)] @@ -51,11 +51,11 @@ pub struct DeletionQueueManager { #[cfg(any(feature = "runtime-benchmarks", feature = "try-runtime"))] pub fn fill_old_queue(len: usize) { - let queue: Vec = - core::iter::repeat_with(|| old::DeletedContract { trie_id: Default::default() }) + let queue: Vec = + core::iter::repeat_with(|| v10::DeletedContract { trie_id: Default::default() }) .take(len) .collect(); - old::DeletionQueue::::set(Some(queue)); + v10::DeletionQueue::::set(Some(queue)); } #[storage_alias] @@ -80,7 +80,7 @@ impl MigrationStep for Migration { } fn step(&mut self) -> (IsFinished, Weight) { - let Some(old_queue) = old::DeletionQueue::::take() else { + let Some(old_queue) = v10::DeletionQueue::::take() else { return (IsFinished::Yes, Weight::zero()) }; let len = old_queue.len(); @@ -106,7 +106,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let old_queue = old::DeletionQueue::::take().unwrap_or_default(); + let old_queue = v10::DeletionQueue::::take().unwrap_or_default(); if old_queue.is_empty() { let len = 10u32; diff --git a/substrate/frame/contracts/src/migration/v12.rs b/substrate/frame/contracts/src/migration/v12.rs index 7dee31503101..d9128286df38 100644 --- a/substrate/frame/contracts/src/migration/v12.rs +++ b/substrate/frame/contracts/src/migration/v12.rs @@ -34,7 +34,7 @@ use sp_runtime::TryRuntimeError; use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128, Saturating}; use sp_std::prelude::*; -mod old { +mod v11 { use super::*; pub type BalanceOf = , #[codec(compact)] - deposit: old::BalanceOf, + deposit: v11::BalanceOf, #[codec(compact)] refcount: u64, determinism: Determinism, @@ -112,17 +112,17 @@ where let hash = T::Hashing::hash(&code); PristineCode::::insert(hash, code.clone()); - let module = old::PrefabWasmModule { + let module = v11::PrefabWasmModule { instruction_weights_version: Default::default(), initial: Default::default(), maximum: Default::default(), code, determinism: Determinism::Enforced, }; - old::CodeStorage::::insert(hash, module); + v11::CodeStorage::::insert(hash, module); - let info = old::OwnerInfo { owner: account, deposit: u32::MAX.into(), refcount: u64::MAX }; - old::OwnerInfoOf::::insert(hash, info); + let info = v11::OwnerInfo { owner: account, deposit: u32::MAX.into(), refcount: u64::MAX }; + v11::OwnerInfoOf::::insert(hash, info); } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -148,16 +148,16 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_key) = self.last_code_hash.take() { - old::OwnerInfoOf::::iter_from( - old::OwnerInfoOf::::hashed_key_for(last_key), + v11::OwnerInfoOf::::iter_from( + v11::OwnerInfoOf::::hashed_key_for(last_key), ) } else { - old::OwnerInfoOf::::iter() + v11::OwnerInfoOf::::iter() }; if let Some((hash, old_info)) = iter.next() { log::debug!(target: LOG_TARGET, "Migrating OwnerInfo for code_hash {:?}", hash); - let module = old::CodeStorage::::take(hash) + let module = v11::CodeStorage::::take(hash) .expect(format!("No 
PrefabWasmModule found for code_hash: {:?}", hash).as_str()); let code_len = module.code.len(); @@ -184,7 +184,7 @@ where let bytes_before = module .encoded_size() .saturating_add(code_len) - .saturating_add(old::OwnerInfo::::max_encoded_len()) + .saturating_add(v11::OwnerInfo::::max_encoded_len()) as u32; let items_before = 3u32; let deposit_expected_before = price_per_byte @@ -241,10 +241,10 @@ where fn pre_upgrade_step() -> Result, TryRuntimeError> { let len = 100; log::debug!(target: LOG_TARGET, "Taking sample of {} OwnerInfo(s)", len); - let sample: Vec<_> = old::OwnerInfoOf::::iter() + let sample: Vec<_> = v11::OwnerInfoOf::::iter() .take(len) .map(|(k, v)| { - let module = old::CodeStorage::::get(k) + let module = v11::CodeStorage::::get(k) .expect("No PrefabWasmModule found for code_hash: {:?}"); let info: CodeInfo = CodeInfo { determinism: module.determinism, @@ -258,9 +258,9 @@ where .collect(); let storage: u32 = - old::CodeStorage::::iter().map(|(_k, v)| v.encoded_size() as u32).sum(); - let mut deposit: old::BalanceOf = Default::default(); - old::OwnerInfoOf::::iter().for_each(|(_k, v)| deposit += v.deposit); + v11::CodeStorage::::iter().map(|(_k, v)| v.encoded_size() as u32).sum(); + let mut deposit: v11::BalanceOf = Default::default(); + v11::OwnerInfoOf::::iter().for_each(|(_k, v)| deposit += v.deposit); Ok((sample, deposit, storage).encode()) } @@ -269,7 +269,7 @@ where fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { let state = <( Vec<(CodeHash, CodeInfo)>, - old::BalanceOf, + v11::BalanceOf, u32, ) as Decode>::decode(&mut &state[..]) .unwrap(); @@ -283,7 +283,7 @@ where ensure!(info.refcount == old.refcount, "invalid refcount"); } - if let Some((k, _)) = old::CodeStorage::::iter().next() { + if let Some((k, _)) = v11::CodeStorage::::iter().next() { log::warn!( target: LOG_TARGET, "CodeStorage is still NOT empty, found code_hash: {:?}", @@ -292,7 +292,7 @@ where } else { log::debug!(target: LOG_TARGET, "CodeStorage is empty."); } - if let Some((k, _)) = old::OwnerInfoOf::::iter().next() { + if let Some((k, _)) = v11::OwnerInfoOf::::iter().next() { log::warn!( target: LOG_TARGET, "OwnerInfoOf is still NOT empty, found code_hash: {:?}", @@ -302,7 +302,7 @@ where log::debug!(target: LOG_TARGET, "OwnerInfoOf is empty."); } - let mut deposit: old::BalanceOf = Default::default(); + let mut deposit: v11::BalanceOf = Default::default(); let mut items = 0u32; let mut storage_info = 0u32; CodeInfoOf::::iter().for_each(|(_k, v)| { diff --git a/substrate/frame/contracts/src/migration/v13.rs b/substrate/frame/contracts/src/migration/v13.rs index dd2eb12eb62a..498c44d53abc 100644 --- a/substrate/frame/contracts/src/migration/v13.rs +++ b/substrate/frame/contracts/src/migration/v13.rs @@ -28,7 +28,7 @@ use frame_support::{pallet_prelude::*, storage_alias, DefaultNoBound}; use sp_runtime::BoundedBTreeMap; use sp_std::prelude::*; -mod old { +mod v12 { use super::*; #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -59,7 +59,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co let entropy = (b"contract_depo_v1", account.clone()).using_encoded(T::Hashing::hash); let deposit_account = Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed"); - let info = old::ContractInfo { + let info = v12::ContractInfo { trie_id: info.trie_id.clone(), deposit_account, code_hash: info.code_hash, @@ -69,7 +69,7 @@ pub fn store_old_contract_info(account: 
T::AccountId, info: crate::Co storage_item_deposit: Default::default(), storage_base_deposit: Default::default(), }; - old::ContractInfoOf::::insert(account, info); + v12::ContractInfoOf::::insert(account, info); } #[storage_alias] @@ -104,11 +104,11 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from(old::ContractInfoOf::::hashed_key_for( + v12::ContractInfoOf::::iter_from(v12::ContractInfoOf::::hashed_key_for( last_account, )) } else { - old::ContractInfoOf::::iter() + v12::ContractInfoOf::::iter() }; if let Some((key, old)) = iter.next() { diff --git a/substrate/frame/contracts/src/migration/v14.rs b/substrate/frame/contracts/src/migration/v14.rs index 94534d05fdf8..09da09e5bcfb 100644 --- a/substrate/frame/contracts/src/migration/v14.rs +++ b/substrate/frame/contracts/src/migration/v14.rs @@ -44,7 +44,7 @@ use sp_runtime::{traits::Zero, Saturating}; #[cfg(feature = "try-runtime")] use sp_std::collections::btree_map::BTreeMap; -mod old { +mod v13 { use super::*; pub type BalanceOf = , #[codec(compact)] - pub deposit: old::BalanceOf, + pub deposit: v13::BalanceOf, #[codec(compact)] pub refcount: u64, pub determinism: Determinism, @@ -86,14 +86,14 @@ where let code = vec![42u8; len as usize]; let hash = T::Hashing::hash(&code); - let info = old::CodeInfo { + let info = v13::CodeInfo { owner: account, deposit: 10_000u32.into(), refcount: u64::MAX, determinism: Determinism::Enforced, code_len: len, }; - old::CodeInfoOf::::insert(hash, info); + v13::CodeInfoOf::::insert(hash, info); } #[cfg(feature = "try-runtime")] @@ -105,9 +105,9 @@ where OldCurrency: ReservableCurrency<::AccountId>, { /// Total reserved balance as code upload deposit for the owner. - reserved: old::BalanceOf, + reserved: v13::BalanceOf, /// Total balance of the owner. 
- total: old::BalanceOf, + total: v13::BalanceOf, } #[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)] @@ -134,11 +134,11 @@ where fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_hash) = self.last_code_hash.take() { - old::CodeInfoOf::::iter_from( - old::CodeInfoOf::::hashed_key_for(last_hash), + v13::CodeInfoOf::::iter_from( + v13::CodeInfoOf::::hashed_key_for(last_hash), ) } else { - old::CodeInfoOf::::iter() + v13::CodeInfoOf::::iter() }; if let Some((hash, code_info)) = iter.next() { @@ -194,7 +194,7 @@ where #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let info: Vec<_> = old::CodeInfoOf::::iter().collect(); + let info: Vec<_> = v13::CodeInfoOf::::iter().collect(); let mut owner_balance_allocation = BTreeMap::, BalanceAllocation>::new(); diff --git a/substrate/frame/contracts/src/migration/v15.rs b/substrate/frame/contracts/src/migration/v15.rs index 180fe855ca66..c77198d6fea0 100644 --- a/substrate/frame/contracts/src/migration/v15.rs +++ b/substrate/frame/contracts/src/migration/v15.rs @@ -46,7 +46,7 @@ use sp_runtime::{traits::Zero, Saturating}; #[cfg(feature = "try-runtime")] use sp_std::vec::Vec; -mod old { +mod v14 { use super::*; #[derive( @@ -81,7 +81,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co let entropy = (b"contract_depo_v1", account.clone()).using_encoded(T::Hashing::hash); let deposit_account = Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed"); - let info = old::ContractInfo { + let info = v14::ContractInfo { trie_id: info.trie_id.clone(), deposit_account, code_hash: info.code_hash, @@ -92,7 +92,7 @@ pub fn store_old_contract_info(account: T::AccountId, info: crate::Co storage_base_deposit: info.storage_base_deposit(), delegate_dependencies: info.delegate_dependencies().clone(), }; - old::ContractInfoOf::::insert(account, info); + v14::ContractInfoOf::::insert(account, info); } #[derive(Encode, Decode, CloneNoBound, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -127,11 +127,11 @@ impl MigrationStep for Migration { fn step(&mut self) -> (IsFinished, Weight) { let mut iter = if let Some(last_account) = self.last_account.take() { - old::ContractInfoOf::::iter_from(old::ContractInfoOf::::hashed_key_for( + v14::ContractInfoOf::::iter_from(v14::ContractInfoOf::::hashed_key_for( last_account, )) } else { - old::ContractInfoOf::::iter() + v14::ContractInfoOf::::iter() }; if let Some((account, old_contract)) = iter.next() { @@ -243,11 +243,11 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn pre_upgrade_step() -> Result, TryRuntimeError> { - let sample: Vec<_> = old::ContractInfoOf::::iter().take(100).collect(); + let sample: Vec<_> = v14::ContractInfoOf::::iter().take(100).collect(); log::debug!(target: LOG_TARGET, "Taking sample of {} contracts", sample.len()); - let state: Vec<(T::AccountId, old::ContractInfo, BalanceOf, BalanceOf)> = sample + let state: Vec<(T::AccountId, v14::ContractInfo, BalanceOf, BalanceOf)> = sample .iter() .map(|(account, contract)| { ( @@ -265,7 +265,7 @@ impl MigrationStep for Migration { #[cfg(feature = "try-runtime")] fn post_upgrade_step(state: Vec) -> Result<(), TryRuntimeError> { let sample = - , BalanceOf, BalanceOf)> as Decode>::decode( + , BalanceOf, BalanceOf)> as Decode>::decode( &mut &state[..], ) .expect("pre_upgrade_step provides a valid state; qed"); diff --git a/substrate/frame/democracy/src/lib.rs 
b/substrate/frame/democracy/src/lib.rs index 089556191cd1..08e2a7599f55 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -211,7 +211,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 4f43f89abed2..6bf4dfe4f1eb 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -1343,7 +1343,7 @@ pub mod pallet { #[pallet::getter(fn minimum_untrusted_score)] pub type MinimumUntrustedScore = StorageValue<_, ElectionScore>; - /// The current storage version. + /// The in-code storage version. /// /// v1: https://github.com/paritytech/substrate/pull/12237/ const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); diff --git a/substrate/frame/election-provider-multi-phase/src/migrations.rs b/substrate/frame/election-provider-multi-phase/src/migrations.rs index 50b821e6db6a..156f1c02e27c 100644 --- a/substrate/frame/election-provider-multi-phase/src/migrations.rs +++ b/substrate/frame/election-provider-multi-phase/src/migrations.rs @@ -27,12 +27,12 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index a078361a5f71..308f2cdc1aad 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -188,7 +188,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index eb6355edd312..45c7440eb891 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -23,6 +23,7 @@ pallet-example-frame-crate = { path = "frame-crate", default-features = false } pallet-example-kitchensink = { path = "kitchensink", default-features = false } pallet-example-offchain-worker = { path = "offchain-worker", default-features = false } pallet-example-split = { path = "split", default-features = false } +pallet-example-single-block-migrations = { path = "single-block-migrations", default-features = false } pallet-example-tasks = { path = "tasks", default-features = false } [features] @@ -34,6 +35,7 @@ std = [ "pallet-example-frame-crate/std", "pallet-example-kitchensink/std", "pallet-example-offchain-worker/std", + "pallet-example-single-block-migrations/std", "pallet-example-split/std", "pallet-example-tasks/std", ] @@ -43,6 +45,7 @@ try-runtime = [ "pallet-example-basic/try-runtime", "pallet-example-kitchensink/try-runtime", "pallet-example-offchain-worker/try-runtime", + "pallet-example-single-block-migrations/try-runtime", "pallet-example-split/try-runtime", "pallet-example-tasks/try-runtime", ] diff --git a/substrate/frame/examples/single-block-migrations/Cargo.toml b/substrate/frame/examples/single-block-migrations/Cargo.toml new file mode 100644 index 000000000000..5059b2433c77 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "pallet-example-single-block-migrations" +version = "0.0.1" +authors.workspace = true +edition.workspace = true +license = "MIT-0" +homepage = "https://substrate.io" +repository.workspace = true +description = "FRAME example pallet demonstrating best-practices for writing storage migrations." 
+publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +docify = { version = "0.2.3", default-features = false } +log = { version = "0.4.20", default-features = false } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +frame-support = { path = "../../support", default-features = false } +frame-executive = { path = "../../executive", default-features = false } +frame-system = { path = "../../system", default-features = false } +frame-try-runtime = { path = "../../try-runtime", default-features = false, optional = true } +pallet-balances = { path = "../../balances", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-version = { path = "../../../primitives/version", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-executive/std", + "frame-support/std", + "frame-system/std", + "frame-try-runtime/std", + "log/std", + "pallet-balances/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "sp-version/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/examples/single-block-migrations/src/lib.rs b/substrate/frame/examples/single-block-migrations/src/lib.rs new file mode 100644 index 000000000000..86a9e5d6e95b --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/lib.rs @@ -0,0 +1,213 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Single Block Migration Example Pallet +//! +//! An example pallet demonstrating best-practices for writing single-block migrations in the +//! context of upgrading pallet storage. +//! +//! ## Forewarning +//! +//! Single block migrations **MUST** execute in a single block, and are therefore only appropriate +//! for a parachain when they are guaranteed not to exceed block weight limits. If a +//! parachain submits a block that exceeds the block weight limit, it will **brick the chain**! +//! +//! If weight is a concern or you are not sure which type of migration to use, you should probably +//! use a multi-block migration. +//! +//! TODO: Link above to multi-block migration example. +//! +//!
## Pallet Overview +//! +//! This example pallet contains a single storage item [`Value`](pallet::Value), which may be set by +//! any signed origin by calling the [`set_value`](crate::Call::set_value) extrinsic. +//! +//! For the purposes of this exercise, we imagine that in [`StorageVersion`] V0 of this pallet +//! [`Value`](pallet::Value) is a `u32`, and this is what is currently stored on-chain. +//! +//! ```ignore +//! // (Old) Storage Version V0 representation of `Value` +//! #[pallet::storage] +//! pub type Value<T> = StorageValue<_, u32>; +//! ``` +//! +//! In [`StorageVersion`] V1 of the pallet a new struct [`CurrentAndPreviousValue`] is introduced: +#![doc = docify::embed!("src/lib.rs", CurrentAndPreviousValue)] +//! and [`Value`](pallet::Value) is updated to store this new struct instead of a `u32`: +#![doc = docify::embed!("src/lib.rs", Value)] +//! +//! In StorageVersion V1 of the pallet when [`set_value`](crate::Call::set_value) is called, the +//! new value is stored in the `current` field of [`CurrentAndPreviousValue`], and the previous +//! value (if it exists) is stored in the `previous` field. +#![doc = docify::embed!("src/lib.rs", pallet_calls)] +//! +//! ## Why a migration is necessary +//! +//! Without a migration, there will be a discrepancy between the on-chain storage for [`Value`] (in +//! V0 it is a `u32`) and the current storage for [`Value`] (in V1 it was changed to a +//! [`CurrentAndPreviousValue`] struct). +//! +//! The on-chain storage for [`Value`] would be a `u32` but the runtime would try to read it as a +//! [`CurrentAndPreviousValue`]. This would result in unacceptable undefined behavior. +//! +//! ## Adding a migration module +//! +//! Writing a pallet's migrations in a separate module is strongly recommended. +//! +//! Here's how the migration module is defined for this pallet: +//! +//! ```text +//! substrate/frame/examples/single-block-migrations/src/ +//! ├── lib.rs <-- pallet definition +//! ├── Cargo.toml <-- pallet manifest +//! └── migrations/ +//! ├── mod.rs <-- migrations module definition +//! └── v1.rs <-- migration logic for the V0 to V1 transition +//! ``` +//! +//! This structure allows keeping migration logic separate from the pallet logic and +//! easily adding new migrations in the future. +//! +//! ## Writing the Migration +//! +//! All code related to the migration can be found under +//! [`v1.rs`](migrations::v1). +//! +//! See the migration source code for detailed comments. +//! +//! To keep the migration logic organised, it is split across additional modules: +//! +//! ### `mod v0` +//! +//! Here we define a [`storage_alias`](frame_support::storage_alias) for the old v0 [`Value`] +//! format. +//! +//! This allows reading the old v0 value from storage during the migration. +//! +//! ### `mod version_unchecked` +//! +//! Here we define our raw migration logic, +//! `version_unchecked::MigrateV0ToV1`, which implements the [`OnRuntimeUpgrade`] trait. +//! +//! Importantly, it is kept in a private module so that it cannot be accidentally used in a runtime. +//! +//! Private modules cannot be referenced in docs, so please read the code directly. +//! +//! #### Standalone Struct or Pallet Hook? +//! +//! Note that the storage migration logic is attached to a standalone struct implementing +//! [`OnRuntimeUpgrade`], rather than implementing the +//! [`Hooks::on_runtime_upgrade`](frame_support::traits::Hooks::on_runtime_upgrade) hook directly on +//! the pallet.
The pallet hook is better suited for special types of logic that need to execute on +//! every runtime upgrade, but not so much for one-off storage migrations. +//! +//! ### `pub mod versioned` +//! +//! Here, `version_unchecked::MigrateV0ToV1` is wrapped in a +//! [`VersionedMigration`] to define +//! [`versioned::MigrateV0ToV1`](crate::migrations::v1::versioned::MigrateV0ToV1), which may be used +//! in runtimes. +//! +//! Using [`VersionedMigration`] ensures that +//! - The migration only runs once when the on-chain storage version is `0` +//! - The on-chain storage version is updated to `1` after the migration executes +//! - Reads and writes from checking and setting the on-chain storage version are accounted for in +//! the final [`Weight`](frame_support::weights::Weight) +//! +//! This is the only public module exported from `v1`. +//! +//! ### `mod test` +//! +//! Here basic unit tests are defined for the migration. +//! +//! When writing migration tests, don't forget to check: +//! - `on_runtime_upgrade` returns the expected weight +//! - `post_upgrade` succeeds when given the bytes returned by `pre_upgrade` +//! - Pallet storage is in the expected state after the migration +//! +//! [`VersionedMigration`]: frame_support::migrations::VersionedMigration +//! [`GetStorageVersion`]: frame_support::traits::GetStorageVersion +//! [`OnRuntimeUpgrade`]: frame_support::traits::OnRuntimeUpgrade +//! [`MigrateV0ToV1`]: crate::migrations::v1::versioned::MigrateV0ToV1 + +// We make sure this pallet uses `no_std` for compiling to Wasm. +#![cfg_attr(not(feature = "std"), no_std)] +// allow non-camel-case names for storage version V0 value +#![allow(non_camel_case_types)] + +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; + +// Export migrations so they may be used in the runtime. +pub mod migrations; +#[doc(hidden)] +mod mock; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::traits::StorageVersion; +use sp_runtime::RuntimeDebug; + +/// Example struct holding the most recently set [`u32`] and the +/// second most recently set [`u32`] (if one existed). +#[docify::export] +#[derive( + Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen, +)] +pub struct CurrentAndPreviousValue { + /// The most recently set value. + pub current: u32, + /// The previous value, if one existed. + pub previous: Option<u32>, +} + +// Pallet for demonstrating storage migrations. +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// Define the current [`StorageVersion`] of the pallet. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + /// [`StorageVersion`] V1 of [`Value`]. + /// + /// Currently used.
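+ ///
+ /// As an illustrative sketch (not part of the original docs), a read now yields the whole
+ /// struct rather than a bare `u32`:
+ /// ```ignore
+ /// // One storage read returns both the current and the previous value.
+ /// let value = Value::<T>::get();
+ /// let current: Option<u32> = value.as_ref().map(|v| v.current);
+ /// let previous: Option<u32> = value.and_then(|v| v.previous);
+ /// ```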
+ #[docify::export] + #[pallet::storage] + pub type Value<T> = StorageValue<_, CurrentAndPreviousValue>; + + #[docify::export(pallet_calls)] + #[pallet::call] + impl<T: Config> Pallet<T> { + pub fn set_value(origin: OriginFor<T>, value: u32) -> DispatchResult { + ensure_signed(origin)?; + + let previous = Value::<T>::get().map(|v| v.current); + let new_struct = CurrentAndPreviousValue { current: value, previous }; + <Value<T>>::put(new_struct); + + Ok(()) + } + } +} diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs b/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs new file mode 100644 index 000000000000..80a33f69941a --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/migrations/mod.rs @@ -0,0 +1,20 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Module containing all logic associated with the example migration from +/// [`StorageVersion`](frame_support::traits::StorageVersion) V0 to V1. +pub mod v1; diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs new file mode 100644 index 000000000000..b46640a32020 --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs @@ -0,0 +1,222 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use frame_support::{ + storage_alias, + traits::{Get, OnRuntimeUpgrade}, +}; + +#[cfg(feature = "try-runtime")] +use sp_std::vec::Vec; + +/// Collection of storage item formats from the previous storage version. +/// +/// Required so we can read values in the v0 storage format during the migration. +mod v0 { + use super::*; + + /// V0 type for [`crate::Value`]. + #[storage_alias] + pub type Value<T: crate::Config> = StorageValue<crate::Pallet<T>, u32>; +} + +/// Private module containing *version unchecked* migration logic. +/// +/// Should only be used by the [`VersionedMigration`](frame_support::migrations::VersionedMigration) +/// type in this module to create something to export. +/// +/// The unversioned migration should be kept private so that it cannot +/// accidentally be used in any runtime.
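+///
+/// As a hedged sketch of why this matters (`Runtime` is an illustrative name), downstream code
+/// can only ever name the version-checked wrapper:
+/// ```ignore
+/// // Compiles: the wrapper exported from `pub mod versioned` below.
+/// type Migrations = migrations::v1::versioned::MigrateV0ToV1<Runtime>;
+/// // Fails to compile: `version_unchecked` is private to this file.
+/// // type Migrations = migrations::v1::version_unchecked::MigrateV0ToV1<Runtime>;
+/// ```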
+/// +/// For more about this pattern of keeping items private, see +/// - +/// - +mod version_unchecked { + use super::*; + + /// Implements [`OnRuntimeUpgrade`], migrating the state of this pallet from V0 to V1. + /// + /// In V0 of the template [`crate::Value`] is just a `u32`. In V1, it has been upgraded to + /// contain the struct [`crate::CurrentAndPreviousValue`]. + /// + /// In this migration, update the on-chain storage for the pallet to reflect the new storage + /// layout. + pub struct MigrateV0ToV1(sp_std::marker::PhantomData); + + impl OnRuntimeUpgrade for MigrateV0ToV1 { + /// Return the existing [`crate::Value`] so we can check that it was correctly set in + /// `version_unchecked::MigrateV0ToV1::post_upgrade`. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + + // Access the old value using the `storage_alias` type + let old_value = v0::Value::::get(); + // Return it as an encoded `Vec` + Ok(old_value.encode()) + } + + /// Migrate the storage from V0 to V1. + /// + /// - If the value doesn't exist, there is nothing to do. + /// - If the value exists, it is read and then written back to storage inside a + /// [`crate::CurrentAndPreviousValue`]. + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Read the old value from storage + if let Some(old_value) = v0::Value::::take() { + // Write the new value to storage + let new = crate::CurrentAndPreviousValue { current: old_value, previous: None }; + crate::Value::::put(new); + // One read for the old value, one write for the new value + T::DbWeight::get().reads_writes(1, 1) + } else { + // One read for trying to access the old value + T::DbWeight::get().reads(1) + } + } + + /// Verifies the storage was migrated correctly. + /// + /// - If there was no old value, the new value should not be set. + /// - If there was an old value, the new value should be a + /// [`crate::CurrentAndPreviousValue`]. + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + use codec::Decode; + use frame_support::ensure; + + let maybe_old_value = Option::::decode(&mut &state[..]).map_err(|_| { + sp_runtime::TryRuntimeError::Other("Failed to decode old value from storage") + })?; + + match maybe_old_value { + Some(old_value) => { + let expected_new_value = + crate::CurrentAndPreviousValue { current: old_value, previous: None }; + let actual_new_value = crate::Value::::get(); + + ensure!(actual_new_value.is_some(), "New value not set"); + ensure!( + actual_new_value == Some(expected_new_value), + "New value not set correctly" + ); + }, + None => { + ensure!(crate::Value::::get().is_none(), "New value unexpectedly set"); + }, + }; + Ok(()) + } + } +} + +/// Public module containing *version checked* migration logic. +/// +/// This is the only module that should be exported from this module. +/// +/// See [`VersionedMigration`](frame_support::migrations::VersionedMigration) docs for more about +/// how it works. 
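+///
+/// As an illustrative sketch (the runtime-side names are assumptions, not part of this file),
+/// the exported alias is what a runtime would hand to `Executive`:
+/// ```ignore
+/// pub type Migrations =
+/// 	(pallet_example_single_block_migrations::migrations::v1::versioned::MigrateV0ToV1<Runtime>,);
+/// pub type Executive = frame_executive::Executive<
+/// 	Runtime,
+/// 	Block,
+/// 	frame_system::ChainContext<Runtime>,
+/// 	Runtime,
+/// 	AllPalletsWithSystem,
+/// 	Migrations,
+/// >;
+/// ```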
+pub mod versioned { + use super::*; + + /// `version_unchecked::MigrateV0ToV1` wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), which ensures that: + /// - The migration only runs once when the on-chain storage version is 0 + /// - The on-chain storage version is updated to `1` after the migration executes + /// - Reads/Writes from checking/setting the on-chain storage version are accounted for + pub type MigrateV0ToV1<T> = frame_support::migrations::VersionedMigration< + 0, // The migration will only execute when the on-chain storage version is 0 + 1, // The on-chain storage version will be set to 1 after the migration is complete + version_unchecked::MigrateV0ToV1<T>, + crate::pallet::Pallet<T>, + <T as frame_system::Config>::DbWeight, + >; +} + +/// Tests for our migration. +/// +/// When writing migration tests, it is important to check: +/// 1. `on_runtime_upgrade` returns the expected weight +/// 2. `post_upgrade` succeeds when given the bytes returned by `pre_upgrade` +/// 3. The storage is in the expected state after the migration +#[cfg(any(all(feature = "try-runtime", test), doc))] +mod test { + use super::*; + use crate::mock::{new_test_ext, MockRuntime}; + use frame_support::assert_ok; + use version_unchecked::MigrateV0ToV1; + + #[test] + fn handles_no_existing_value() { + new_test_ext().execute_with(|| { + // By default, no value should be set. Verify this assumption. + assert!(crate::Value::<MockRuntime>::get().is_none()); + assert!(v0::Value::<MockRuntime>::get().is_none()); + + // Get the pre_upgrade bytes + let bytes = match MigrateV0ToV1::<MockRuntime>::pre_upgrade() { + Ok(bytes) => bytes, + Err(e) => panic!("pre_upgrade failed: {:?}", e), + }; + + // Execute the migration + let weight = MigrateV0ToV1::<MockRuntime>::on_runtime_upgrade(); + + // Verify post_upgrade succeeds + assert_ok!(MigrateV0ToV1::<MockRuntime>::post_upgrade(bytes)); + + // The weight should be just 1 read for trying to access the old value. + assert_eq!(weight, <MockRuntime as frame_system::Config>::DbWeight::get().reads(1)); + + // After the migration, no value should have been set. + assert!(crate::Value::<MockRuntime>::get().is_none()); + }) + } + + #[test] + fn handles_existing_value() { + new_test_ext().execute_with(|| { + // Set up an initial value + let initial_value = 42; + v0::Value::<MockRuntime>::put(initial_value); + + // Get the pre_upgrade bytes + let bytes = match MigrateV0ToV1::<MockRuntime>::pre_upgrade() { + Ok(bytes) => bytes, + Err(e) => panic!("pre_upgrade failed: {:?}", e), + }; + + // Execute the migration + let weight = MigrateV0ToV1::<MockRuntime>::on_runtime_upgrade(); + + // Verify post_upgrade succeeds + assert_ok!(MigrateV0ToV1::<MockRuntime>::post_upgrade(bytes)); + + // The weight used should be 1 read for the old value, and 1 write for the new + // value. + assert_eq!( + weight, + <MockRuntime as frame_system::Config>::DbWeight::get().reads_writes(1, 1) + ); + + // After the migration, the new value should be set as the `current` value. + let expected_new_value = + crate::CurrentAndPreviousValue { current: initial_value, previous: None }; + assert_eq!(crate::Value::<MockRuntime>::get(), Some(expected_new_value)); + }) + } +} diff --git a/substrate/frame/examples/single-block-migrations/src/mock.rs b/substrate/frame/examples/single-block-migrations/src/mock.rs new file mode 100644 index 000000000000..02f72e01836f --- /dev/null +++ b/substrate/frame/examples/single-block-migrations/src/mock.rs @@ -0,0 +1,69 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg(any(all(feature = "try-runtime", test), doc))] + +use crate::*; +use frame_support::{derive_impl, traits::ConstU64, weights::constants::ParityDbWeight}; + +// Re-export crate as its pallet name for construct_runtime. +use crate as pallet_example_storage_migration; + +type Block = frame_system::mocking::MockBlock; + +// For testing the pallet, we construct a mock runtime. +frame_support::construct_runtime!( + pub struct MockRuntime { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Example: pallet_example_storage_migration::{Pallet, Call, Storage}, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for MockRuntime { + type Block = Block; + type AccountData = pallet_balances::AccountData; + type DbWeight = ParityDbWeight; +} + +impl pallet_balances::Config for MockRuntime { + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = u64; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); +} + +impl Config for MockRuntime {} + +pub fn new_test_ext() -> sp_io::TestExternalities { + use sp_runtime::BuildStorage; + + let t = RuntimeGenesisConfig { system: Default::default(), balances: Default::default() } + .build_storage() + .unwrap(); + t.into() +} diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index f38bbe52dc11..dee23a41379f 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -43,6 +43,9 @@ //! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be //! built using only the `frame` umbrella crate. //! +//! - [`pallet_example_single_block_migrations`]: An example pallet demonstrating best-practices for +//! writing storage migrations. +//! //! - [`pallet_example_tasks`]: This pallet demonstrates the use of `Tasks` to execute service work. //! //! **Tip**: Use `cargo doc --package --open` to view each pallet's documentation. 
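The migrations touched below all share one guard shape after the rename; as a hedged sketch (the pallet, version numbers, and weight accounting are illustrative assumptions, not taken from any single diff):

```rust
use frame_support::{
	traits::{GetStorageVersion, OnRuntimeUpgrade},
	weights::Weight,
};
use sp_std::marker::PhantomData;

pub struct MigrateToV1<T>(PhantomData<T>);

// `crate::Config` stands in for the migrated pallet's `Config` (assumed to
// extend `frame_system::Config`, as in the diffs below).
impl<T: crate::Config> OnRuntimeUpgrade for MigrateToV1<T> {
	fn on_runtime_upgrade() -> Weight {
		// What the runtime binary declares (formerly `current_storage_version`).
		let in_code = crate::Pallet::<T>::in_code_storage_version();
		// What is actually persisted on-chain.
		let on_chain = crate::Pallet::<T>::on_chain_storage_version();

		if on_chain == 0 && in_code == 1 {
			// ... translate the storage items here and add their weight ...
			in_code.put::<crate::Pallet<T>>();
			// One read for the on-chain version, one write for the version bump.
			T::DbWeight::get().reads_writes(1, 1)
		} else {
			// Nothing to do: only the on-chain version was read.
			T::DbWeight::get().reads(1)
		}
	}
}
```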
diff --git a/substrate/frame/fast-unstake/src/migrations.rs b/substrate/frame/fast-unstake/src/migrations.rs index 564388407045..97ad86bfff42 100644 --- a/substrate/frame/fast-unstake/src/migrations.rs +++ b/substrate/frame/fast-unstake/src/migrations.rs @@ -33,12 +33,12 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 0b9f2b358279..90bcd8721dfa 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -73,7 +73,7 @@ pub mod pallet { use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); #[pallet::pallet] diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index 1de89dd00c81..f14093aa09af 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -251,7 +251,7 @@ type OffchainResult = Result>>; pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/im-online/src/migration.rs b/substrate/frame/im-online/src/migration.rs index 0d2c0a055b6d..754a2e672e6c 100644 --- a/substrate/frame/im-online/src/migration.rs +++ b/substrate/frame/im-online/src/migration.rs @@ -72,7 +72,7 @@ pub mod v1 { if StorageVersion::get::>() != 0 { log::warn!( target: TARGET, - "Skipping migration because current storage version is not 0" + "Skipping migration because in-code storage version is not 0" ); return weight } diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs index f32263970038..f4ac697130de 100644 --- a/substrate/frame/membership/src/lib.rs +++ b/substrate/frame/membership/src/lib.rs @@ -46,7 +46,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs index e4426c64b412..a83b78e316f5 100644 --- a/substrate/frame/multisig/src/lib.rs +++ b/substrate/frame/multisig/src/lib.rs @@ -168,7 +168,7 @@ pub mod pallet { type WeightInfo: WeightInfo; } - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/multisig/src/migrations.rs b/substrate/frame/multisig/src/migrations.rs index d03e42a66a5b..e6402600d0d3 100644 --- a/substrate/frame/multisig/src/migrations.rs +++ b/substrate/frame/multisig/src/migrations.rs @@ -51,7 +51,7 @@ pub mod v1 { fn on_runtime_upgrade() -> Weight { use sp_runtime::Saturating; - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); if onchain > 0 { diff --git a/substrate/frame/nfts/src/lib.rs b/substrate/frame/nfts/src/lib.rs index a7d505e2e397..7cf7cdc61a7d 100644 --- a/substrate/frame/nfts/src/lib.rs +++ b/substrate/frame/nfts/src/lib.rs @@ -76,7 +76,7 @@ pub mod pallet { use frame_support::{pallet_prelude::*, traits::ExistenceRequirement}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/nfts/src/migration.rs b/substrate/frame/nfts/src/migration.rs index d4cdf7e820d1..8f82e092262f 100644 --- a/substrate/frame/nfts/src/migration.rs +++ b/substrate/frame/nfts/src/migration.rs @@ -54,17 +54,17 @@ pub mod v1 { pub struct MigrateToV1(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); log::info!( target: LOG_TARGET, - "Running migration with current storage version {:?} / onchain {:?}", - current_version, - onchain_version + "Running migration with in-code storage version {:?} / onchain {:?}", + in_code_version, + on_chain_version ); - if onchain_version == 0 && current_version == 1 { + if on_chain_version == 0 && in_code_version == 1 { let mut translated = 0u64; let mut configs_iterated = 0u64; Collection::::translate::< @@ -77,13 +77,13 @@ pub mod v1 { Some(old_value.migrate_to_v1(item_configs)) }); - current_version.put::>(); + in_code_version.put::>(); log::info!( target: LOG_TARGET, "Upgraded {} records, storage to version {:?}", translated, - current_version + in_code_version ); T::DbWeight::get().reads_writes(translated + configs_iterated + 1, translated + 1) } else { diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 074d59931ade..c64db9ed6926 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -1576,7 +1576,7 @@ pub mod pallet { use frame_system::{ensure_signed, pallet_prelude::*}; use sp_runtime::Perbill; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(8); #[pallet::pallet] diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index 6887fcfa7eca..14410339f59a 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -342,25 +342,25 @@ pub mod v5 { pub struct MigrateToV5(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV5 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let in_code = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", - current, + "Running migration with in-code storage version {:?} / onchain {:?}", + in_code, onchain ); - if current == 5 && onchain == 4 { + if in_code == 5 && onchain == 4 { let mut translated = 0u64; RewardPools::::translate::, _>(|_id, old_value| { translated.saturating_inc(); Some(old_value.migrate_to_v5()) }); - current.put::>(); - log!(info, "Upgraded {} pools, storage to version {:?}", translated, current); + in_code.put::>(); + log!(info, "Upgraded {} pools, storage to version {:?}", translated, in_code); // reads: translated + onchain version. // writes: translated + current.put. @@ -498,12 +498,12 @@ pub mod v4 { #[allow(deprecated)] impl> OnRuntimeUpgrade for MigrateToV4 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); @@ -579,13 +579,13 @@ pub mod v3 { pub struct MigrateToV3(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV3 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); if onchain == 2 { log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); @@ -859,12 +859,12 @@ pub mod v2 { impl OnRuntimeUpgrade for MigrateToV2 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); @@ -976,12 +976,12 @@ pub mod v1 { pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let current = Pallet::::in_code_storage_version(); let onchain = Pallet::::on_chain_storage_version(); log!( info, - "Running migration with current storage version {:?} / onchain {:?}", + "Running migration with in-code storage version {:?} / onchain {:?}", current, onchain ); diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs index e344bdfe2d8f..4e4746851666 100644 --- a/substrate/frame/preimage/src/lib.rs +++ b/substrate/frame/preimage/src/lib.rs @@ -102,7 +102,7 @@ pub const MAX_HASH_UPGRADE_BULK_COUNT: u32 = 
1024; pub mod pallet { use super::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::config] diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index c5bf2266e672..e616056c3022 100644 --- a/substrate/frame/referenda/src/lib.rs +++ b/substrate/frame/referenda/src/lib.rs @@ -143,7 +143,7 @@ pub mod pallet { use frame_support::{pallet_prelude::*, traits::EnsureOriginWithArg}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/referenda/src/migration.rs b/substrate/frame/referenda/src/migration.rs index a80897242eec..631eb7340e56 100644 --- a/substrate/frame/referenda/src/migration.rs +++ b/substrate/frame/referenda/src/migration.rs @@ -109,16 +109,16 @@ pub mod v1 { } fn on_runtime_upgrade() -> Weight { - let current_version = Pallet::::current_storage_version(); - let onchain_version = Pallet::::on_chain_storage_version(); + let in_code_version = Pallet::::in_code_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); let mut weight = T::DbWeight::get().reads(1); log::info!( target: TARGET, - "running migration with current storage version {:?} / onchain {:?}.", - current_version, - onchain_version + "running migration with in-code storage version {:?} / onchain {:?}.", + in_code_version, + on_chain_version ); - if onchain_version != 0 { + if on_chain_version != 0 { log::warn!(target: TARGET, "skipping migration from v0 to v1."); return weight } @@ -149,8 +149,8 @@ pub mod v1 { #[cfg(feature = "try-runtime")] fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { - let onchain_version = Pallet::::on_chain_storage_version(); - ensure!(onchain_version == 1, "must upgrade from version 0 to 1."); + let on_chain_version = Pallet::::on_chain_storage_version(); + ensure!(on_chain_version == 1, "must upgrade from version 0 to 1."); let pre_referendum_count: u32 = Decode::decode(&mut &state[..]) .expect("failed to decode the state from pre-upgrade."); let post_referendum_count = ReferendumInfoFor::::iter().count() as u32; diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index daebebdee995..af3abd8ac4f2 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -229,7 +229,7 @@ pub mod pallet { use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*}; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet] diff --git a/substrate/frame/session/src/historical/mod.rs b/substrate/frame/session/src/historical/mod.rs index d74e9dd0b7c5..b9cecea1a7f7 100644 --- a/substrate/frame/session/src/historical/mod.rs +++ b/substrate/frame/session/src/historical/mod.rs @@ -58,7 +58,7 @@ pub mod pallet { use super::*; use frame_support::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. 
const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index fc35cd6ddd82..7d8128dbc1fe 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -368,7 +368,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); #[pallet::pallet] diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index dafb1e0b9e5e..6a1029114519 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -40,11 +40,11 @@ impl< { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { - let current = Pallet::::current_storage_version(); - let onchain = Pallet::::on_chain_storage_version(); - ensure!(onchain == 0 && current == 2, "pallet_society: invalid version"); + let in_code = Pallet::::in_code_storage_version(); + let on_chain = Pallet::::on_chain_storage_version(); + ensure!(on_chain == 0 && in_code == 2, "pallet_society: invalid version"); - Ok((old::Candidates::::get(), old::Members::::get()).encode()) + Ok((v0::Candidates::::get(), v0::Members::::get()).encode()) } fn on_runtime_upgrade() -> Weight { @@ -103,7 +103,7 @@ pub type MigrateToV2 = frame_support::migrations::VersionedMi ::DbWeight, >; -pub(crate) mod old { +pub(crate) mod v0 { use super::*; use frame_support::storage_alias; @@ -230,37 +230,37 @@ pub fn assert_internal_consistency, I: Instance + 'static>() { } // We don't use these - make sure they don't exist. - assert_eq!(old::SuspendedCandidates::::iter().count(), 0); - assert_eq!(old::Strikes::::iter().count(), 0); - assert_eq!(old::Vouching::::iter().count(), 0); - assert!(!old::Defender::::exists()); - assert!(!old::Members::::exists()); + assert_eq!(v0::SuspendedCandidates::::iter().count(), 0); + assert_eq!(v0::Strikes::::iter().count(), 0); + assert_eq!(v0::Vouching::::iter().count(), 0); + assert!(!v0::Defender::::exists()); + assert!(!v0::Members::::exists()); } pub fn from_original, I: Instance + 'static>( past_payouts: &mut [(::AccountId, BalanceOf)], ) -> Result { // Migrate Bids from old::Bids (just a trunctation). - Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(old::Bids::::take())); + Bids::::put(BoundedVec::<_, T::MaxBids>::truncate_from(v0::Bids::::take())); // Initialise round counter. RoundCount::::put(0); // Migrate Candidates from old::Candidates - for Bid { who: candidate, kind, value } in old::Candidates::::take().into_iter() { + for Bid { who: candidate, kind, value } in v0::Candidates::::take().into_iter() { let mut tally = Tally::default(); // Migrate Votes from old::Votes // No need to drain, since we're overwriting values. 
- for (voter, vote) in old::Votes::::iter_prefix(&candidate) { + for (voter, vote) in v0::Votes::::iter_prefix(&candidate) { Votes::::insert( &candidate, &voter, - Vote { approve: vote == old::Vote::Approve, weight: 1 }, + Vote { approve: vote == v0::Vote::Approve, weight: 1 }, ); match vote { - old::Vote::Approve => tally.approvals.saturating_inc(), - old::Vote::Reject => tally.rejections.saturating_inc(), - old::Vote::Skeptic => Skeptic::::put(&voter), + v0::Vote::Approve => tally.approvals.saturating_inc(), + v0::Vote::Reject => tally.rejections.saturating_inc(), + v0::Vote::Skeptic => Skeptic::::put(&voter), } } Candidates::::insert( @@ -271,9 +271,9 @@ pub fn from_original, I: Instance + 'static>( // Migrate Members from old::Members old::Strikes old::Vouching let mut member_count = 0; - for member in old::Members::::take() { - let strikes = old::Strikes::::take(&member); - let vouching = old::Vouching::::take(&member); + for member in v0::Members::::take() { + let strikes = v0::Strikes::::take(&member); + let vouching = v0::Vouching::::take(&member); let record = MemberRecord { index: member_count, rank: 0, strikes, vouching }; Members::::insert(&member, record); MemberByIndex::::insert(member_count, &member); @@ -314,7 +314,7 @@ pub fn from_original, I: Instance + 'static>( // Migrate Payouts from: old::Payouts and raw info (needed since we can't query old chain // state). past_payouts.sort(); - for (who, mut payouts) in old::Payouts::::iter() { + for (who, mut payouts) in v0::Payouts::::iter() { payouts.truncate(T::MaxPayouts::get() as usize); // ^^ Safe since we already truncated. let paid = past_payouts @@ -329,19 +329,19 @@ pub fn from_original, I: Instance + 'static>( } // Migrate SuspendedMembers from old::SuspendedMembers old::Strikes old::Vouching. - for who in old::SuspendedMembers::::iter_keys() { - let strikes = old::Strikes::::take(&who); - let vouching = old::Vouching::::take(&who); + for who in v0::SuspendedMembers::::iter_keys() { + let strikes = v0::Strikes::::take(&who); + let vouching = v0::Vouching::::take(&who); let record = MemberRecord { index: 0, rank: 0, strikes, vouching }; SuspendedMembers::::insert(&who, record); } // Any suspended candidates remaining are rejected. - let _ = old::SuspendedCandidates::::clear(u32::MAX, None); + let _ = v0::SuspendedCandidates::::clear(u32::MAX, None); // We give the current defender the benefit of the doubt. - old::Defender::::kill(); - let _ = old::DefenderVotes::::clear(u32::MAX, None); + v0::Defender::::kill(); + let _ = v0::DefenderVotes::::clear(u32::MAX, None); Ok(T::BlockWeights::get().max_block) } diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs index 940643168fb4..411567e1dedf 100644 --- a/substrate/frame/society/src/tests.rs +++ b/substrate/frame/society/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for the module. use super::*; -use migrations::old; +use migrations::v0; use mock::*; use frame_support::{assert_noop, assert_ok}; @@ -32,41 +32,41 @@ use RuntimeOrigin as Origin; #[test] fn migration_works() { EnvBuilder::new().founded(false).execute(|| { - use old::Vote::*; + use v0::Vote::*; // Initialise the old storage items. 
Founder::::put(10); Head::::put(30); - old::Members::::put(vec![10, 20, 30]); - old::Vouching::::insert(30, Vouching); - old::Vouching::::insert(40, Banned); - old::Strikes::::insert(20, 1); - old::Strikes::::insert(30, 2); - old::Strikes::::insert(40, 5); - old::Payouts::::insert(20, vec![(1, 1)]); - old::Payouts::::insert( + v0::Members::::put(vec![10, 20, 30]); + v0::Vouching::::insert(30, Vouching); + v0::Vouching::::insert(40, Banned); + v0::Strikes::::insert(20, 1); + v0::Strikes::::insert(30, 2); + v0::Strikes::::insert(40, 5); + v0::Payouts::::insert(20, vec![(1, 1)]); + v0::Payouts::::insert( 30, (0..=::MaxPayouts::get()) .map(|i| (i as u64, i as u64)) .collect::>(), ); - old::SuspendedMembers::::insert(40, true); + v0::SuspendedMembers::::insert(40, true); - old::Defender::::put(20); - old::DefenderVotes::::insert(10, Approve); - old::DefenderVotes::::insert(20, Approve); - old::DefenderVotes::::insert(30, Reject); + v0::Defender::::put(20); + v0::DefenderVotes::::insert(10, Approve); + v0::DefenderVotes::::insert(20, Approve); + v0::DefenderVotes::::insert(30, Reject); - old::SuspendedCandidates::::insert(50, (10, Deposit(100))); + v0::SuspendedCandidates::::insert(50, (10, Deposit(100))); - old::Candidates::::put(vec![ + v0::Candidates::::put(vec![ Bid { who: 60, kind: Deposit(100), value: 200 }, Bid { who: 70, kind: Vouch(30, 30), value: 100 }, ]); - old::Votes::::insert(60, 10, Approve); - old::Votes::::insert(70, 10, Reject); - old::Votes::::insert(70, 20, Approve); - old::Votes::::insert(70, 30, Approve); + v0::Votes::::insert(60, 10, Approve); + v0::Votes::::insert(70, 10, Reject); + v0::Votes::::insert(70, 20, Approve); + v0::Votes::::insert(70, 30, Approve); let bids = (0..=::MaxBids::get()) .map(|i| Bid { @@ -75,7 +75,7 @@ fn migration_works() { value: 10u64 + i as u64, }) .collect::>(); - old::Bids::::put(bids); + v0::Bids::::put(bids); migrations::from_original::(&mut [][..]).expect("migration failed"); migrations::assert_internal_consistency::(); diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 98984be9920d..3e3b1113a819 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -67,11 +67,11 @@ pub mod v14 { pub struct MigrateToV14(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV14 { fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let in_code = Pallet::::in_code_storage_version(); let on_chain = Pallet::::on_chain_storage_version(); - if current == 14 && on_chain == 13 { - current.put::>(); + if in_code == 14 && on_chain == 13 { + in_code.put::>(); log!(info, "v14 applied successfully."); T::DbWeight::get().reads_writes(1, 1) @@ -108,12 +108,12 @@ pub mod v13 { } fn on_runtime_upgrade() -> Weight { - let current = Pallet::::current_storage_version(); + let in_code = Pallet::::in_code_storage_version(); let onchain = StorageVersion::::get(); - if current == 13 && onchain == ObsoleteReleases::V12_0_0 { + if in_code == 13 && onchain == ObsoleteReleases::V12_0_0 { StorageVersion::::kill(); - current.put::>(); + in_code.put::>(); log!(info, "v13 applied successfully"); T::DbWeight::get().reads_writes(1, 2) diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index e0213efd507f..992a7fe2c9c6 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -66,7 +66,7 @@ pub mod pallet { use super::*; - /// The current storage 
version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(14); #[pallet::pallet] diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 78729ee3dc3f..5840377d7225 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -555,6 +555,42 @@ pub fn __create_tt_macro(input: TokenStream) -> TokenStream { tt_macro::create_tt_return_macro(input) } +/// Allows accessing on-chain pallet storage that is no longer accessible via the pallet. +/// +/// This is especially useful when writing storage migrations, when types of storage items are +/// modified or outright removed, but the previous definition is required to perform the migration. +/// +/// ## Example +/// +/// Imagine a pallet with the following storage definition: +/// ```ignore +/// #[pallet::storage] +/// pub type Value = StorageValue<_, u32>; +/// ``` +/// `Value` can be accessed by calling `Value::::get()`. +/// +/// Now imagine the definition of `Value` is updated to a `(u32, u32)`: +/// ```ignore +/// #[pallet::storage] +/// pub type Value = StorageValue<_, (u32, u32)>; +/// ``` +/// The on-chain value of `Value` is `u32`, but `Value::::get()` expects it to be `(u32, u32)`. +/// +/// In this instance the developer must write a storage migration that reads the old value of +/// `Value` and writes it back to storage in the new format, so that the on-chain storage layout is +/// consistent with what is defined in the pallet. +/// +/// We can read the old v0 value of `Value` in the migration by creating a `storage_alias`: +/// ```ignore +/// pub(crate) mod v0 { +/// use super::*; +/// +/// #[storage_alias] +/// pub type Value = StorageValue, u32>; +/// } +/// ``` +/// +/// The developer can now access the old value of `Value` by calling `v0::Value::::get()`. #[proc_macro_attribute] pub fn storage_alias(attributes: TokenStream, input: TokenStream) -> TokenStream { storage_alias::storage_alias(attributes.into(), input.into()) @@ -1058,7 +1094,7 @@ pub fn generate_store(_: TokenStream, _: TokenStream) -> TokenStream { /// pub struct Pallet(_); /// ``` /// -/// If not present, the current storage version is set to the default value. +/// If not present, the in-code storage version is set to the default value. #[proc_macro_attribute] pub fn storage_version(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } diff --git a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs index 5044d4285bb6..6b25ddcba1a7 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs @@ -42,7 +42,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { >::name::().unwrap_or("") }; - let initialize_on_chain_storage_version = if let Some(current_version) = + let initialize_on_chain_storage_version = if let Some(in_code_version) = &def.pallet_struct.storage_version { quote::quote! { @@ -50,9 +50,9 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { target: #frame_support::LOG_TARGET, "🐥 New pallet {:?} detected in the runtime. Initializing the on-chain storage version to match the storage version defined in the pallet: {:?}", #pallet_name, - #current_version + #in_code_version ); - #current_version.put::(); + #in_code_version.put::(); } } else { quote::quote!
{ @@ -73,10 +73,10 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::__private::log::info!( target: #frame_support::LOG_TARGET, "⚠️ {} declares internal migrations (which *might* execute). \ - On-chain `{:?}` vs current storage version `{:?}`", + On-chain `{:?}` vs in-code storage version `{:?}`", #pallet_name, ::on_chain_storage_version(), - ::current_storage_version(), + ::in_code_storage_version(), ); } } else { @@ -102,23 +102,23 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { }; // If a storage version is set, we should ensure that the storage version on chain matches the - // current storage version. This assumes that `Executive` is running custom migrations before + // in-code storage version. This assumes that `Executive` is running custom migrations before // the pallets are called. let post_storage_version_check = if def.pallet_struct.storage_version.is_some() { quote::quote! { let on_chain_version = ::on_chain_storage_version(); - let current_version = ::current_storage_version(); + let in_code_version = ::in_code_storage_version(); - if on_chain_version != current_version { + if on_chain_version != in_code_version { #frame_support::__private::log::error!( target: #frame_support::LOG_TARGET, - "{}: On chain storage version {:?} doesn't match current storage version {:?}.", + "{}: On chain storage version {:?} doesn't match in-code storage version {:?}.", #pallet_name, on_chain_version, - current_version, + in_code_version, ); - return Err("On chain and current storage version do not match. Missing runtime upgrade?".into()); + return Err("On chain and in-code storage version do not match. Missing runtime upgrade?".into()); } } } else { diff --git a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs index c2102f0284db..7cdf6bde9de8 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -160,7 +160,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { } ); - let (storage_version, current_storage_version_ty) = + let (storage_version, in_code_storage_version_ty) = if let Some(v) = def.pallet_struct.storage_version.as_ref() { (quote::quote! { #v }, quote::quote! { #frame_support::traits::StorageVersion }) } else { @@ -203,9 +203,9 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { for #pallet_ident<#type_use_gen> #config_where_clause { - type CurrentStorageVersion = #current_storage_version_ty; + type InCodeStorageVersion = #in_code_storage_version_ty; - fn current_storage_version() -> Self::CurrentStorageVersion { + fn in_code_storage_version() -> Self::InCodeStorageVersion { #storage_version } diff --git a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs index f4af86aa3e99..c2855ae38ef0 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -44,7 +44,7 @@ pub struct PalletStructDef { /// Whether to specify the storages max encoded len when implementing `StorageInfoTrait`. /// Contains the span of the attribute. pub without_storage_info: Option, - /// The current storage version of the pallet. + /// The in-code storage version of the pallet. 
pub storage_version: Option, } diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 3e97a3842756..d2d9a33053fb 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -1178,7 +1178,7 @@ pub mod pallet_prelude { /// # `pallet::storage_version` /// /// Because the [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) macro -/// implements [`traits::GetStorageVersion`], the current storage version needs to be +/// implements [`traits::GetStorageVersion`], the in-code storage version needs to be /// communicated to the macro. This can be done by using the `pallet::storage_version` /// attribute: /// @@ -1190,7 +1190,7 @@ pub mod pallet_prelude { /// pub struct Pallet(_); /// ``` /// -/// If not present, the current storage version is set to the default value. +/// If not present, the in-code storage version is set to the default value. /// /// Also see [`pallet::storage_version`](`frame_support::pallet_macros::storage_version`) /// diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index d059a992a866..f75410590232 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -43,12 +43,30 @@ use sp_std::marker::PhantomData; /// Otherwise, a warning is logged notifying the developer that the upgrade was a noop and should /// probably be removed. /// +/// It is STRONGLY RECOMMENDED to write the unversioned migration logic in a private module and +/// only export the versioned migration logic to prevent accidentally using the unversioned +/// migration in any runtimes. +/// /// ### Examples /// ```ignore /// // In file defining migrations -/// pub struct VersionUncheckedMigrateV5ToV6(sp_std::marker::PhantomData); -/// impl OnRuntimeUpgrade for VersionUncheckedMigrateV5ToV6 { -/// // OnRuntimeUpgrade implementation... +/// +/// /// Private module containing *version unchecked* migration logic. +/// /// +/// /// Should only be used by the [`VersionedMigration`] type in this module to create something to +/// /// export. +/// /// +/// /// We keep this private so the unversioned migration cannot accidentally be used in any runtimes. +/// /// +/// /// For more about this pattern of keeping items private, see +/// /// - https://github.com/rust-lang/rust/issues/30905 +/// /// - https://internals.rust-lang.org/t/lang-team-minutes-private-in-public-rules/4504/40 +/// mod version_unchecked { +/// use super::*; +/// pub struct MigrateV5ToV6(sp_std::marker::PhantomData); +/// impl OnRuntimeUpgrade for MigrateV5ToV6 { +/// // OnRuntimeUpgrade implementation... +/// } /// } /// /// pub type MigrateV5ToV6 = @@ -91,7 +109,7 @@ impl< const FROM: u16, const TO: u16, Inner: crate::traits::OnRuntimeUpgrade, - Pallet: GetStorageVersion + PalletInfoAccess, + Pallet: GetStorageVersion + PalletInfoAccess, DbWeight: Get, > crate::traits::OnRuntimeUpgrade for VersionedMigration { @@ -163,25 +181,25 @@ impl< } } -/// Can store the current pallet version in storage. -pub trait StoreCurrentStorageVersion { - /// Write the current storage version to the storage. +/// Can store the in-code pallet version on-chain. +pub trait StoreInCodeStorageVersion { + /// Write the in-code storage version on-chain.
+ fn store_in_code_storage_version(); } -impl + PalletInfoAccess> - StoreCurrentStorageVersion for StorageVersion +impl + PalletInfoAccess> + StoreInCodeStorageVersion for StorageVersion { - fn store_current_storage_version() { - let version = ::current_storage_version(); + fn store_in_code_storage_version() { + let version = ::in_code_storage_version(); version.put::(); } } -impl + PalletInfoAccess> - StoreCurrentStorageVersion for NoStorageVersionSet +impl + PalletInfoAccess> + StoreInCodeStorageVersion for NoStorageVersionSet { - fn store_current_storage_version() { + fn store_in_code_storage_version() { StorageVersion::default().put::(); } } @@ -193,7 +211,7 @@ pub trait PalletVersionToStorageVersionHelper { } impl PalletVersionToStorageVersionHelper for T where - T::CurrentStorageVersion: StoreCurrentStorageVersion, + T::InCodeStorageVersion: StoreInCodeStorageVersion, { fn migrate(db_weight: &RuntimeDbWeight) -> Weight { const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; @@ -204,8 +222,7 @@ where sp_io::storage::clear(&pallet_version_key(::name())); - >::store_current_storage_version( - ); + >::store_in_code_storage_version(); db_weight.writes(2) } @@ -226,7 +243,7 @@ impl PalletVersionToStorageVersionHelper for T { /// Migrate from the `PalletVersion` struct to the new [`StorageVersion`] struct. /// -/// This will remove all `PalletVersion's` from the state and insert the current storage version. +/// This will remove all `PalletVersion's` from the state and insert the in-code storage version. pub fn migrate_from_pallet_version_to_storage_version< Pallets: PalletVersionToStorageVersionHelper, >( diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index 20788ce932d8..72f047c3a73d 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -90,7 +90,7 @@ impl OnIdle for Tuple { /// /// Implementing this trait for a pallet let's you express operations that should /// happen at genesis. It will be called in an externalities provided environment and -/// will see the genesis state after all pallets have written their genesis state. +/// will set the genesis state after all pallets have written their genesis state. #[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] @@ -306,19 +306,23 @@ pub trait IntegrityTest { /// end /// ``` /// -/// * `OnRuntimeUpgrade` is only executed before everything else if a code -/// * `OnRuntimeUpgrade` is mandatorily at the beginning of the block body (extrinsics) being -/// processed. change is detected. -/// * Extrinsics start with inherents, and continue with other signed or unsigned extrinsics. -/// * `OnIdle` optionally comes after extrinsics. -/// `OnFinalize` mandatorily comes after `OnIdle`. +/// * [`OnRuntimeUpgrade`](Hooks::OnRuntimeUpgrade) hooks are only executed when a code change is +/// detected. +/// * [`OnRuntimeUpgrade`](Hooks::OnRuntimeUpgrade) hooks are mandatorily executed at the very +/// beginning of the block body, before any extrinsics are processed. +/// * [`Inherents`](sp_inherents) are always executed before any other signed or unsigned +/// extrinsics. +/// * [`OnIdle`](Hooks::OnIdle) hooks are executed after extrinsics if there is weight remaining in +/// the block.
+/// * [`OnFinalize`](Hooks::OnFinalize) hooks are mandatorily executed after +/// [`OnIdle`](Hooks::OnIdle). /// -/// > `OffchainWorker` is not part of this flow, as it is not really part of the consensus/main -/// > block import path, and is called optionally, and in other circumstances. See -/// > [`crate::traits::misc::OffchainWorker`] for more information. +/// > [`OffchainWorker`](crate::traits::misc::OffchainWorker) hooks are not part of this flow, +/// > because they are not part of the consensus/main block building logic. See +/// > [`OffchainWorker`](crate::traits::misc::OffchainWorker) for more information. /// -/// To learn more about the execution of hooks see `frame-executive` as this component is is charge -/// of dispatching extrinsics and placing the hooks in the correct order. +/// To learn more about the execution of hooks see the FRAME `Executive` pallet which is in charge +/// of dispatching extrinsics and calling hooks in the correct order. pub trait Hooks { /// Block initialization hook. This is called at the very beginning of block execution. /// @@ -370,30 +374,38 @@ pub trait Hooks { Weight::zero() } - /// Hook executed when a code change (aka. a "runtime upgrade") is detected by FRAME. + /// Hook executed when a code change (aka. a "runtime upgrade") is detected by the FRAME + /// `Executive` pallet. /// /// Be aware that this is called before [`Hooks::on_initialize`] of any pallet; therefore, a lot /// of the critical storage items such as `block_number` in system pallet might have not been - /// set. + /// set yet. /// - /// Vert similar to [`Hooks::on_initialize`], any code in this block is mandatory and MUST - /// execute. Use with care. + /// Similar to [`Hooks::on_initialize`], any code in this block is mandatory and MUST execute. + /// It is strongly recommended to dry-run the execution of these hooks using + /// [try-runtime-cli](https://github.com/paritytech/try-runtime-cli) to ensure they will not + /// produce an overweight block which can brick your chain. Use with care! /// - /// ## Implementation Note: Versioning + /// ## Implementation Note: Standalone Migrations /// - /// 1. An implementation of this should typically follow a pattern where the version of the - /// pallet is checked against the onchain version, and a decision is made about what needs to be - /// done. This is helpful to prevent accidental repetitive execution of this hook, which can be - /// catastrophic. + /// Additional migrations can be created by directly implementing [`OnRuntimeUpgrade`] on + /// structs and passing them to `Executive`. /// - /// Alternatively, [`frame_support::migrations::VersionedMigration`] can be used to assist with - /// this. + /// ## Implementation Note: Pallet Versioning /// - /// ## Implementation Note: Runtime Level Migration + /// Implementations of this hook are typically wrapped in + /// [`crate::migrations::VersionedMigration`] to ensure the migration is executed exactly + /// once and only when it is supposed to. /// - /// Additional "upgrade hooks" can be created by pallets by a manual implementation of - /// [`Hooks::on_runtime_upgrade`] which can be passed on to `Executive` at the top level - /// runtime. + /// Alternatively, developers can manually implement version checks. + /// + /// Failure to adequately check storage versions can result in accidental repetitive execution + /// of the hook, which can be catastrophic.
+ /// + /// ## Implementation Note: Weight + /// + /// Typically, implementations of this method are simple enough that weights can be calculated + /// manually. However, if required, a benchmark can also be used. fn on_runtime_upgrade() -> Weight { Weight::zero() } @@ -403,7 +415,7 @@ pub trait Hooks { /// It should focus on certain checks to ensure that the state is sensible. This is never /// executed in a consensus code-path, therefore it can consume as much weight as it needs. /// - /// This hook should not alter any storage. + /// This hook must not alter any storage. #[cfg(feature = "try-runtime")] fn try_state(_n: BlockNumber) -> Result<(), TryRuntimeError> { Ok(()) @@ -415,7 +427,7 @@ pub trait Hooks { /// which will be passed to `post_upgrade` after upgrading for post-check. An empty vector /// should be returned if there is no such need. /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + /// This hook is never executed on-chain but instead used by testing tools. #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, TryRuntimeError> { Ok(Vec::new()) diff --git a/substrate/frame/support/src/traits/metadata.rs b/substrate/frame/support/src/traits/metadata.rs index 586af20511a8..8bda4186bc96 100644 --- a/substrate/frame/support/src/traits/metadata.rs +++ b/substrate/frame/support/src/traits/metadata.rs @@ -261,41 +261,64 @@ impl Add for StorageVersion { } } -/// Special marker struct if no storage version is set for a pallet. +/// Special marker struct used when [`storage_version`](crate::pallet_macros::storage_version) is +/// not defined for a pallet. /// /// If you (the reader) end up here, it probably means that you tried to compare /// [`GetStorageVersion::on_chain_storage_version`] against -/// [`GetStorageVersion::current_storage_version`]. This basically means that the -/// [`storage_version`](crate::pallet_macros::storage_version) is missing in the pallet where the -/// mentioned functions are being called. +/// [`GetStorageVersion::in_code_storage_version`]. This basically means that the +/// [`storage_version`](crate::pallet_macros::storage_version) is missing from the pallet where the +/// mentioned functions are being called, and needs to be defined. #[derive(Debug, Default)] pub struct NoStorageVersionSet; -/// Provides information about the storage version of a pallet. +/// Provides information about a pallet's storage versions. /// -/// It differentiates between current and on-chain storage version. Both should be only out of sync -/// when a new runtime upgrade was applied and the runtime migrations did not yet executed. -/// Otherwise it means that the pallet works with an unsupported storage version and unforeseen -/// stuff can happen. +/// Every pallet has two storage versions: +/// 1. An in-code storage version +/// 2. An on-chain storage version /// -/// The current storage version is the version of the pallet as supported at runtime. The active -/// storage version is the version of the pallet in the storage. +/// The in-code storage version is the version of the pallet as defined in the runtime blob, and the +/// on-chain storage version is the version of the pallet stored on-chain. /// -/// It is required to update the on-chain storage version manually when a migration was applied. 
+/// Storage versions should only ever be out of sync when a pallet has been updated to a new +/// version and the in-code version is incremented, but the migration has not yet been executed +/// on-chain as part of a runtime upgrade. +/// +/// It is the responsibility of the developer to ensure that the on-chain storage version is set +/// correctly during a migration so that it matches the in-code storage version. pub trait GetStorageVersion { - /// This will be filled out by the [`pallet`](crate::pallet) macro. + /// This type is generated by the [`pallet`](crate::pallet) macro. + /// + /// If the [`storage_version`](crate::pallet_macros::storage_version) attribute isn't specified, + /// this is set to [`NoStorageVersionSet`] to signify that it is missing. + /// + /// If the [`storage_version`](crate::pallet_macros::storage_version) attribute is specified, + /// this is set to a [`StorageVersion`] corresponding to the attribute. /// - /// If the [`storage_version`](crate::pallet_macros::storage_version) attribute isn't given - /// this is set to [`NoStorageVersionSet`] to inform the user that the attribute is missing. - /// This should prevent that the user forgets to set a storage version when required. However, - /// this will only work when the user actually tries to call [`Self::current_storage_version`] - /// to compare it against the [`Self::on_chain_storage_version`]. If the attribute is given, - /// this will be set to [`StorageVersion`]. - type CurrentStorageVersion; - - /// Returns the current storage version as supported by the pallet. - fn current_storage_version() -> Self::CurrentStorageVersion; - /// Returns the on-chain storage version of the pallet as stored in the storage. + /// The intention of using [`NoStorageVersionSet`] instead of defaulting to a [`StorageVersion`] + /// of zero is to prevent developers from forgetting to set + /// [`storage_version`](crate::pallet_macros::storage_version) when it is required, like in the + /// case that they wish to compare the in-code storage version to the on-chain storage version. + type InCodeStorageVersion; + + #[deprecated( + note = "This method has been renamed to `in_code_storage_version` and will be removed after March 2024." + )] + /// DEPRECATED: Use [`Self::in_code_storage_version`] instead. + /// + /// Returns the in-code storage version as specified in the + /// [`storage_version`](crate::pallet_macros::storage_version) attribute, or + /// [`NoStorageVersionSet`] if the attribute is missing. + fn current_storage_version() -> Self::InCodeStorageVersion { + Self::in_code_storage_version() + } + + /// Returns the in-code storage version as specified in the + /// [`storage_version`](crate::pallet_macros::storage_version) attribute, or + /// [`NoStorageVersionSet`] if the attribute is missing. + fn in_code_storage_version() -> Self::InCodeStorageVersion; + /// Returns the storage version of the pallet as last set in the actual on-chain storage.
fn on_chain_storage_version() -> StorageVersion; } diff --git a/substrate/frame/support/test/tests/construct_runtime.rs b/substrate/frame/support/test/tests/construct_runtime.rs index b8341b25cb09..b76986818867 100644 --- a/substrate/frame/support/test/tests/construct_runtime.rs +++ b/substrate/frame/support/test/tests/construct_runtime.rs @@ -683,7 +683,7 @@ fn test_metadata() { name: "Version", ty: meta_type::(), value: RuntimeVersion::default().encode(), - docs: maybe_docs(vec![ " Get the chain's current version."]), + docs: maybe_docs(vec![ " Get the chain's in-code version."]), }, PalletConstantMetadata { name: "SS58Prefix", diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 9b4381c2f82b..7bee4b7c8206 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -590,7 +590,7 @@ pub mod pallet2 { Self::deposit_event(Event::Something(31)); if UpdateStorageVersion::get() { - Self::current_storage_version().put::(); + Self::in_code_storage_version().put::(); } Weight::zero() @@ -1310,7 +1310,7 @@ fn pallet_on_genesis() { assert_eq!(pallet::Pallet::::on_chain_storage_version(), StorageVersion::new(0)); pallet::Pallet::::on_genesis(); assert_eq!( - pallet::Pallet::::current_storage_version(), + pallet::Pallet::::in_code_storage_version(), pallet::Pallet::::on_chain_storage_version(), ); }) @@ -2257,10 +2257,10 @@ fn pallet_on_chain_storage_version_initializes_correctly() { AllPalletsWithSystem, >; - // Simple example of a pallet with current version 10 being added to the runtime for the first + // Simple example of a pallet with in-code version 10 being added to the runtime for the first // time. TestExternalities::default().execute_with(|| { - let current_version = Example::current_storage_version(); + let in_code_version = Example::in_code_storage_version(); // Check the pallet has no storage items set. let pallet_hashed_prefix = twox_128(Example::name().as_bytes()); @@ -2271,14 +2271,14 @@ fn pallet_on_chain_storage_version_initializes_correctly() { // version. Executive::execute_on_runtime_upgrade(); - // Check that the storage version was initialized to the current version + // Check that the storage version was initialized to the in-code version let on_chain_version_after = StorageVersion::get::(); - assert_eq!(on_chain_version_after, current_version); + assert_eq!(on_chain_version_after, in_code_version); }); - // Pallet with no current storage version should have the on-chain version initialized to 0. + // Pallet with no in-code storage version should have the on-chain version initialized to 0. TestExternalities::default().execute_with(|| { - // Example4 current_storage_version is NoStorageVersionSet. + // Example4 in_code_storage_version is NoStorageVersionSet. // Check the pallet has no storage items set. 
let pallet_hashed_prefix = twox_128(Example4::name().as_bytes()); @@ -2308,7 +2308,7 @@ fn post_runtime_upgrade_detects_storage_version_issues() { impl OnRuntimeUpgrade for CustomUpgrade { fn on_runtime_upgrade() -> Weight { - Example2::current_storage_version().put::(); + Example2::in_code_storage_version().put::(); Default::default() } @@ -2351,14 +2351,14 @@ fn post_runtime_upgrade_detects_storage_version_issues() { >; TestExternalities::default().execute_with(|| { - // Set the on-chain version to one less than the current version for `Example`, simulating a + // Set the on-chain version to one less than the in-code version for `Example`, simulating a // forgotten migration StorageVersion::new(9).put::(); // The version isn't changed, we should detect it. assert!( Executive::try_runtime_upgrade(UpgradeCheckSelect::PreAndPost).unwrap_err() == - "On chain and current storage version do not match. Missing runtime upgrade?" + "On chain and in-code storage version do not match. Missing runtime upgrade?" .into() ); }); diff --git a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs index 840a6dee20cc..d2ca9fc80991 100644 --- a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs +++ b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.rs @@ -29,7 +29,7 @@ mod pallet { #[pallet::hooks] impl Hooks> for Pallet { fn on_runtime_upgrade() -> Weight { - if Self::current_storage_version() != Self::on_chain_storage_version() { + if Self::in_code_storage_version() != Self::on_chain_storage_version() { } diff --git a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr index 1b48197cc9ed..3256e69528a2 100644 --- a/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/compare_unset_storage_version.stderr @@ -1,7 +1,7 @@ error[E0369]: binary operation `!=` cannot be applied to type `NoStorageVersionSet` --> tests/pallet_ui/compare_unset_storage_version.rs:32:39 | -32 | if Self::current_storage_version() != Self::on_chain_storage_version() { +32 | if Self::in_code_storage_version() != Self::on_chain_storage_version() { | ------------------------------- ^^ -------------------------------- StorageVersion | | | NoStorageVersionSet diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index c527a483d880..01df09106d99 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -525,7 +525,7 @@ pub mod pallet { #[pallet::constant] type DbWeight: Get; - /// Get the chain's current version. + /// Get the chain's in-code version. #[pallet::constant] type Version: Get; diff --git a/substrate/frame/tips/src/lib.rs b/substrate/frame/tips/src/lib.rs index 4c7cfc3028a9..8c360fb57d72 100644 --- a/substrate/frame/tips/src/lib.rs +++ b/substrate/frame/tips/src/lib.rs @@ -122,7 +122,7 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - /// The current storage version. + /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); #[pallet::pallet]
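Taken together, the hunks above reduce every hand-written migration in this patch to the same gating pattern built on `GetStorageVersion`. A minimal sketch of that pattern (all names illustrative, not part of the patch; assumes a pallet whose `Config` extends `frame_system::Config` and whose `storage_version` attribute was bumped from 1 to 2):

```rust
use frame_support::{
    traits::{GetStorageVersion, OnRuntimeUpgrade},
    weights::Weight,
};

pub struct MigrateToV2<T>(core::marker::PhantomData<T>);

impl<T: crate::Config> OnRuntimeUpgrade for MigrateToV2<T> {
    fn on_runtime_upgrade() -> Weight {
        // The version compiled into the runtime blob (the new name)...
        let in_code = crate::Pallet::<T>::in_code_storage_version();
        // ...versus the version last written to on-chain storage.
        let on_chain = crate::Pallet::<T>::on_chain_storage_version();

        if in_code == 2 && on_chain == 1 {
            // ...translate storage items into the new layout here...

            // Persist the new version so the migration cannot run twice.
            in_code.put::<crate::Pallet<T>>();
            T::DbWeight::get().reads_writes(1, 1)
        } else {
            // Wrong on-chain version: the migration is a noop.
            T::DbWeight::get().reads(1)
        }
    }
}
```

This hand-rolled check is what the `VersionedMigration` wrapper documented in the `migrations.rs` hunk above performs for you, which is why the patch recommends keeping the unversioned logic in a private module.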