From aef9a7f89d12be399abeb248a486f400c75b72e9 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 11:00:59 +0100
Subject: [PATCH 01/29] new_partial signature

---
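Notes: the `new_partial` introduced below is generic over `Block`,
`RuntimeApi`, `ParachainNativeExecutor` and `SelectChain`. As a rough
sketch of the intended call-site (the names `opaque::Block`,
`RuntimeApi` and `TemplateExecutor` stand in for whatever concrete
types a node crate already defines; illustrative only, not part of
the patch):

    // Hypothetical node crate: instantiate the shared helper with
    // its own concrete block, runtime API and native executor.
    let components = node_common::service::new_partial::<
        opaque::Block,    // assumed concrete opaque block type
        RuntimeApi,       // assumed runtime API of this chain
        TemplateExecutor, // assumed NativeExecutionDispatch impl
        (),               // SelectChain: none for a parachain
    >(&config)?;
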
 Cargo.lock                        | 65 +++++++++++++++++++++++++
 Cargo.toml                        |  1 +
 client/node-common/Cargo.toml     | 80 +++++++++++++++++++++++++++++++
 client/node-common/src/lib.rs     | 17 +++++++
 client/node-common/src/service.rs | 78 ++++++++++++++++++++++++++++++
 5 files changed, 241 insertions(+)
 create mode 100644 client/node-common/Cargo.toml
 create mode 100644 client/node-common/src/lib.rs
 create mode 100644 client/node-common/src/service.rs

diff --git a/Cargo.lock b/Cargo.lock
index e4ee5fa7a..7fa980140 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6837,6 +6837,71 @@ version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65"
 
+[[package]]
+name = "node-common"
+version = "0.1.0"
+dependencies = [
+ "async-io",
+ "async-trait",
+ "clap",
+ "cumulus-client-cli",
+ "cumulus-client-consensus-aura",
+ "cumulus-client-consensus-common",
+ "cumulus-client-network",
+ "cumulus-client-service",
+ "cumulus-primitives-core",
+ "cumulus-primitives-parachain-inherent",
+ "cumulus-relay-chain-interface",
+ "frame-benchmarking",
+ "frame-benchmarking-cli",
+ "futures 0.3.28",
+ "jsonrpsee",
+ "log",
+ "nimbus-consensus",
+ "nimbus-primitives",
+ "parity-scale-codec",
+ "polkadot-cli",
+ "polkadot-primitives",
+ "polkadot-service",
+ "sc-basic-authorship",
+ "sc-chain-spec",
+ "sc-cli",
+ "sc-client-api",
+ "sc-consensus",
+ "sc-consensus-manual-seal",
+ "sc-executor",
+ "sc-network",
+ "sc-network-common",
+ "sc-network-sync",
+ "sc-offchain",
+ "sc-rpc",
+ "sc-service",
+ "sc-sysinfo",
+ "sc-telemetry",
+ "sc-tracing",
+ "sc-transaction-pool",
+ "sc-transaction-pool-api",
+ "serde",
+ "sp-api",
+ "sp-block-builder",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-consensus-aura",
+ "sp-core",
+ "sp-inherents",
+ "sp-io",
+ "sp-keystore",
+ "sp-offchain",
+ "sp-runtime",
+ "sp-session",
+ "sp-timestamp",
+ "sp-transaction-pool",
+ "substrate-frame-rpc-system",
+ "substrate-prometheus-endpoint",
+ "tc-consensus",
+ "try-runtime-cli",
+]
+
 [[package]]
 name = "nodrop"
 version = "0.1.14"
diff --git a/Cargo.toml b/Cargo.toml
index 7afb36e84..4b08d6227 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -47,6 +47,7 @@ dancebox-runtime = { path = "runtime/dancebox", default-features = false }
 manual-xcm-rpc = { path = "client/manual-xcm" }
 tc-consensus = { path = "client/consensus" }
 tc-orchestrator-chain-interface = { path = "client/orchestrator-chain-interface" }
+node-common = { path = "client/node-common" }
 test-relay-sproof-builder = { path = "test-sproof-builder", default-features = false }
 tp-author-noting-inherent = { path = "primitives/author-noting-inherent", default-features = false }
 tp-chain-state-snapshot = { path = "primitives/chain-state-snapshot", default-features = false }
diff --git a/client/node-common/Cargo.toml b/client/node-common/Cargo.toml
new file mode 100644
index 000000000..8ac291e2c
--- /dev/null
+++ b/client/node-common/Cargo.toml
@@ -0,0 +1,80 @@
+[package]
+name = "node-common"
+authors = { workspace = true }
+description = "Common code between various nodes"
+edition = "2021"
+license = "GPL-3.0-only"
+version = "0.1.0"
+
+[dependencies]
+async-io = { workspace = true }
+async-trait = { workspace = true }
+clap = { workspace = true, features = [ "derive" ] }
+futures = { workspace = true }
+jsonrpsee = { workspace = true, features = [ "server" ] }
+log = { workspace = true }
+parity-scale-codec = { workspace = true }
+serde = { workspace = true, features = [ "derive" ] }
+
+# Local
+tc-consensus = { workspace = true }
+
+# Nimbus
+nimbus-consensus = { workspace = true }
+nimbus-primitives = { workspace = true, features = [ "std" ] }
+
+# Substrate
+frame-benchmarking = { workspace = true }
+frame-benchmarking-cli = { workspace = true }
+sc-basic-authorship = { workspace = true }
+sc-chain-spec = { workspace = true }
+sc-cli = { workspace = true }
+sc-client-api = { workspace = true }
+sc-consensus = { workspace = true }
+sc-consensus-manual-seal = { workspace = true }
+sc-executor = { workspace = true }
+sc-network = { workspace = true }
+sc-network-common = { workspace = true }
+sc-network-sync = { workspace = true }
+sc-offchain = { workspace = true }
+sc-rpc = { workspace = true }
+sc-service = { workspace = true }
+sc-sysinfo = { workspace = true }
+sc-telemetry = { workspace = true }
+sc-tracing = { workspace = true }
+sc-transaction-pool = { workspace = true }
+sc-transaction-pool-api = { workspace = true }
+sp-api = { workspace = true, features = [ "std" ] }
+sp-block-builder = { workspace = true }
+sp-blockchain = { workspace = true }
+sp-consensus = { workspace = true }
+sp-consensus-aura = { workspace = true }
+sp-core = { workspace = true, features = [ "std" ] }
+sp-inherents = { workspace = true, features = [ "std" ] }
+sp-io = { workspace = true, features = [ "std" ] }
+sp-keystore = { workspace = true, features = [ "std" ] }
+sp-offchain = { workspace = true, features = [ "std" ] }
+sp-runtime = { workspace = true, features = [ "std" ] }
+sp-session = { workspace = true, features = [ "std" ] }
+sp-timestamp = { workspace = true, features = [ "std" ] }
+
+sp-transaction-pool = { workspace = true }
+substrate-frame-rpc-system = { workspace = true }
+substrate-prometheus-endpoint = { workspace = true }
+try-runtime-cli = { workspace = true, optional = true }
+
+# Polkadot
+polkadot-cli = { workspace = true }
+polkadot-primitives = { workspace = true }
+polkadot-service = { workspace = true }
+
+# Cumulus
+cumulus-client-cli = { workspace = true }
+cumulus-client-consensus-aura = { workspace = true }
+cumulus-client-consensus-common = { workspace = true }
+cumulus-client-network = { workspace = true }
+cumulus-client-service = { workspace = true }
+cumulus-primitives-core = { workspace = true }
+cumulus-primitives-parachain-inherent = { workspace = true }
+cumulus-relay-chain-interface = { workspace = true }
diff --git a/client/node-common/src/lib.rs b/client/node-common/src/lib.rs
new file mode 100644
index 000000000..673211af3
--- /dev/null
+++ b/client/node-common/src/lib.rs
@@ -0,0 +1,17 @@
+// Copyright (C) Moondance Labs Ltd.
+// This file is part of Tanssi.
+
+// Tanssi is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Tanssi is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
+ +pub mod service; diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs new file mode 100644 index 000000000..c7e9094c5 --- /dev/null +++ b/client/node-common/src/service.rs @@ -0,0 +1,78 @@ +// Copyright (C) Moondance Labs Ltd. +// This file is part of Tanssi. + +// Tanssi is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Tanssi is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Tanssi. If not, see . + +use {sp_api::ConstructRuntimeApi, sp_transaction_pool::runtime_api::TaggedTransactionQueue}; + +use { + cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport, + sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch}, + sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient}, + sc_telemetry::{Telemetry, TelemetryWorkerHandle}, + sc_transaction_pool::ChainApi, + std::sync::Arc, +}; + +pub type ParachainExecutor = + NativeElseWasmExecutor; +pub type ParachainClient = + TFullClient>; +pub type ParachainBackend = TFullBackend; +pub type ParachainBlockImport = TParachainBlockImport< + Block, + Arc>, + ParachainBackend, +>; + +type ConstructedRuntimeApi = + >::RuntimeApi; + +pub trait BlockT: cumulus_primitives_core::BlockT {} + +pub fn new_partial( + config: &Configuration, +) -> Result< + PartialComponents< + ParachainClient, + ParachainBackend, + SelectChain, + sc_consensus::DefaultImportQueue, + sc_transaction_pool::FullPool< + Block, + ParachainClient, + >, + ( + ParachainBlockImport, + Option, + Option, + ), + >, + sc_service::Error, +> +where + Block: BlockT, + ParachainNativeExecutor: NativeExecutionDispatch + 'static, + RuntimeApi: ConstructRuntimeApi> + + Sync + + Send + + 'static, + ConstructedRuntimeApi< + Block, + ParachainClient, + RuntimeApi, + >: TaggedTransactionQueue, +{ + todo!() +} From 5dfb3238eaa5e2892ee4c095a0b89cd6e6f27a71 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Fri, 3 Nov 2023 11:28:21 +0100 Subject: [PATCH 02/29] common new_partial for tanssi and simple (not frontier) --- Cargo.lock | 2 + client/node-common/src/service.rs | 105 ++++++++++++++++-- .../templates/simple/node/Cargo.toml | 1 + .../templates/simple/node/src/service.rs | 78 +------------ node/Cargo.toml | 1 + node/src/container_chain_spawner.rs | 6 +- node/src/service.rs | 82 +------------- 7 files changed, 104 insertions(+), 171 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7fa980140..8302fd273 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1747,6 +1747,7 @@ dependencies = [ "log", "nimbus-consensus", "nimbus-primitives", + "node-common", "parity-scale-codec", "polkadot-cli", "polkadot-primitives", @@ -14637,6 +14638,7 @@ dependencies = [ "manual-xcm-rpc", "nimbus-consensus", "nimbus-primitives", + "node-common", "pallet-author-noting-runtime-api", "pallet-collator-assignment-runtime-api", "pallet-configuration", diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index c7e9094c5..0e8dc891d 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -14,14 
 Cargo.lock                                 |   2 +
 client/node-common/src/service.rs          | 105 ++++++++++++++++--
 .../templates/simple/node/Cargo.toml       |   1 +
 .../templates/simple/node/src/service.rs   |  78 +------------
 node/Cargo.toml                            |   1 +
 node/src/container_chain_spawner.rs        |   6 +-
 node/src/service.rs                        |  82 +--------------
 7 files changed, 104 insertions(+), 171 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 7fa980140..8302fd273 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1747,6 +1747,7 @@ dependencies = [
  "log",
  "nimbus-consensus",
  "nimbus-primitives",
+ "node-common",
  "parity-scale-codec",
  "polkadot-cli",
  "polkadot-primitives",
@@ -14637,6 +14638,7 @@ dependencies = [
  "manual-xcm-rpc",
  "nimbus-consensus",
  "nimbus-primitives",
+ "node-common",
  "pallet-author-noting-runtime-api",
  "pallet-collator-assignment-runtime-api",
  "pallet-configuration",
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index c7e9094c5..0e8dc891d 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -14,14 +14,18 @@
 // You should have received a copy of the GNU General Public License
 // along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
 
-use {sp_api::ConstructRuntimeApi, sp_transaction_pool::runtime_api::TaggedTransactionQueue};
+use sp_block_builder::BlockBuilder;
 
 use {
     cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
-    sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch},
+    sc_executor::{
+        HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor,
+        DEFAULT_HEAP_ALLOC_STRATEGY,
+    },
     sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient},
-    sc_telemetry::{Telemetry, TelemetryWorkerHandle},
-    sc_transaction_pool::ChainApi,
+    sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+    sp_api::ConstructRuntimeApi,
+    sp_transaction_pool::runtime_api::TaggedTransactionQueue,
     std::sync::Arc,
 };
@@ -35,14 +39,12 @@ pub type ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor> = TPar
     Arc<ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>,
     ParachainBackend<Block>,
 >;
-
-type ConstructedRuntimeApi<Block, Client, RuntimeApi> =
+pub type ConstructedRuntimeApi<Block, Client, RuntimeApi> =
     <RuntimeApi as ConstructRuntimeApi<Block, Client>>::RuntimeApi;
 
-pub trait BlockT: cumulus_primitives_core::BlockT {}
-
 pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor, SelectChain>(
     config: &Configuration,
+    select_chain: SelectChain,
 ) -> Result<
     PartialComponents<
         ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
@@ -62,7 +64,7 @@ pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor, SelectChain>(
     sc_service::Error,
 >
 where
-    Block: BlockT,
+    Block: cumulus_primitives_core::BlockT,
     ParachainNativeExecutor: NativeExecutionDispatch + 'static,
     RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>
         + Sync
@@ -72,7 +74,88 @@ where
         Block,
         ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
         RuntimeApi,
-    >: TaggedTransactionQueue<Block>,
+    >: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
 {
-    todo!()
+    let telemetry = config
+        .telemetry_endpoints
+        .clone()
+        .filter(|x| !x.is_empty())
+        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
+            let worker = TelemetryWorker::new(16)?;
+            let telemetry = worker.handle().new_telemetry(endpoints);
+            Ok((worker, telemetry))
+        })
+        .transpose()?;
+
+    let heap_pages = config
+        .default_heap_pages
+        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
+            extra_pages: h as _,
+        });
+
+    // Default runtime_cache_size is 2
+    // For now we can work with this, but it will likely need
+    // to change once we start having runtime_cache_sizes, or
+    // run nodes with the maximum for this value
+    let wasm = WasmExecutor::builder()
+        .with_execution_method(config.wasm_method)
+        .with_onchain_heap_alloc_strategy(heap_pages)
+        .with_offchain_heap_alloc_strategy(heap_pages)
+        .with_max_runtime_instances(config.max_runtime_instances)
+        .with_runtime_cache_size(config.runtime_cache_size)
+        .build();
+
+    let executor: ParachainExecutor<ParachainNativeExecutor> =
+        ParachainExecutor::new_with_wasm_executor(wasm);
+
+    let (client, backend, keystore_container, task_manager) =
+        sc_service::new_full_parts::<Block, RuntimeApi, _>(
+            config,
+            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+            executor,
+        )?;
+    let client = Arc::new(client);
+
+    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
+
+    let telemetry = telemetry.map(|(worker, telemetry)| {
+        task_manager
+            .spawn_handle()
+            .spawn("telemetry", None, worker.run());
+        telemetry
+    });
+
+    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
+        config.transaction_pool.clone(),
+        config.role.is_authority().into(),
+        config.prometheus_registry(),
+        task_manager.spawn_essential_handle(),
+        client.clone(),
+    );
+
+    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+    let import_queue = nimbus_consensus::import_queue(
+        client.clone(),
+        block_import.clone(),
+        move |_, _| async move {
+            let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+            Ok((time,))
+        },
+        &task_manager.spawn_essential_handle(),
+        config.prometheus_registry(),
+        false,
+    )?;
+
+    Ok(PartialComponents {
+        backend,
+        client,
+        import_queue,
+        keystore_container,
+        task_manager,
+        transaction_pool,
+        select_chain,
+        other: (block_import, telemetry, telemetry_worker_handle),
+    })
 }
diff --git a/container-chains/templates/simple/node/Cargo.toml b/container-chains/templates/simple/node/Cargo.toml
index 252ba852b..b598a54cd 100644
--- a/container-chains/templates/simple/node/Cargo.toml
+++ b/container-chains/templates/simple/node/Cargo.toml
@@ -18,6 +18,7 @@ parity-scale-codec = { workspace = true }
 serde = { workspace = true, features = [ "derive" ] }
 
 # Local
+node-common = { workspace = true }
 container-chain-template-simple-runtime = { workspace = true, features = [ "std" ] }
 tc-consensus = { workspace = true }
 
diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs
index 550bc33d9..0a8b23ac1 100644
--- a/container-chains/templates/simple/node/src/service.rs
+++ b/container-chains/templates/simple/node/src/service.rs
@@ -92,83 +92,7 @@ pub fn new_partial(
     >,
     sc_service::Error,
 > {
-    let telemetry = config
-        .telemetry_endpoints
-        .clone()
-        .filter(|x| !x.is_empty())
-        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
-            let worker = TelemetryWorker::new(16)?;
-            let telemetry = worker.handle().new_telemetry(endpoints);
-            Ok((worker, telemetry))
-        })
-        .transpose()?;
-
-    let heap_pages = config
-        .default_heap_pages
-        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
-            extra_pages: h as _,
-        });
-
-    let wasm = WasmExecutor::builder()
-        .with_execution_method(config.wasm_method)
-        .with_onchain_heap_alloc_strategy(heap_pages)
-        .with_offchain_heap_alloc_strategy(heap_pages)
-        .with_max_runtime_instances(config.max_runtime_instances)
-        .with_runtime_cache_size(config.runtime_cache_size)
-        .build();
-
-    let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
-    let (client, backend, keystore_container, task_manager) =
-        sc_service::new_full_parts::<Block, RuntimeApi, _>(
-            config,
-            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-            executor,
-        )?;
-    let client = Arc::new(client);
-
-    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
-    let telemetry = telemetry.map(|(worker, telemetry)| {
-        task_manager
-            .spawn_handle()
-            .spawn("telemetry", None, worker.run());
-        telemetry
-    });
-
-    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
-        config.transaction_pool.clone(),
-        config.role.is_authority().into(),
-        config.prometheus_registry(),
-        task_manager.spawn_essential_handle(),
-        client.clone(),
-    );
-
-    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
-    let import_queue = nimbus_consensus::import_queue(
-        client.clone(),
-        block_import.clone(),
-        move |_, _| async move {
-            let time = sp_timestamp::InherentDataProvider::from_system_time();
-
-            Ok((time,))
-        },
-        &task_manager.spawn_essential_handle(),
-        config.prometheus_registry(),
-        false,
-    )?;
-
-    Ok(PartialComponents {
-        backend,
-        client,
-        import_queue,
-        keystore_container,
-        task_manager,
-        transaction_pool,
-        select_chain: (),
-        other: (block_import, telemetry, telemetry_worker_handle),
-    })
+    node_common::service::new_partial(config, ())
 }
 
 /// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
diff --git a/node/Cargo.toml b/node/Cargo.toml
index 593a8e56f..2b88db0b7 100644
--- a/node/Cargo.toml
+++ b/node/Cargo.toml
@@ -22,6 +22,7 @@ serde_json = { workspace = true }
 tokio = { workspace = true }
 
 # Local
+node-common = { workspace = true }
 ccp-authorities-noting-inherent = { workspace = true, features = [ "std" ] }
 dancebox-runtime = { workspace = true, features = [ "std" ] }
 manual-xcm-rpc = { workspace = true }
diff --git a/node/src/container_chain_spawner.rs b/node/src/container_chain_spawner.rs
index c535133ad..ba8c52f00 100644
--- a/node/src/container_chain_spawner.rs
+++ b/node/src/container_chain_spawner.rs
@@ -49,8 +49,10 @@ use {
         time::Instant,
     },
     tc_orchestrator_chain_interface::OrchestratorChainInterface,
-    tokio::sync::{mpsc, oneshot},
-    tokio::time::{sleep, Duration},
+    tokio::{
+        sync::{mpsc, oneshot},
+        time::{sleep, Duration},
+    },
 };
 
 /// Struct with all the params needed to start a container chain node given the CLI arguments,
diff --git a/node/src/service.rs b/node/src/service.rs
index 4d85901b3..b6f799c05 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -137,87 +137,7 @@ pub fn new_partial(
     >,
     sc_service::Error,
 > {
-    let telemetry = config
-        .telemetry_endpoints
-        .clone()
-        .filter(|x| !x.is_empty())
-        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
-            let worker = TelemetryWorker::new(16)?;
-            let telemetry = worker.handle().new_telemetry(endpoints);
-            Ok((worker, telemetry))
-        })
-        .transpose()?;
-
-    let heap_pages = config
-        .default_heap_pages
-        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
-            extra_pages: h as _,
-        });
-
-    let wasm = WasmExecutor::builder()
-        .with_execution_method(config.wasm_method)
-        .with_onchain_heap_alloc_strategy(heap_pages)
-        .with_offchain_heap_alloc_strategy(heap_pages)
-        .with_max_runtime_instances(config.max_runtime_instances)
-        .with_runtime_cache_size(config.runtime_cache_size)
-        .build();
-
-    let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
-    let (client, backend, keystore_container, task_manager) =
-        sc_service::new_full_parts::<Block, RuntimeApi, _>(
-            config,
-            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-            executor,
-        )?;
-    let client = Arc::new(client);
-
-    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
-    let telemetry = telemetry.map(|(worker, telemetry)| {
-        task_manager
-            .spawn_handle()
-            .spawn("telemetry", None, worker.run());
-        telemetry
-    });
-
-    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
-        config.transaction_pool.clone(),
-        config.role.is_authority().into(),
-        config.prometheus_registry(),
-        task_manager.spawn_essential_handle(),
-        client.clone(),
-    );
-
-    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-    // The nimbus import queue ONLY checks the signature correctness
-    // Any other checks corresponding to the author-correctness should be done
-    // in the runtime
-    let import_queue = nimbus_consensus::import_queue(
-        client.clone(),
-        block_import.clone(),
-        move |_, _| async move {
-            let time = sp_timestamp::InherentDataProvider::from_system_time();
-
-            Ok((time,))
-        },
-        &task_manager.spawn_essential_handle(),
-        config.prometheus_registry(),
-        false,
-    )?;
-
-    let maybe_select_chain = None;
-
-    Ok(PartialComponents {
-        backend,
-        client,
-        import_queue,
-        keystore_container,
-        task_manager,
-        transaction_pool,
-        select_chain: maybe_select_chain,
-        other: (block_import, telemetry, telemetry_worker_handle),
-    })
+    node_common::service::new_partial(config, None)
 }
 
 /// Background task used to detect changes to container chain assignment,

From 80326e98f541ad58493314555f2d126d6bbad936 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 11:50:50 +0100
Subject: [PATCH 03/29] extract divergent code with frontier new_partial

---
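Notes: instead of returning a full `PartialComponents`, the shared
helper now returns only the parts that are identical in every node
(`NewPartial`), and each node builds its own block import and import
queue on top. A sketch of the consuming pattern used by all call
sites (illustrative):

    let NewPartial {
        client,
        backend,
        transaction_pool,
        telemetry,
        telemetry_worker_handle,
        task_manager,
        keystore_container,
    } = node_common::service::new_partial(config)?;

    // Node-specific part goes here: e.g. the frontier node plugs in
    // its own select chain and Ethereum-aware import pipeline.
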
 Cargo.lock                                  |   1 +
 client/node-common/src/service.rs           | 114 ++++++++++++------
 .../templates/frontier/node/Cargo.toml      |   1 +
 .../templates/frontier/node/src/service.rs  |  69 ++---------
 .../templates/simple/node/src/service.rs    |  48 +++++++-
 node/src/service.rs                         |  38 +++++-
 6 files changed, 170 insertions(+), 101 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 8302fd273..964c06a78 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1600,6 +1600,7 @@ dependencies = [
  "manual-xcm-rpc",
  "nimbus-consensus",
  "nimbus-primitives",
+ "node-common",
  "pallet-ethereum",
  "pallet-transaction-payment-rpc",
  "pallet-transaction-payment-rpc-runtime-api",
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index 0e8dc891d..c934ce736 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -14,7 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
 
-use sp_block_builder::BlockBuilder;
+use {
+    sc_service::{KeystoreContainer, TaskManager},
+    sp_block_builder::BlockBuilder,
+};
 
 use {
     cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
@@ -22,7 +25,7 @@ use {
         HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor,
         DEFAULT_HEAP_ALLOC_STRATEGY,
     },
-    sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient},
+    sc_service::{Configuration, TFullBackend, TFullClient},
     sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
     sp_api::ConstructRuntimeApi,
     sp_transaction_pool::runtime_api::TaggedTransactionQueue,
@@ -42,25 +45,53 @@ pub type ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor> = TPar
 pub type ConstructedRuntimeApi<Block, Client, RuntimeApi> =
     <RuntimeApi as ConstructRuntimeApi<Block, Client>>::RuntimeApi;
 
-pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor, SelectChain>(
-    config: &Configuration,
-    select_chain: SelectChain,
-) -> Result<
-    PartialComponents<
+pub struct NewPartial<Block, RuntimeApi, ParachainNativeExecutor>
+where
+    Block: cumulus_primitives_core::BlockT,
+    ParachainNativeExecutor: NativeExecutionDispatch + 'static,
+    RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>
+        + Sync
+        + Send
+        + 'static,
+    ConstructedRuntimeApi<
+        Block,
         ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
-        ParachainBackend<Block>,
-        SelectChain,
-        sc_consensus::DefaultImportQueue<Block>,
+        RuntimeApi,
+    >: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
+{
+    pub client: Arc<ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>,
+    pub backend: Arc<ParachainBackend<Block>>,
+    pub task_manager: TaskManager,
+    pub keystore_container: KeystoreContainer,
+    pub transaction_pool: Arc<
         sc_transaction_pool::FullPool<
             Block,
             ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
         >,
-        (
-            ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor>,
-            Option<Telemetry>,
-            Option<TelemetryWorkerHandle>,
-        ),
     >,
+    pub telemetry: Option<Telemetry>,
+    pub telemetry_worker_handle: Option<TelemetryWorkerHandle>,
+}
+
+pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor>(
+    config: &Configuration,
+) -> Result<
+    // PartialComponents<
+    //     ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
+    //     ParachainBackend<Block>,
+    //     SelectChain,
+    //     sc_consensus::DefaultImportQueue<Block>,
+    //     sc_transaction_pool::FullPool<
+    //         Block,
+    //         ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
+    //     >,
+    //     (
+    //         ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor>,
+    //         Option<Telemetry>,
+    //         Option<TelemetryWorkerHandle>,
+    //     ),
+    // >,
+    NewPartial<Block, RuntimeApi, ParachainNativeExecutor>,
     sc_service::Error,
 >
 where
@@ -62,7 +64,7 @@ where
-    Block: BlockT,
+    Block: cumulus_primitives_core::BlockT,
     ParachainNativeExecutor: NativeExecutionDispatch + 'static,
     RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>
         + Sync
@@ -72,7 +74,88 @@ where
         Block,
         ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
         RuntimeApi,
-    >: TaggedTransactionQueue<Block>,
+    >: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
 {
@@ -133,29 +164,38 @@ where
         client.clone(),
     );
 
-    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
-    let import_queue = nimbus_consensus::import_queue(
-        client.clone(),
-        block_import.clone(),
-        move |_, _| async move {
-            let time = sp_timestamp::InherentDataProvider::from_system_time();
-
-            Ok((time,))
-        },
-        &task_manager.spawn_essential_handle(),
-        config.prometheus_registry(),
-        false,
-    )?;
-
-    Ok(PartialComponents {
-        backend,
+    // let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+    // let import_queue = nimbus_consensus::import_queue(
+    //     client.clone(),
+    //     block_import.clone(),
+    //     move |_, _| async move {
+    //         let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+    //         Ok((time,))
+    //     },
+    //     &task_manager.spawn_essential_handle(),
+    //     config.prometheus_registry(),
+    //     false,
+    // )?;
+
+    // Ok(PartialComponents {
+    //     backend,
+    //     client,
+    //     import_queue,
+    //     keystore_container,
+    //     task_manager,
+    //     transaction_pool,
+    //     select_chain,
+    //     other: (block_import, telemetry, telemetry_worker_handle),
+    // })
+    Ok(NewPartial {
         client,
-        import_queue,
-        keystore_container,
-        task_manager,
+        backend,
         transaction_pool,
-        select_chain,
-        other: (block_import, telemetry, telemetry_worker_handle),
+        telemetry,
+        telemetry_worker_handle,
+        task_manager,
+        keystore_container,
     })
 }
diff --git a/container-chains/templates/frontier/node/Cargo.toml b/container-chains/templates/frontier/node/Cargo.toml
index 4c868bdd5..7ab6c5189 100644
--- a/container-chains/templates/frontier/node/Cargo.toml
+++ b/container-chains/templates/frontier/node/Cargo.toml
@@ -21,6 +21,7 @@ serde = { workspace = true, features = [ "derive" ] }
 url = { workspace = true }
 
 # Local
+node-common = { workspace = true }
 ccp-authorities-noting-inherent = { workspace = true }
 container-chain-template-frontier-runtime = { workspace = true, features = [ "std" ] }
 manual-xcm-rpc = { workspace = true }
diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs
index 9a28d6178..99d31eeae 100644
--- a/container-chains/templates/frontier/node/src/service.rs
+++ b/container-chains/templates/frontier/node/src/service.rs
@@ -18,7 +18,6 @@
 use {
     cumulus_client_consensus_common::ParachainBlockImport,
-    sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY},
     sc_network::config::FullNetworkConfiguration,
 };
 // std
@@ -65,9 +64,11 @@ use {
     sc_executor::NativeElseWasmExecutor,
     sc_network::NetworkBlock,
     sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager},
-    sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+    sc_telemetry::{Telemetry, TelemetryWorkerHandle},
 };
 
+use node_common::service::NewPartial;
+
 /// Native executor type.
 use crate::client::TemplateRuntimeExecutor;
 
@@ -160,53 +161,15 @@ pub fn new_partial(
     // Use ethereum style for subscription ids
     config.rpc_id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider));
 
-    let telemetry = config
-        .telemetry_endpoints
-        .clone()
-        .filter(|x| !x.is_empty())
-        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
-            let worker = TelemetryWorker::new(16)?;
-            let telemetry = worker.handle().new_telemetry(endpoints);
-            Ok((worker, telemetry))
-        })
-        .transpose()?;
-
-    // Default runtime_cache_size is 2
-    // For now we can work with this, but it will likely need
-    // to change once we start having runtime_cache_sizes, or
-    // run nodes with the maximum for this value
-    let heap_pages = config
-        .default_heap_pages
-        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
-            extra_pages: h as _,
-        });
-
-    let wasm = WasmExecutor::builder()
-        .with_execution_method(config.wasm_method)
-        .with_onchain_heap_alloc_strategy(heap_pages)
-        .with_offchain_heap_alloc_strategy(heap_pages)
-        .with_max_runtime_instances(config.max_runtime_instances)
-        .with_runtime_cache_size(config.runtime_cache_size)
-        .build();
-
-    let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
-    let (client, backend, keystore_container, task_manager) =
-        sc_service::new_full_parts::<Block, RuntimeApi, _>(
-            config,
-            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-            executor,
-        )?;
-    let client = Arc::new(client);
-
-    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
-    let telemetry = telemetry.map(|(worker, telemetry)| {
-        task_manager
-            .spawn_handle()
-            .spawn("telemetry", None, worker.run());
-        telemetry
-    });
+    let NewPartial {
+        client,
+        backend,
+        transaction_pool,
+        telemetry,
+        telemetry_worker_handle,
+        task_manager,
+        keystore_container,
+    } = node_common::service::new_partial(config)?;
 
     let maybe_select_chain = if dev_service {
         Some(sc_consensus::LongestChain::new(backend.clone()))
@@ -214,14 +177,6 @@ pub fn new_partial(
         None
     };
 
-    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
-        config.transaction_pool.clone(),
-        config.role.is_authority().into(),
-        config.prometheus_registry(),
-        task_manager.spawn_essential_handle(),
-        client.clone(),
-    );
-
     let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
     let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs
index 0a8b23ac1..6b81d696f 100644
--- a/container-chains/templates/simple/node/src/service.rs
+++ b/container-chains/templates/simple/node/src/service.rs
@@ -1,5 +1,3 @@
-//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
-
 // Copyright (C) Moondance Labs Ltd.
 // This file is part of Tanssi.
 
@@ -15,14 +13,18 @@
 // You should have received a copy of the GNU General Public License
 // along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
 
-use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
+
+//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
 
 // std
 use std::{sync::Arc, time::Duration};
 
 use {cumulus_client_cli::CollatorOptions, sc_network::config::FullNetworkConfiguration};
 
 // Local Runtime Types
-use container_chain_template_simple_runtime::{opaque::Block, RuntimeApi};
+use {
+    container_chain_template_simple_runtime::{opaque::Block, RuntimeApi},
+    node_common::service::NewPartial,
+};
 
 // Cumulus Imports
 #[allow(deprecated)]
@@ -44,7 +46,7 @@ use {
     sc_executor::NativeElseWasmExecutor,
     sc_network::NetworkBlock,
     sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager},
-    sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+    sc_telemetry::{Telemetry, TelemetryWorkerHandle},
     sc_transaction_pool_api::OffchainTransactionPoolFactory,
 };
 
@@ -92,7 +94,41 @@ pub fn new_partial(
     >,
     sc_service::Error,
 > {
-    node_common::service::new_partial(config, ())
+    let NewPartial {
+        client,
+        backend,
+        transaction_pool,
+        telemetry,
+        telemetry_worker_handle,
+        task_manager,
+        keystore_container,
+    } = node_common::service::new_partial(config)?;
+
+    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+    let import_queue = nimbus_consensus::import_queue(
+        client.clone(),
+        block_import.clone(),
+        move |_, _| async move {
+            let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+            Ok((time,))
+        },
+        &task_manager.spawn_essential_handle(),
+        config.prometheus_registry(),
+        false,
+    )?;
+
+    Ok(PartialComponents {
+        backend,
+        client,
+        import_queue,
+        keystore_container,
+        task_manager,
+        transaction_pool,
+        select_chain: (),
+        other: (block_import, telemetry, telemetry_worker_handle),
+    })
 }
 
 /// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
diff --git a/node/src/service.rs b/node/src/service.rs
index b6f799c05..07567c0d7 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -16,6 +16,8 @@
 
 //! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
 
+use node_common::service::NewPartial;
+
 #[allow(deprecated)]
 use {
     crate::{
@@ -137,7 +139,41 @@ pub fn new_partial(
     >,
     sc_service::Error,
 > {
-    node_common::service::new_partial(config, None)
+    let NewPartial {
+        client,
+        backend,
+        transaction_pool,
+        telemetry,
+        telemetry_worker_handle,
+        task_manager,
+        keystore_container,
+    } = node_common::service::new_partial(config)?;
+
+    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+    let import_queue = nimbus_consensus::import_queue(
+        client.clone(),
+        block_import.clone(),
+        move |_, _| async move {
+            let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+            Ok((time,))
+        },
+        &task_manager.spawn_essential_handle(),
+        config.prometheus_registry(),
+        false,
+    )?;
+
+    Ok(PartialComponents {
+        backend,
+        client,
+        import_queue,
+        keystore_container,
+        task_manager,
+        transaction_pool,
+        select_chain: None,
+        other: (block_import, telemetry, telemetry_worker_handle),
+    })
 }
 
 /// Background task used to detect changes to container chain assignment,

From 6b71e471db64640273b20f9afc2005bb406bc735 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 11:55:18 +0100
Subject: [PATCH 04/29] cleanup

---
 client/node-common/src/service.rs | 48 ++-----------------------------
 node/src/service.rs               |  3 ++
 2 files changed, 5 insertions(+), 46 deletions(-)

diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index c934ce736..0047c6a01 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -75,25 +75,7 @@ where
 
 pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor>(
     config: &Configuration,
-) -> Result<
-    // PartialComponents<
-    //     ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
-    //     ParachainBackend<Block>,
-    //     SelectChain,
-    //     sc_consensus::DefaultImportQueue<Block>,
-    //     sc_transaction_pool::FullPool<
-    //         Block,
-    //         ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
-    //     >,
-    //     (
-    //         ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor>,
-    //         Option<Telemetry>,
-    //         Option<TelemetryWorkerHandle>,
-    //     ),
-    // >,
-    NewPartial<Block, RuntimeApi, ParachainNativeExecutor>,
-    sc_service::Error,
->
+) -> Result<NewPartial<Block, RuntimeApi, ParachainNativeExecutor>, sc_service::Error>
 where
     Block: cumulus_primitives_core::BlockT,
     ParachainNativeExecutor: NativeExecutionDispatch + 'static,
@@ -136,8 +118,7 @@ where
         .with_runtime_cache_size(config.runtime_cache_size)
         .build();
 
-    let executor: ParachainExecutor<ParachainNativeExecutor> =
-        ParachainExecutor::new_with_wasm_executor(wasm);
+    let executor = ParachainExecutor::new_with_wasm_executor(wasm);
 
     let (client, backend, keystore_container, task_manager) =
         sc_service::new_full_parts::<Block, RuntimeApi, _>(
@@ -164,31 +145,6 @@ where
         client.clone(),
     );
 
-    // let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
-    // let import_queue = nimbus_consensus::import_queue(
-    //     client.clone(),
-    //     block_import.clone(),
-    //     move |_, _| async move {
-    //         let time = sp_timestamp::InherentDataProvider::from_system_time();
-
-    //         Ok((time,))
-    //     },
-    //     &task_manager.spawn_essential_handle(),
-    //     config.prometheus_registry(),
-    //     false,
-    // )?;
-
-    // Ok(PartialComponents {
-    //     backend,
-    //     client,
-    //     import_queue,
-    //     keystore_container,
-    //     task_manager,
-    //     transaction_pool,
-    //     select_chain,
-    //     other: (block_import, telemetry, telemetry_worker_handle),
-    // })
     Ok(NewPartial {
         client,
         backend,
diff --git a/node/src/service.rs b/node/src/service.rs
index 07567c0d7..bbf6d8132 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -151,6 +151,9 @@ pub fn new_partial(
 
     let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
 
+    // The nimbus import queue ONLY checks the signature correctness
+    // Any other checks corresponding to the author-correctness should be done
+    // in the runtime
     let import_queue = nimbus_consensus::import_queue(
         client.clone(),
         block_import.clone(),

From 3bb5279b61edadd88984b12bdb9848e9bbb633a0 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 12:03:04 +0100
Subject: [PATCH 05/29] update new_partial_dev

---
 node/src/service.rs | 66 ++++++++-------------------------------------
 1 file changed, 11 insertions(+), 55 deletions(-)

diff --git a/node/src/service.rs b/node/src/service.rs
index bbf6d8132..8f6d89e92 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -55,16 +55,14 @@ use {
         UsageProvider,
     },
     sc_consensus::{BlockImport, ImportQueue},
-    sc_executor::{
-        HeapAllocStrategy, NativeElseWasmExecutor, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY,
-    },
+    sc_executor::NativeElseWasmExecutor,
     sc_network::{config::FullNetworkConfiguration, NetworkBlock},
     sc_network_sync::SyncingService,
     sc_service::{
         Configuration, Error as ServiceError, PartialComponents, TFullBackend, TFullClient,
         TaskManager,
     },
-    sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle},
+    sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorkerHandle},
     sp_api::StorageProof,
     sp_consensus::SyncOracle,
     sp_core::{
@@ -271,57 +269,15 @@ pub fn new_partial_dev(
     >,
     sc_service::Error,
 > {
-    let telemetry = config
-        .telemetry_endpoints
-        .clone()
-        .filter(|x| !x.is_empty())
-        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
-            let worker = TelemetryWorker::new(16)?;
-            let telemetry = worker.handle().new_telemetry(endpoints);
-            Ok((worker, telemetry))
-        })
-        .transpose()?;
-
-    let heap_pages = config
-        .default_heap_pages
-        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
-            extra_pages: h as _,
-        });
-
-    let wasm = WasmExecutor::builder()
-        .with_execution_method(config.wasm_method)
-        .with_onchain_heap_alloc_strategy(heap_pages)
-        .with_offchain_heap_alloc_strategy(heap_pages)
-        .with_max_runtime_instances(config.max_runtime_instances)
-        .with_runtime_cache_size(config.runtime_cache_size)
-        .build();
-
-    let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
-    let (client, backend, keystore_container, task_manager) =
-        sc_service::new_full_parts::<Block, RuntimeApi, _>(
-            config,
-            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-            executor,
-        )?;
-    let client = Arc::new(client);
-
-    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
-    let telemetry = telemetry.map(|(worker, telemetry)| {
-        task_manager
-            .spawn_handle()
-            .spawn("telemetry", None, worker.run());
-        telemetry
-    });
-
-    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
-        config.transaction_pool.clone(),
-        config.role.is_authority().into(),
-        config.prometheus_registry(),
-        task_manager.spawn_essential_handle(),
-        client.clone(),
-    );
+    let NewPartial {
+        client,
+        backend,
+        transaction_pool,
+        telemetry,
+        telemetry_worker_handle,
+        task_manager,
+        keystore_container,
+    } = node_common::service::new_partial(config)?;
 
     let block_import = DevParachainBlockImport::new(client.clone());
     let import_queue = build_manual_seal_import_queue(

From f7840b7372fb8cb806755d5307cd6271adda3093 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 14:01:43 +0100
Subject: [PATCH 06/29] use macro to reduce generics verbosity

---
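Notes: the `T![..]` macro introduced below is a plain `macro_rules!`
textual substitution; it only works because `Block`, `RuntimeApi` and
`ParachainNativeExecutor` are in scope as generic parameters at every
expansion site. Written out by hand (illustrative):

    // T![Client] expands to:
    TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ParachainNativeExecutor>>

    // so a bound such as
    RuntimeApi: ConstructRuntimeApi<Block, T![Client]> + Sync + Send + 'static,
    // is shorthand for the fully spelled-out generic bound.
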
 client/node-common/src/service.rs | 59 +++++++++++--------------------
 1 file changed, 20 insertions(+), 39 deletions(-)

diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index 0047c6a01..90f14afe8 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -32,43 +32,31 @@ use {
     std::sync::Arc,
 };
 
-pub type ParachainExecutor<ParachainNativeExecutor> =
-    NativeElseWasmExecutor<ParachainNativeExecutor>;
-pub type ParachainClient<Block, RuntimeApi, ParachainNativeExecutor> =
-    TFullClient<Block, RuntimeApi, ParachainExecutor<ParachainNativeExecutor>>;
-pub type ParachainBackend<Block> = TFullBackend<Block>;
-pub type ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor> = TParachainBlockImport<
-    Block,
-    Arc<ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>,
-    ParachainBackend<Block>,
->;
-pub type ConstructedRuntimeApi<Block, Client, RuntimeApi> =
-    <RuntimeApi as ConstructRuntimeApi<Block, Client>>::RuntimeApi;
+/// Functions in this module are generic over `Block`, `RuntimeApi`, and
+/// `ParachainNativeExecutor`. Using type aliases requires them to be
+/// generic too, which makes them still verbose to use. For that reason we use
+/// a macro that expects the above types to already be in scope.
+macro_rules! T {
+    [Executor] => { NativeElseWasmExecutor<ParachainNativeExecutor> };
+    [Client] => { TFullClient<Block, RuntimeApi, T![Executor]> };
+    [Backend] => { TFullBackend<Block> };
+    [ConstructedRuntimeApi] => {
+        <RuntimeApi as ConstructRuntimeApi<Block, T![Client]>>::RuntimeApi
+    };
+}
 
 pub struct NewPartial<Block, RuntimeApi, ParachainNativeExecutor>
 where
     Block: cumulus_primitives_core::BlockT,
     ParachainNativeExecutor: NativeExecutionDispatch + 'static,
-    RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>
-        + Sync
-        + Send
-        + 'static,
-    ConstructedRuntimeApi<
-        Block,
-        ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
-        RuntimeApi,
-    >: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
+    RuntimeApi: ConstructRuntimeApi<Block, T![Client]> + Sync + Send + 'static,
+    T![ConstructedRuntimeApi]: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
 {
-    pub client: Arc<ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>,
-    pub backend: Arc<ParachainBackend<Block>>,
+    pub client: Arc<T![Client]>,
+    pub backend: Arc<T![Backend]>,
     pub task_manager: TaskManager,
     pub keystore_container: KeystoreContainer,
-    pub transaction_pool: Arc<
-        sc_transaction_pool::FullPool<
-            Block,
-            ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
-        >,
-    >,
+    pub transaction_pool: Arc<sc_transaction_pool::FullPool<Block, T![Client]>>,
     pub telemetry: Option<Telemetry>,
     pub telemetry_worker_handle: Option<TelemetryWorkerHandle>,
 }
@@ -79,15 +67,8 @@ pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor>(
 where
     Block: cumulus_primitives_core::BlockT,
     ParachainNativeExecutor: NativeExecutionDispatch + 'static,
-    RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>
-        + Sync
-        + Send
-        + 'static,
-    ConstructedRuntimeApi<
-        Block,
-        ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
-        RuntimeApi,
-    >: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
+    RuntimeApi: ConstructRuntimeApi<Block, T![Client]> + Sync + Send + 'static,
+    T![ConstructedRuntimeApi]: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
 {
     let telemetry = config
         .telemetry_endpoints
@@ -118,7 +99,7 @@ where
         .with_runtime_cache_size(config.runtime_cache_size)
         .build();
 
-    let executor = ParachainExecutor::new_with_wasm_executor(wasm);
+    let executor = <T![Executor]>::new_with_wasm_executor(wasm);
 
     let (client, backend, keystore_container, task_manager) =
         sc_service::new_full_parts::<Block, RuntimeApi, _>(

From 8cd20962468eaf9c5c2728efb89be7c868a7ee63 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 16:41:59 +0100
Subject: [PATCH 07/29] generic build_cumulus_network based on custom
 import_queue + toml-sort

---
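Notes: the `NodeBuilder` below uses a small type-state pattern: its
extra `Cumulus` type parameter is `()` right after `new`, and becomes
`CumulusNetwork<Block>` once `build_cumulus_network` has been called
with a node-specific import queue, so network-dependent methods such
as `spawn_common_tasks` only exist in the second state. A sketch of
the intended flow (illustrative):

    let builder = NodeBuilder::new(&parachain_config, polkadot_config,
        collator_options, hwbench).await?;       // Cumulus = ()
    let import_queue = /* node-specific import queue */;
    let mut builder = builder
        .build_cumulus_network(&parachain_config, para_id, import_queue)
        .await?;                                 // Cumulus = CumulusNetwork<Block>
    builder.spawn_common_tasks(&parachain_config)?;
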
 Cargo.lock                                    |   2 +
 Cargo.toml                                    |  32 +-
 client/consensus/Cargo.toml                   |   2 +-
 client/manual-xcm/Cargo.toml                  |   2 +-
 client/node-common/Cargo.toml                 |   3 +-
 client/node-common/src/service.rs             | 307 +++++++++++++-----
 .../templates/frontier/node/Cargo.toml        |  14 +-
 .../templates/frontier/node/src/service.rs    | 120 +++----
 .../templates/frontier/runtime/Cargo.toml     |  28 +-
 .../templates/simple/node/Cargo.toml          |   7 +-
 .../templates/simple/node/src/service.rs      |  73 +++--
 .../templates/simple/runtime/Cargo.toml       |  20 +-
 node/Cargo.toml                               |   8 +-
 node/src/service.rs                           | 180 ++++++----
 pallets/collator-assignment/Cargo.toml        |  10 +-
 pallets/pooled-staking/Cargo.toml             |   2 +-
 pallets/registrar/Cargo.toml                  |   2 +-
 .../container-chain-genesis-data/Cargo.toml   |   2 +-
 primitives/core/Cargo.toml                    |   2 +-
 primitives/traits/Cargo.toml                  |   2 +-
 runtime/dancebox/Cargo.toml                   |  64 +---
 21 files changed, 515 insertions(+), 367 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 964c06a78..1d3742b80 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6875,6 +6875,7 @@ dependencies = [
  "sc-network",
  "sc-network-common",
  "sc-network-sync",
+ "sc-network-transactions",
  "sc-offchain",
  "sc-rpc",
  "sc-service",
@@ -6883,6 +6884,7 @@ dependencies = [
  "sc-tracing",
  "sc-transaction-pool",
  "sc-transaction-pool-api",
+ "sc-utils",
  "serde",
  "sp-api",
  "sp-block-builder",
diff --git a/Cargo.toml b/Cargo.toml
index 4b08d6227..225699beb 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -45,9 +45,9 @@ pallet-cc-authorities-noting = { path = "container-chains/pallets/authorities-no
 dancebox-runtime = { path = "runtime/dancebox", default-features = false }
 manual-xcm-rpc = { path = "client/manual-xcm" }
+node-common = { path = "client/node-common" }
 tc-consensus = { path = "client/consensus" }
 tc-orchestrator-chain-interface = { path = "client/orchestrator-chain-interface" }
-node-common = { path = "client/node-common" }
 test-relay-sproof-builder = { path = "test-sproof-builder", default-features = false }
 tp-author-noting-inherent = { path = "primitives/author-noting-inherent", default-features = false }
 tp-chain-state-snapshot = { path = "primitives/chain-state-snapshot", default-features = false }
@@ -67,7 +67,6 @@ pallet-migrations = { git = "https://github.com/moondance-labs/moonkit", branch
 xcm-primitives = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false }
 
 # Substrate (wasm)
-sp-consensus-beefy = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
 frame-benchmarking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
 frame-executive = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
 frame-support = { git = "https://github.com/moondance-labs/polkadot-sdk.git", branch = "tanssi-polkadot-v1.1.0", version = "4.0.0-dev", default-features = false }
@@ -94,6 +93,7 @@ sp-block-builder = { git = "https://github.com/moondance-labs/polkadot-sdk", bra
 sp-consensus = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
 sp-consensus-aura = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
 sp-consensus-babe = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
+sp-consensus-beefy = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
 sp-consensus-slots = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
 sp-core = { git = "https://github.com/moondance-labs/polkadot-sdk.git", branch = "tanssi-polkadot-v1.1.0", version = "21.0.0", default-features = false }
 sp-debug-derive = { git = "https://github.com/moondance-labs/polkadot-sdk.git", branch = "tanssi-polkadot-v1.1.0", default-features = false }
@@ -129,6 +129,7 @@ sc-network = { git = "https://github.com/moondance-labs/polkadot-sdk", branch =
"https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } sc-network-sync = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } sc-network-test = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } +sc-network-transactions = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } sc-offchain = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } sc-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } sc-service = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } @@ -137,6 +138,7 @@ sc-telemetry = { git = "https://github.com/moondance-labs/polkadot-sdk", branch sc-tracing = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } sc-transaction-pool = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } sc-transaction-pool-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } +sc-utils = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } sp-blockchain = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } sp-externalities = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } sp-keystore = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } @@ -157,11 +159,11 @@ pallet-xcm-benchmarks = { git = "https://github.com/moondance-labs/polkadot-sdk" polkadot-parachain-primitives = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } polkadot-runtime-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } polkadot-runtime-parachains = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } -westend-runtime = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } -westend-runtime-constants = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } staging-xcm = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } staging-xcm-builder = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } staging-xcm-executor = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } +westend-runtime = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } +westend-runtime-constants = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } # Polkadot (client) polkadot-cli = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" } @@ -224,9 +226,9 @@ fc-storage = { git = "https://github.com/moondance-labs/frontier", branch = "tan bounded-collections = { version = "0.1.8", default-features = false } hex-literal = { version = 
"0.3.4" } log = { version = "0.4.17", default-features = false } +rand_chacha = { version = "0.3.1", default-features = false } serde = { version = "1.0.152", default-features = false } smallvec = "1.10.0" -rand_chacha = { version = "0.3.1", default-features = false } # General (client) async-io = "1.3" @@ -249,6 +251,16 @@ tokio = { version = "1.32.0", default-features = false } tracing = { version = "0.1.37", default-features = false } url = "2.2.2" +[patch.crates-io] +jsonrpsee = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } +jsonrpsee-client-transport = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } +jsonrpsee-core = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } +jsonrpsee-http-client = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } +jsonrpsee-proc-macros = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } +jsonrpsee-server = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } +jsonrpsee-types = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } +jsonrpsee-ws-client = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } + [profile.production] codegen-units = 1 inherits = "release" @@ -258,13 +270,3 @@ lto = true [profile.release] opt-level = 3 panic = "unwind" - -[patch.crates-io] -jsonrpsee = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } -jsonrpsee-client-transport = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } -jsonrpsee-core = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } -jsonrpsee-types = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } -jsonrpsee-http-client = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } -jsonrpsee-proc-macros = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } -jsonrpsee-server = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } -jsonrpsee-ws-client = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" } diff --git a/client/consensus/Cargo.toml b/client/consensus/Cargo.toml index a8832a4f0..9a8acf9c1 100644 --- a/client/consensus/Cargo.toml +++ b/client/consensus/Cargo.toml @@ -14,7 +14,7 @@ sc-consensus-manual-seal = { workspace = true } sc-consensus-slots = { workspace = true } sc-telemetry = { workspace = true } sp-api = { workspace = true } -sp-application-crypto = { workspace = true, features = [ "std", "full_crypto"] } +sp-application-crypto = { workspace = true, features = [ "full_crypto", "std" ] } sp-block-builder = { workspace = true } sp-blockchain = { workspace = true } sp-consensus = { workspace = true } diff --git a/client/manual-xcm/Cargo.toml b/client/manual-xcm/Cargo.toml index 02511af71..993881404 100644 --- a/client/manual-xcm/Cargo.toml +++ b/client/manual-xcm/Cargo.toml @@ -12,7 +12,7 @@ futures = { workspace = true, features = [ "compat" ] } hex-literal = { workspace = true } jsonrpsee = { workspace = true, features = [ "macros", "server" ] } parity-scale-codec = { workspace = true, features = [ "std" ] } -tokio = { workspace = true, features = [ "sync", "time" ] } 
 staging-xcm = { workspace = true }
+tokio = { workspace = true, features = [ "sync", "time" ] }
 
 cumulus-primitives-core = { workspace = true, features = [ "std" ] }
diff --git a/client/node-common/Cargo.toml b/client/node-common/Cargo.toml
index 8ac291e2c..a251f5143 100644
--- a/client/node-common/Cargo.toml
+++ b/client/node-common/Cargo.toml
@@ -36,6 +36,7 @@ sc-executor = { workspace = true }
 sc-network = { workspace = true }
 sc-network-common = { workspace = true }
 sc-network-sync = { workspace = true }
+sc-network-transactions = { workspace = true }
 sc-offchain = { workspace = true }
 sc-rpc = { workspace = true }
 sc-service = { workspace = true }
@@ -44,6 +45,7 @@ sc-telemetry = { workspace = true }
 sc-tracing = { workspace = true }
 sc-transaction-pool = { workspace = true }
 sc-transaction-pool-api = { workspace = true }
+sc-utils = { workspace = true }
 sp-api = { workspace = true, features = [ "std" ] }
 sp-block-builder = { workspace = true }
 sp-blockchain = { workspace = true }
@@ -77,4 +79,3 @@ cumulus-client-service = { workspace = true }
 cumulus-primitives-core = { workspace = true }
 cumulus-primitives-parachain-inherent = { workspace = true }
 cumulus-relay-chain-interface = { workspace = true }
-
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index 90f14afe8..4c3ff78a4 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -14,20 +14,33 @@
 // You should have received a copy of the GNU General Public License
 // along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
 
-use {
-    sc_service::{KeystoreContainer, TaskManager},
-    sp_block_builder::BlockBuilder,
-};
+use {futures::FutureExt, sp_offchain::OffchainWorkerApi};
 
 use {
-    cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
+    cumulus_client_cli::CollatorOptions,
+    cumulus_client_service::{
+        build_relay_chain_interface, prepare_node_config, CollatorSybilResistance,
+    },
+    cumulus_primitives_core::ParaId,
+    cumulus_relay_chain_interface::RelayChainInterface,
+    polkadot_primitives::CollatorPair,
+    sc_client_api::Backend,
+    sc_consensus::ImportQueue,
     sc_executor::{
         HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor,
        DEFAULT_HEAP_ALLOC_STRATEGY,
     },
-    sc_service::{Configuration, TFullBackend, TFullClient},
+    sc_network::{config::FullNetworkConfiguration, NetworkService},
+    sc_network_sync::SyncingService,
+    sc_network_transactions::TransactionsHandlerController,
+    sc_service::{
+        Configuration, KeystoreContainer, NetworkStarter, TFullBackend, TFullClient, TaskManager,
+    },
     sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+    sc_transaction_pool_api::OffchainTransactionPoolFactory,
+    sc_utils::mpsc::TracingUnboundedSender,
     sp_api::ConstructRuntimeApi,
+    sp_block_builder::BlockBuilder,
     sp_transaction_pool::runtime_api::TaggedTransactionQueue,
     std::sync::Arc,
 };
@@ -43,10 +56,33 @@ use {
 /// Functions in this module are generic over `Block`, `RuntimeApi`, and
 /// `ParachainNativeExecutor`. Using type aliases requires them to be
 /// generic too, which makes them still verbose to use. For that reason we use
 /// a macro that expects the above types to already be in scope.
 macro_rules! T {
     [Executor] => { NativeElseWasmExecutor<ParachainNativeExecutor> };
     [Client] => { TFullClient<Block, RuntimeApi, T![Executor]> };
     [Backend] => { TFullBackend<Block> };
     [ConstructedRuntimeApi] => {
         <RuntimeApi as ConstructRuntimeApi<Block, T![Client]>>::RuntimeApi
     };
+    [Where] => {
+        Block: cumulus_primitives_core::BlockT,
+        ParachainNativeExecutor: NativeExecutionDispatch + 'static,
+        RuntimeApi: ConstructRuntimeApi<Block, T![Client]> + Sync + Send + 'static,
+        T![ConstructedRuntimeApi]: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
+    }
 }
 
-pub struct NewPartial<Block, RuntimeApi, ParachainNativeExecutor>
-where
+pub struct CumulusNetwork<Block: cumulus_primitives_core::BlockT> {
+    pub network: Arc<NetworkService<Block, Block::Hash>>,
+    pub system_rpc_tx: TracingUnboundedSender<sc_rpc::system::Request<Block>>,
+    pub tx_handler_controller: TransactionsHandlerController<Block::Hash>,
+    pub start_network: NetworkStarter,
+    pub sync_service: Arc<SyncingService<Block>>,
+}
+
+pub struct NodeBuilder<
+    Block,
+    RuntimeApi,
+    ParachainNativeExecutor,
+    // `cumulus_client_service::build_network` returns many important systems,
+    // but can only be called with an `import_queue` which can be different in
+    // each node. For that reason it is a `()` when calling `new`, then the
+    // caller creates the `import_queue` using systems contained in `NodeBuilder`,
+    // then calls `build_cumulus_network` with it to generate the cumulus systems.
+    Cumulus = (),
+> where
     Block: cumulus_primitives_core::BlockT,
     ParachainNativeExecutor: NativeExecutionDispatch + 'static,
     RuntimeApi: ConstructRuntimeApi<Block, T![Client]> + Sync + Send + 'static,
     T![ConstructedRuntimeApi]: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
 {
     pub client: Arc<T![Client]>,
     pub backend: Arc<T![Backend]>,
     pub task_manager: TaskManager,
     pub keystore_container: KeystoreContainer,
     pub transaction_pool: Arc<sc_transaction_pool::FullPool<Block, T![Client]>>,
     pub telemetry: Option<Telemetry>,
     pub telemetry_worker_handle: Option<TelemetryWorkerHandle>,
+
+    pub relay_chain_interface: Arc<dyn RelayChainInterface>,
+    pub collator_key: Option<CollatorPair>,
+
+    pub cumulus: Cumulus,
 }
 
-pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor>(
-    config: &Configuration,
-) -> Result<NewPartial<Block, RuntimeApi, ParachainNativeExecutor>, sc_service::Error>
+impl<Block, RuntimeApi, ParachainNativeExecutor>
+    NodeBuilder<Block, RuntimeApi, ParachainNativeExecutor>
 where
     Block: cumulus_primitives_core::BlockT,
     ParachainNativeExecutor: NativeExecutionDispatch + 'static,
     RuntimeApi: ConstructRuntimeApi<Block, T![Client]> + Sync + Send + 'static,
-    T![ConstructedRuntimeApi]: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
+    T![ConstructedRuntimeApi]: TaggedTransactionQueue<Block>
+        + BlockBuilder<Block>
+        + cumulus_primitives_core::CollectCollationInfo<Block>,
 {
-    let telemetry = config
-        .telemetry_endpoints
-        .clone()
-        .filter(|x| !x.is_empty())
-        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
-            let worker = TelemetryWorker::new(16)?;
-            let telemetry = worker.handle().new_telemetry(endpoints);
-            Ok((worker, telemetry))
-        })
-        .transpose()?;
+    pub async fn new(
+        parachain_config: &Configuration,
+        polkadot_config: Configuration,
+        collator_options: CollatorOptions,
+        hwbench: Option<sc_sysinfo::HwBench>,
+    ) -> Result<Self, sc_service::Error> {
+        let telemetry = parachain_config
+            .telemetry_endpoints
+            .clone()
+            .filter(|x| !x.is_empty())
+            .map(|endpoints| -> Result<_, sc_telemetry::Error> {
+                let worker = TelemetryWorker::new(16)?;
+                let telemetry = worker.handle().new_telemetry(endpoints);
+                Ok((worker, telemetry))
+            })
+            .transpose()?;
 
-    let heap_pages = config
-        .default_heap_pages
-        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
-            extra_pages: h as _,
-        });
+        let heap_pages =
+            parachain_config
+                .default_heap_pages
+                .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
+                    extra_pages: h as _,
+                });
 
-    // Default runtime_cache_size is 2
-    // For now we can work with this, but it will likely need
-    // to change once we start having runtime_cache_sizes, or
-    // run nodes with the maximum for this value
-    let wasm = WasmExecutor::builder()
-        .with_execution_method(config.wasm_method)
-        .with_onchain_heap_alloc_strategy(heap_pages)
-        .with_offchain_heap_alloc_strategy(heap_pages)
-        .with_max_runtime_instances(config.max_runtime_instances)
-        .with_runtime_cache_size(config.runtime_cache_size)
-        .build();
+        // Default runtime_cache_size is 2
+        // For now we can work with this, but it will likely need
+        // to change once we start having runtime_cache_sizes, or
+        // run nodes with the maximum for this value
+        let wasm = WasmExecutor::builder()
+            .with_execution_method(parachain_config.wasm_method)
+            .with_onchain_heap_alloc_strategy(heap_pages)
+            .with_offchain_heap_alloc_strategy(heap_pages)
+            .with_max_runtime_instances(parachain_config.max_runtime_instances)
+            .with_runtime_cache_size(parachain_config.runtime_cache_size)
+            .build();
 
-    let executor = <T![Executor]>::new_with_wasm_executor(wasm);
+        let executor = <T![Executor]>::new_with_wasm_executor(wasm);
 
-    let (client, backend, keystore_container, task_manager) =
-        sc_service::new_full_parts::<Block, RuntimeApi, _>(
-            config,
-            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+        let (client, backend, keystore_container, mut task_manager) =
+            sc_service::new_full_parts::<Block, RuntimeApi, _>(
+                parachain_config,
+                telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+ executor, + )?; + let client = Arc::new(client); - let heap_pages = config - .default_heap_pages - .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { - extra_pages: h as _, + let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager + .spawn_handle() + .spawn("telemetry", None, worker.run()); + telemetry }); - // Default runtime_cache_size is 2 - // For now we can work with this, but it will likely need - // to change once we start having runtime_cache_sizes, or - // run nodes with the maximum for this value - let wasm = WasmExecutor::builder() - .with_execution_method(config.wasm_method) - .with_onchain_heap_alloc_strategy(heap_pages) - .with_offchain_heap_alloc_strategy(heap_pages) - .with_max_runtime_instances(config.max_runtime_instances) - .with_runtime_cache_size(config.runtime_cache_size) - .build(); - - let executor = ::new_with_wasm_executor(wasm); - - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( - config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - )?; - let client = Arc::new(client); - - let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); - - let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager - .spawn_handle() - .spawn("telemetry", None, worker.run()); - telemetry - }); - - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - ); - - Ok(NewPartial { - client, - backend, - transaction_pool, - telemetry, - telemetry_worker_handle, - task_manager, - keystore_container, - }) + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + parachain_config.transaction_pool.clone(), + parachain_config.role.is_authority().into(), + parachain_config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); + + let (relay_chain_interface, collator_key) = build_relay_chain_interface( + polkadot_config, + ¶chain_config, + telemetry_worker_handle.clone(), + &mut task_manager, + collator_options.clone(), + hwbench.clone(), + ) + .await + .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; + + Ok(Self { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + task_manager, + keystore_container, + relay_chain_interface, + collator_key, + cumulus: (), + }) + } + + pub async fn build_cumulus_network( + self, + parachain_config: &Configuration, + para_id: ParaId, + import_queue: impl ImportQueue + 'static, + ) -> sc_service::error::Result< + NodeBuilder>, + > { + let Self { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + task_manager, + keystore_container, + relay_chain_interface, + collator_key, + cumulus: (), + } = self; + + let net_config = FullNetworkConfiguration::new(¶chain_config.network); + + let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = + cumulus_client_service::build_network(cumulus_client_service::BuildNetworkParams { + parachain_config: ¶chain_config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue: import_queue, + para_id, + relay_chain_interface: relay_chain_interface.clone(), + net_config, + sybil_resistance_level: 
CollatorSybilResistance::Resistant, + }) + .await?; + + Ok(NodeBuilder { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + task_manager, + keystore_container, + relay_chain_interface, + collator_key, + cumulus: CumulusNetwork { + network, + system_rpc_tx, + tx_handler_controller, + start_network, + sync_service, + }, + }) + } +} + +impl + NodeBuilder> +where + Block: cumulus_primitives_core::BlockT, + ParachainNativeExecutor: NativeExecutionDispatch + 'static, + RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static, + T![ConstructedRuntimeApi]: + TaggedTransactionQueue + BlockBuilder + OffchainWorkerApi, +{ + pub fn spawn_common_tasks( + &mut self, + parachain_config: &Configuration, + ) -> sc_service::error::Result<()> { + if parachain_config.offchain_worker.enabled { + self.task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-work", + sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { + runtime_api_provider: self.client.clone(), + keystore: Some(self.keystore_container.keystore()), + offchain_db: self.backend.offchain_storage(), + transaction_pool: Some(OffchainTransactionPoolFactory::new( + self.transaction_pool.clone(), + )), + network_provider: self.cumulus.network.clone(), + is_validator: parachain_config.role.is_authority(), + enable_http_requests: false, + custom_extensions: move |_| vec![], + }) + .run(self.client.clone(), self.task_manager.spawn_handle()) + .boxed(), + ); + } + + Ok(()) + } } diff --git a/container-chains/templates/frontier/node/Cargo.toml b/container-chains/templates/frontier/node/Cargo.toml index 7ab6c5189..ff67e4984 100644 --- a/container-chains/templates/frontier/node/Cargo.toml +++ b/container-chains/templates/frontier/node/Cargo.toml @@ -21,10 +21,10 @@ serde = { workspace = true, features = [ "derive" ] } url = { workspace = true } # Local -node-common = { workspace = true } ccp-authorities-noting-inherent = { workspace = true } container-chain-template-frontier-runtime = { workspace = true, features = [ "std" ] } manual-xcm-rpc = { workspace = true } +node-common = { workspace = true } tc-consensus = { workspace = true } # Nimbus @@ -43,7 +43,6 @@ sc-cli = { workspace = true } sc-client-api = { workspace = true } sc-consensus = { workspace = true } sc-consensus-manual-seal = { workspace = true } -sp-debug-derive = { workspace = true } sc-executor = { workspace = true } sc-network = { workspace = true } sc-network-common = { workspace = true } @@ -60,6 +59,7 @@ sp-api = { workspace = true, features = [ "std" ] } sp-block-builder = { workspace = true } sp-blockchain = { workspace = true } sp-consensus = { workspace = true } +sp-debug-derive = { workspace = true } sp-consensus-aura = { workspace = true } sp-core = { workspace = true, features = [ "std" ] } @@ -97,8 +97,8 @@ fc-cli = { workspace = true } fc-consensus = { workspace = true } fc-db = { workspace = true, features = [ "sql" ] } fc-mapping-sync = { workspace = true, features = [ "sql" ] } -fc-rpc = { workspace = true, features = ["txpool"] } -fc-rpc-core = { workspace = true, features = ["txpool"] } +fc-rpc = { workspace = true, features = [ "txpool" ] } +fc-rpc-core = { workspace = true, features = [ "txpool" ] } fc-storage = { workspace = true } fp-evm = { workspace = true } fp-rpc = { workspace = true } @@ -108,11 +108,7 @@ substrate-build-script-utils = { workspace = true } [features] default = [] -runtime-benchmarks = [ - "container-chain-template-frontier-runtime/runtime-benchmarks", - 
"pallet-ethereum/runtime-benchmarks", - "polkadot-cli/runtime-benchmarks" -] +runtime-benchmarks = [ "container-chain-template-frontier-runtime/runtime-benchmarks", "pallet-ethereum/runtime-benchmarks", "polkadot-cli/runtime-benchmarks" ] try-runtime = [ "container-chain-template-frontier-runtime/try-runtime", "try-runtime-cli/try-runtime", diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs index 99d31eeae..4b79a51be 100644 --- a/container-chains/templates/frontier/node/src/service.rs +++ b/container-chains/templates/frontier/node/src/service.rs @@ -67,7 +67,7 @@ use { sc_telemetry::{Telemetry, TelemetryWorkerHandle}, }; -use node_common::service::NewPartial; +use node_common::service::NodeBuilder; /// Native executor type. use crate::client::TemplateRuntimeExecutor; @@ -161,64 +161,66 @@ pub fn new_partial( // Use ethereum style for subscription ids config.rpc_id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider)); - let NewPartial { - client, - backend, - transaction_pool, - telemetry, - telemetry_worker_handle, - task_manager, - keystore_container, - } = node_common::service::new_partial(config)?; - - let maybe_select_chain = if dev_service { - Some(sc_consensus::LongestChain::new(backend.clone())) - } else { - None - }; - - let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new()))); - let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new())); - - let frontier_backend = fc_db::Backend::KeyValue(open_frontier_backend(client.clone(), config)?); - - let frontier_block_import = FrontierBlockImport::new(client.clone(), client.clone()); - - let parachain_block_import = cumulus_client_consensus_common::ParachainBlockImport::new( - frontier_block_import, - backend.clone(), - ); - - let import_queue = nimbus_consensus::import_queue( - client.clone(), - parachain_block_import.clone(), - move |_, _| async move { - let time = sp_timestamp::InherentDataProvider::from_system_time(); - - Ok((time,)) - }, - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - !dev_service, - )?; - - Ok(PartialComponents { - backend, - client, - import_queue, - keystore_container, - task_manager, - transaction_pool, - select_chain: maybe_select_chain, - other: ( - parachain_block_import, - filter_pool, - telemetry, - telemetry_worker_handle, - frontier_backend, - fee_history_cache, - ), - }) + todo!() + + // let NodeBuilder { + // client, + // backend, + // transaction_pool, + // telemetry, + // telemetry_worker_handle, + // task_manager, + // keystore_container, + // } = node_common::service::new_partial(config)?; + + // let maybe_select_chain = if dev_service { + // Some(sc_consensus::LongestChain::new(backend.clone())) + // } else { + // None + // }; + + // let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new()))); + // let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new())); + + // let frontier_backend = fc_db::Backend::KeyValue(open_frontier_backend(client.clone(), config)?); + + // let frontier_block_import = FrontierBlockImport::new(client.clone(), client.clone()); + + // let parachain_block_import = cumulus_client_consensus_common::ParachainBlockImport::new( + // frontier_block_import, + // backend.clone(), + // ); + + // let import_queue = nimbus_consensus::import_queue( + // client.clone(), + // parachain_block_import.clone(), + // move |_, _| async move { + // let time = sp_timestamp::InherentDataProvider::from_system_time(); + + // 
Ok((time,)) + // }, + // &task_manager.spawn_essential_handle(), + // config.prometheus_registry(), + // !dev_service, + // )?; + + // Ok(PartialComponents { + // backend, + // client, + // import_queue, + // keystore_container, + // task_manager, + // transaction_pool, + // select_chain: maybe_select_chain, + // other: ( + // parachain_block_import, + // filter_pool, + // telemetry, + // telemetry_worker_handle, + // frontier_backend, + // fee_history_cache, + // ), + // }) } /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. diff --git a/container-chains/templates/frontier/runtime/Cargo.toml b/container-chains/templates/frontier/runtime/Cargo.toml index d87cb0325..870b2f0d5 100644 --- a/container-chains/templates/frontier/runtime/Cargo.toml +++ b/container-chains/templates/frontier/runtime/Cargo.toml @@ -7,14 +7,14 @@ license = "GPL-3.0-only" version = "0.1.0" [package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] +targets = [ "x86_64-unknown-linux-gnu" ] [dependencies] hex-literal = { workspace = true, optional = true } log = { workspace = true } -parity-scale-codec = { workspace = true, features = ["derive"] } -scale-info = { workspace = true, features = ["derive"] } -serde = { workspace = true, optional = true, features = ["derive"] } +parity-scale-codec = { workspace = true, features = [ "derive" ] } +scale-info = { workspace = true, features = [ "derive" ] } +serde = { workspace = true, optional = true, features = [ "derive" ] } smallvec = { workspace = true } # Local @@ -25,7 +25,7 @@ tp-consensus = { workspace = true } # Moonkit nimbus-primitives = { workspace = true } pallet-author-inherent = { workspace = true } -pallet-maintenance-mode = { workspace = true, features = ["xcm-support"] } +pallet-maintenance-mode = { workspace = true, features = [ "xcm-support" ] } pallet-migrations = { workspace = true } xcm-primitives = { workspace = true } @@ -35,14 +35,14 @@ frame-support = { workspace = true } frame-system = { workspace = true } frame-system-rpc-runtime-api = { workspace = true } frame-try-runtime = { workspace = true, optional = true } -pallet-balances = { workspace = true, features = ["insecure_zero_ed"] } +pallet-balances = { workspace = true, features = [ "insecure_zero_ed" ] } +pallet-proxy = { workspace = true } pallet-root-testing = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-utility = { workspace = true } -pallet-proxy = { workspace = true } sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } @@ -78,10 +78,10 @@ cumulus-primitives-utility = { workspace = true } parachain-info = { workspace = true } # Frontier -fp-account = { workspace = true, features = ["serde"] } -fp-evm = { workspace = true, features = ["serde"] } +fp-account = { workspace = true, features = [ "serde" ] } +fp-evm = { workspace = true, features = [ "serde" ] } fp-rpc = { workspace = true } -fp-self-contained = { workspace = true, features = ["serde"] } +fp-self-contained = { workspace = true, features = [ "serde" ] } pallet-base-fee = { workspace = true } pallet-dynamic-fee = { workspace = true } pallet-ethereum = { workspace = true } @@ -95,7 +95,7 @@ pallet-hotfix-sufficients = { workspace = true } substrate-wasm-builder = { workspace = true } [features] -default = ["std"] +default = [ "std" ] std = [ "ccp-xcm/std", 
"cumulus-pallet-dmp-queue/std", @@ -151,15 +151,15 @@ std = [ "sp-std/std", "sp-transaction-pool/std", "sp-version/std", - "tp-consensus/std", "staging-xcm-builder/std", "staging-xcm-executor/std", "staging-xcm/std", + "tp-consensus/std", "xcm-primitives/std", ] # Allow to print logs details (no wasm:stripped) -force-debug = ["sp-debug-derive/force-debug"] +force-debug = [ "sp-debug-derive/force-debug" ] runtime-benchmarks = [ "cumulus-pallet-session-benchmarking/runtime-benchmarks", @@ -191,11 +191,11 @@ try-runtime = [ "pallet-evm/try-runtime", "pallet-hotfix-sufficients/try-runtime", "pallet-maintenance-mode/try-runtime", + "pallet-proxy/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", "pallet-utility/try-runtime", - "pallet-proxy/try-runtime", "parachain-info/try-runtime", "polkadot-runtime-common/try-runtime", "sp-runtime/try-runtime", diff --git a/container-chains/templates/simple/node/Cargo.toml b/container-chains/templates/simple/node/Cargo.toml index b598a54cd..0fa7a4659 100644 --- a/container-chains/templates/simple/node/Cargo.toml +++ b/container-chains/templates/simple/node/Cargo.toml @@ -18,8 +18,8 @@ parity-scale-codec = { workspace = true } serde = { workspace = true, features = [ "derive" ] } # Local -node-common = { workspace = true } container-chain-template-simple-runtime = { workspace = true, features = [ "std" ] } +node-common = { workspace = true } tc-consensus = { workspace = true } # Nimbus @@ -85,10 +85,7 @@ substrate-build-script-utils = { workspace = true } [features] default = [] -runtime-benchmarks = [ - "container-chain-template-simple-runtime/runtime-benchmarks", - "polkadot-cli/runtime-benchmarks" -] +runtime-benchmarks = [ "container-chain-template-simple-runtime/runtime-benchmarks", "polkadot-cli/runtime-benchmarks" ] try-runtime = [ "container-chain-template-simple-runtime/try-runtime", "try-runtime-cli/try-runtime", diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs index 6b81d696f..4be4239e6 100644 --- a/container-chains/templates/simple/node/src/service.rs +++ b/container-chains/templates/simple/node/src/service.rs @@ -23,7 +23,7 @@ use {cumulus_client_cli::CollatorOptions, sc_network::config::FullNetworkConfigu // Local Runtime Types use { container_chain_template_simple_runtime::{opaque::Block, RuntimeApi}, - node_common::service::NewPartial, + node_common::service::NodeBuilder, }; // Cumulus Imports @@ -94,41 +94,42 @@ pub fn new_partial( >, sc_service::Error, > { - let NewPartial { - client, - backend, - transaction_pool, - telemetry, - telemetry_worker_handle, - task_manager, - keystore_container, - } = node_common::service::new_partial(config)?; - - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); - - let import_queue = nimbus_consensus::import_queue( - client.clone(), - block_import.clone(), - move |_, _| async move { - let time = sp_timestamp::InherentDataProvider::from_system_time(); - - Ok((time,)) - }, - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - false, - )?; - - Ok(PartialComponents { - backend, - client, - import_queue, - keystore_container, - task_manager, - transaction_pool, - select_chain: (), - other: (block_import, telemetry, telemetry_worker_handle), - }) + todo!() + // let NodeBuilder { + // client, + // backend, + // transaction_pool, + // telemetry, + // telemetry_worker_handle, + // task_manager, + // keystore_container, + // } = 
node_common::service::NodeBuilder::new(config)?; + + // let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); + + // let import_queue = nimbus_consensus::import_queue( + // client.clone(), + // block_import.clone(), + // move |_, _| async move { + // let time = sp_timestamp::InherentDataProvider::from_system_time(); + + // Ok((time,)) + // }, + // &task_manager.spawn_essential_handle(), + // config.prometheus_registry(), + // false, + // )?; + + // Ok(PartialComponents { + // backend, + // client, + // import_queue, + // keystore_container, + // task_manager, + // transaction_pool, + // select_chain: (), + // other: (block_import, telemetry, telemetry_worker_handle), + // }) } /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. diff --git a/container-chains/templates/simple/runtime/Cargo.toml b/container-chains/templates/simple/runtime/Cargo.toml index 91dcd0d69..d398010ec 100644 --- a/container-chains/templates/simple/runtime/Cargo.toml +++ b/container-chains/templates/simple/runtime/Cargo.toml @@ -7,14 +7,14 @@ license = "GPL-3.0-only" version = "0.1.0" [package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] +targets = [ "x86_64-unknown-linux-gnu" ] [dependencies] hex-literal = { workspace = true, optional = true } log = { workspace = true } -parity-scale-codec = { workspace = true, features = ["derive"] } -scale-info = { workspace = true, features = ["derive"] } -serde = { workspace = true, optional = true, features = ["derive"] } +parity-scale-codec = { workspace = true, features = [ "derive" ] } +scale-info = { workspace = true, features = [ "derive" ] } +serde = { workspace = true, optional = true, features = [ "derive" ] } smallvec = { workspace = true } # Local @@ -24,7 +24,7 @@ tp-consensus = { workspace = true } # Moonkit nimbus-primitives = { workspace = true } pallet-author-inherent = { workspace = true } -pallet-maintenance-mode = { workspace = true, features = ["xcm-support"] } +pallet-maintenance-mode = { workspace = true, features = [ "xcm-support" ] } pallet-migrations = { workspace = true } xcm-primitives = { workspace = true } @@ -34,13 +34,13 @@ frame-support = { workspace = true } frame-system = { workspace = true } frame-system-rpc-runtime-api = { workspace = true } pallet-balances = { workspace = true } +pallet-proxy = { workspace = true } pallet-session = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true } pallet-utility = { workspace = true } -pallet-proxy = { workspace = true } sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } @@ -83,7 +83,7 @@ frame-try-runtime = { workspace = true, optional = true } substrate-wasm-builder = { workspace = true } [features] -default = ["std"] +default = [ "std" ] std = [ "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", @@ -127,15 +127,15 @@ std = [ "sp-std/std", "sp-transaction-pool/std", "sp-version/std", - "tp-consensus/std", "staging-xcm-builder/std", "staging-xcm-executor/std", "staging-xcm/std", + "tp-consensus/std", "xcm-primitives/std", ] # Allow to print logs details (no wasm:stripped) -force-debug = ["sp-debug-derive/force-debug"] +force-debug = [ "sp-debug-derive/force-debug" ] runtime-benchmarks = [ "cumulus-pallet-session-benchmarking/runtime-benchmarks", @@ -162,11 +162,11 @@ try-runtime = [ 
"pallet-balances/try-runtime", "pallet-cc-authorities-noting/try-runtime", "pallet-maintenance-mode/try-runtime", + "pallet-proxy/try-runtime", "pallet-session/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "pallet-utility/try-runtime", - "pallet-proxy/try-runtime", "parachain-info/try-runtime", "polkadot-runtime-common/try-runtime", "sp-runtime/try-runtime", diff --git a/node/Cargo.toml b/node/Cargo.toml index 2b88db0b7..fa65ae422 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -22,10 +22,10 @@ serde_json = { workspace = true } tokio = { workspace = true } # Local -node-common = { workspace = true } ccp-authorities-noting-inherent = { workspace = true, features = [ "std" ] } dancebox-runtime = { workspace = true, features = [ "std" ] } manual-xcm-rpc = { workspace = true } +node-common = { workspace = true } pallet-author-noting-runtime-api = { workspace = true, features = [ "std" ] } pallet-collator-assignment-runtime-api = { workspace = true, features = [ "std" ] } pallet-configuration = { workspace = true, features = [ "std" ] } @@ -103,11 +103,7 @@ substrate-build-script-utils = { workspace = true } [features] default = [] -runtime-benchmarks = [ - "dancebox-runtime/runtime-benchmarks", - "polkadot-cli/runtime-benchmarks", - "polkadot-service/runtime-benchmarks" -] +runtime-benchmarks = [ "dancebox-runtime/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", "polkadot-service/runtime-benchmarks" ] try-runtime = [ "dancebox-runtime/try-runtime", "nimbus-primitives/try-runtime", "pallet-configuration/try-runtime", "polkadot-cli/try-runtime", "polkadot-service/try-runtime", "sp-runtime/try-runtime", "try-runtime-cli/try-runtime" ] fast-runtime = [ "dancebox-runtime/fast-runtime" ] diff --git a/node/src/service.rs b/node/src/service.rs index 8f6d89e92..ba368deb4 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -16,7 +16,7 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
-use node_common::service::NewPartial; +use node_common::service::NodeBuilder; #[allow(deprecated)] use { @@ -137,44 +137,45 @@ pub fn new_partial( >, sc_service::Error, > { - let NewPartial { - client, - backend, - transaction_pool, - telemetry, - telemetry_worker_handle, - task_manager, - keystore_container, - } = node_common::service::new_partial(config)?; - - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); - - // The nimbus import queue ONLY checks the signature correctness - // Any other checks corresponding to the author-correctness should be done - // in the runtime - let import_queue = nimbus_consensus::import_queue( - client.clone(), - block_import.clone(), - move |_, _| async move { - let time = sp_timestamp::InherentDataProvider::from_system_time(); - - Ok((time,)) - }, - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - false, - )?; - - Ok(PartialComponents { - backend, - client, - import_queue, - keystore_container, - task_manager, - transaction_pool, - select_chain: None, - other: (block_import, telemetry, telemetry_worker_handle), - }) + todo!() + // let NodeBuilder { + // client, + // backend, + // transaction_pool, + // telemetry, + // telemetry_worker_handle, + // task_manager, + // keystore_container, + // } = node_common::service::NodeBuilder::new(config)?; + + // let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); + + // // The nimbus import queue ONLY checks the signature correctness + // // Any other checks corresponding to the author-correctness should be done + // // in the runtime + // let import_queue = nimbus_consensus::import_queue( + // client.clone(), + // block_import.clone(), + // move |_, _| async move { + // let time = sp_timestamp::InherentDataProvider::from_system_time(); + + // Ok((time,)) + // }, + // &task_manager.spawn_essential_handle(), + // config.prometheus_registry(), + // false, + // )?; + + // Ok(PartialComponents { + // backend, + // client, + // import_queue, + // keystore_container, + // task_manager, + // transaction_pool, + // select_chain: None, + // other: (block_import, telemetry, telemetry_worker_handle), + // }) } /// Background task used to detect changes to container chain assignment, @@ -269,37 +270,88 @@ pub fn new_partial_dev( >, sc_service::Error, > { - let NewPartial { - client, - backend, - transaction_pool, - telemetry, - telemetry_worker_handle, - task_manager, - keystore_container, - } = node_common::service::new_partial(config)?; + todo!() + // let NodeBuilder { + // client, + // backend, + // transaction_pool, + // telemetry, + // telemetry_worker_handle, + // task_manager, + // keystore_container, + // } = node_common::service::NodeBuilder::new(config)?; + + // let block_import = DevParachainBlockImport::new(client.clone()); + // let import_queue = build_manual_seal_import_queue( + // client.clone(), + // block_import.clone(), + // config, + // telemetry.as_ref().map(|telemetry| telemetry.handle()), + // &task_manager, + // )?; + + // let maybe_select_chain = Some(sc_consensus::LongestChain::new(backend.clone())); + + // Ok(PartialComponents { + // backend, + // client, + // import_queue, + // keystore_container, + // task_manager, + // transaction_pool, + // select_chain: maybe_select_chain, + // other: (block_import, telemetry, telemetry_worker_handle), + // }) +} - let block_import = DevParachainBlockImport::new(client.clone()); +/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. 
+///
+/// This is the actual implementation that is abstract over the executor and the runtime API.
+#[sc_tracing::logging::prefix_logs_with("Orchestrator")]
+async fn start_node_impl2(
+    orchestrator_config: Configuration,
+    polkadot_config: Configuration,
+    container_chain_config: Option<(ContainerChainCli, tokio::runtime::Handle)>,
+    collator_options: CollatorOptions,
+    para_id: ParaId,
+    hwbench: Option<sc_sysinfo::HwBench>,
+) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient>)> {
+    let parachain_config = prepare_node_config(orchestrator_config);
+
+    // Create a `NodeBuilder` which helps set up the common systems of a parachain node.
+    let node_builder = node_common::service::NodeBuilder::new(
+        &parachain_config,
+        polkadot_config,
+        collator_options.clone(),
+        hwbench.clone(),
+    )
+    .await?;
+
+    // This node's block import.
+    let block_import = DevParachainBlockImport::new(node_builder.client.clone());
     let import_queue = build_manual_seal_import_queue(
-        client.clone(),
+        node_builder.client.clone(),
         block_import.clone(),
-        config,
-        telemetry.as_ref().map(|telemetry| telemetry.handle()),
-        &task_manager,
+        &parachain_config,
+        node_builder
+            .telemetry
+            .as_ref()
+            .map(|telemetry| telemetry.handle()),
+        &node_builder.task_manager,
     )?;
 
-    let maybe_select_chain = Some(sc_consensus::LongestChain::new(backend.clone()));
+    // Upgrade the NodeBuilder with cumulus capabilities using our block import.
+    let mut node_builder = node_builder
+        .build_cumulus_network(&parachain_config, para_id, import_queue)
+        .await?;
 
-    Ok(PartialComponents {
-        backend,
-        client,
-        import_queue,
-        keystore_container,
-        task_manager,
-        transaction_pool,
-        select_chain: maybe_select_chain,
-        other: (block_import, telemetry, telemetry_worker_handle),
-    })
+    node_builder.spawn_common_tasks(&parachain_config)?;
+
+    // let maybe_select_chain = Some(sc_consensus::LongestChain::new(
+    //     node_builder.backend.clone(),
+    // ));
+
+    todo!()
 }
 
 /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. 
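The hunk above is the heart of the refactor: `new_partial` is replaced by a consuming builder whose generic state records which subsystems have been created. A minimal sketch of the intended call sequence at this point in the series, assuming the signatures introduced above; `MyRuntimeApi`, `MyExecutor`, and `my_import_queue` are illustrative placeholders, not part of the patch:

// Sketch only: NodeBuilder state progression, with assumed names marked as such.
async fn flow(
    parachain_config: Configuration,
    polkadot_config: Configuration,
    collator_options: CollatorOptions,
    para_id: ParaId,
) -> sc_service::error::Result<()> {
    // State 1: `Cumulus = ()`, no network subsystems yet.
    let node_builder = NodeBuilder::<Block, MyRuntimeApi, MyExecutor>::new(
        &parachain_config,
        polkadot_config,
        collator_options,
        None, // hwbench
    )
    .await?;

    // The import queue differs per node, so the caller assembles it from
    // the parts the builder exposes (client, task manager, telemetry, ...).
    let import_queue = my_import_queue(&node_builder)?;

    // State 2: `Cumulus = CumulusNetwork<Block>`; cumulus networking built.
    let mut node_builder = node_builder
        .build_cumulus_network(&parachain_config, para_id, import_queue)
        .await?;

    // Spawns offchain workers and other tasks shared by all nodes.
    node_builder.spawn_common_tasks(&parachain_config)?;
    Ok(())
}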
diff --git a/pallets/collator-assignment/Cargo.toml b/pallets/collator-assignment/Cargo.toml
index 0e55cb101..be1c8bf7c 100644
--- a/pallets/collator-assignment/Cargo.toml
+++ b/pallets/collator-assignment/Cargo.toml
@@ -9,10 +9,13 @@ version = "0.1.0"
 [package.metadata.docs.rs]
 targets = [ "x86_64-unknown-linux-gnu" ]
 
 [dependencies]
+frame-benchmarking = { workspace = true }
 frame-support = { workspace = true }
 frame-system = { workspace = true }
 log = { workspace = true }
 parity-scale-codec = { workspace = true, features = [ "derive", "max-encoded-len" ] }
+rand = { workspace = true }
+rand_chacha = { workspace = true }
 scale-info = { workspace = true }
 serde = { workspace = true, optional = true, features = [ "derive" ] }
 sp-core = { workspace = true }
@@ -20,9 +23,6 @@ sp-runtime = { workspace = true }
 sp-std = { workspace = true }
 tp-collator-assignment = { workspace = true }
 tp-traits = { workspace = true }
-frame-benchmarking = { workspace = true }
-rand = { workspace = true }
-rand_chacha = { workspace = true }
 
 [dev-dependencies]
 sp-io = { workspace = true }
@@ -36,10 +36,10 @@ std = [
     "parity-scale-codec/std",
     "scale-info/std",
     "serde",
-    "tp-collator-assignment/std",
-    "tp-traits/std",
     "sp-runtime/std",
     "sp-std/std",
+    "tp-collator-assignment/std",
+    "tp-traits/std",
 ]
 runtime-benchmarks = [
     "frame-benchmarking/runtime-benchmarks",
diff --git a/pallets/pooled-staking/Cargo.toml b/pallets/pooled-staking/Cargo.toml
index 31bdfb081..4b2da6707 100644
--- a/pallets/pooled-staking/Cargo.toml
+++ b/pallets/pooled-staking/Cargo.toml
@@ -52,4 +52,4 @@ std = [
     "tp-traits/std",
 ]
 runtime-benchmarks = [ "frame-benchmarking" ]
-try-runtime = [ "frame-support/try-runtime" ]
\ No newline at end of file
+try-runtime = [ "frame-support/try-runtime" ]
diff --git a/pallets/registrar/Cargo.toml b/pallets/registrar/Cargo.toml
index c3502a480..c922f4850 100644
--- a/pallets/registrar/Cargo.toml
+++ b/pallets/registrar/Cargo.toml
@@ -31,6 +31,6 @@ sp-runtime = { workspace = true }
 
 [features]
 default = [ "std" ]
-std = [ "serde/std", "pallet-configuration/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", "pallet-balances/std", "parity-scale-codec/std", "scale-info/std", "tp-container-chain-genesis-data/std", "tp-traits/std" ]
+std = [ "frame-benchmarking/std", "frame-support/std", "frame-system/std", "pallet-balances/std", "pallet-configuration/std", "parity-scale-codec/std", "scale-info/std", "serde/std", "tp-container-chain-genesis-data/std", "tp-traits/std" ]
 runtime-benchmarks = [ "frame-benchmarking", "tp-traits/runtime-benchmarks" ]
 try-runtime = [ "frame-support/try-runtime" ]
diff --git a/primitives/container-chain-genesis-data/Cargo.toml b/primitives/container-chain-genesis-data/Cargo.toml
index 627c8df53..23a87b3d2 100644
--- a/primitives/container-chain-genesis-data/Cargo.toml
+++ b/primitives/container-chain-genesis-data/Cargo.toml
@@ -35,5 +35,5 @@ polkadot-primitives = { workspace = true, optional = true }
 
 [features]
 default = [ "std" ]
-std = [ "serde/std", "cumulus-primitives-core/std", "frame-support/std", "parity-scale-codec/std", "polkadot-primitives", "serde/std", "sp-core/std", "sp-runtime/std", "sp-state-machine/std", "sp-std/std", "sp-trie/std", "tp-traits/std" ]
+std = [ "cumulus-primitives-core/std", "frame-support/std", "parity-scale-codec/std", "polkadot-primitives", "serde/std", "sp-core/std", "sp-runtime/std", "sp-state-machine/std", "sp-std/std", "sp-trie/std", "tp-traits/std" ]
 json = [ "hex", "serde_json" ]
diff --git 
a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 996071a5c..7db631602 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -12,12 +12,12 @@ targets = [ "x86_64-unknown-linux-gnu" ] hex-literal = { workspace = true } # Substrate +frame-support = { workspace = true } parity-scale-codec = { workspace = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } -frame-support = { workspace = true } # Cumulus cumulus-primitives-core = { workspace = true } diff --git a/primitives/traits/Cargo.toml b/primitives/traits/Cargo.toml index deb38c889..6307bcd0f 100644 --- a/primitives/traits/Cargo.toml +++ b/primitives/traits/Cargo.toml @@ -16,7 +16,7 @@ cumulus-primitives-core = { workspace = true } [features] default = [ "std" ] std = [ - "frame-support/std", "cumulus-primitives-core/std", + "frame-support/std", ] runtime-benchmarks = [] diff --git a/runtime/dancebox/Cargo.toml b/runtime/dancebox/Cargo.toml index 085d46251..66e7f1fc6 100644 --- a/runtime/dancebox/Cargo.toml +++ b/runtime/dancebox/Cargo.toml @@ -124,67 +124,7 @@ substrate-wasm-builder = { workspace = true } default = [ "std", ] -std = [ - "cumulus-pallet-dmp-queue/std", - "cumulus-pallet-parachain-system/std", - "cumulus-pallet-xcm/std", - "cumulus-pallet-xcmp-queue/std", - "cumulus-primitives-core/std", - "cumulus-primitives-timestamp/std", - "cumulus-primitives-utility/std", - "frame-executive/std", - "frame-support/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime/std", - "log/std", - "nimbus-primitives/std", - "pallet-author-noting-runtime-api/std", - "pallet-author-noting/std", - "pallet-authority-assignment/std", - "pallet-balances/std", - "pallet-collator-assignment-runtime-api/std", - "pallet-configuration/std", - "pallet-inflation-rewards/std", - "pallet-initializer/std", - "pallet-invulnerables/std", - "pallet-maintenance-mode/std", - "pallet-migrations/std", - "pallet-proxy/std", - "pallet-registrar-runtime-api/std", - "pallet-registrar/std", - "pallet-session/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-utility/std", - "pallet-xcm/std", - "parachain-info/std", - "parity-scale-codec/std", - "polkadot-parachain-primitives/std", - "polkadot-runtime-common/std", - "scale-info/std", - "serde", - "sp-api/std", - "sp-application-crypto/std", - "sp-application-crypto/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-inherents/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-std/std", - "sp-transaction-pool/std", - "sp-version/std", - "tp-author-noting-inherent/std", - "tp-consensus/std", - "tp-traits/std", - "staging-xcm-builder/std", - "staging-xcm-executor/std", - "staging-xcm/std", - "xcm-primitives/std" -] +std = [ "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-core/std", "cumulus-primitives-timestamp/std", "cumulus-primitives-utility/std", "frame-executive/std", "frame-support/std", "frame-system-rpc-runtime-api/std", "frame-system/std", "frame-try-runtime/std", "log/std", "nimbus-primitives/std", "pallet-author-noting-runtime-api/std", "pallet-author-noting/std", "pallet-authority-assignment/std", "pallet-balances/std", "pallet-collator-assignment-runtime-api/std", "pallet-configuration/std", "pallet-inflation-rewards/std", "pallet-initializer/std", 
"pallet-invulnerables/std", "pallet-maintenance-mode/std", "pallet-migrations/std", "pallet-proxy/std", "pallet-registrar-runtime-api/std", "pallet-registrar/std", "pallet-session/std", "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-utility/std", "pallet-xcm/std", "parachain-info/std", "parity-scale-codec/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", "scale-info/std", "serde", "sp-api/std", "sp-application-crypto/std", "sp-application-crypto/std", "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", "sp-std/std", "sp-transaction-pool/std", "sp-version/std", "staging-xcm-builder/std", "staging-xcm-executor/std", "staging-xcm/std", "tp-author-noting-inherent/std", "tp-consensus/std", "tp-traits/std", "xcm-primitives/std" ] # Allow to print logs details (no wasm:stripped) force-debug = [ "sp-debug-derive/force-debug" ] @@ -203,8 +143,8 @@ runtime-benchmarks = [ "pallet-pooled-staking/runtime-benchmarks", "pallet-registrar/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "staging-xcm-builder/runtime-benchmarks", ] From df1d4f7544d6dedfabf4f3c0a3c1ab4a6e397478 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Mon, 6 Nov 2023 11:24:55 +0100 Subject: [PATCH 08/29] sc_service::spawn_tasks with custom rpc_builder --- client/node-common/src/service.rs | 136 ++++++++++++++++++++++++------ node/src/service.rs | 19 ++++- 2 files changed, 129 insertions(+), 26 deletions(-) diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index 4c3ff78a4..04633032f 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -13,16 +13,13 @@ // You should have received a copy of the GNU General Public License // along with Tanssi. If not, see . - -use {futures::FutureExt, sp_offchain::OffchainWorkerApi}; - use { cumulus_client_cli::CollatorOptions, - cumulus_client_service::{ - build_relay_chain_interface, prepare_node_config, CollatorSybilResistance, - }, + cumulus_client_service::{build_relay_chain_interface, CollatorSybilResistance}, cumulus_primitives_core::ParaId, cumulus_relay_chain_interface::RelayChainInterface, + futures::FutureExt, + jsonrpsee::RpcModule, polkadot_primitives::CollatorPair, sc_client_api::Backend, sc_consensus::ImportQueue, @@ -33,6 +30,7 @@ use { sc_network::{config::FullNetworkConfiguration, NetworkService}, sc_network_sync::SyncingService, sc_network_transactions::TransactionsHandlerController, + sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}, sc_service::{ Configuration, KeystoreContainer, NetworkStarter, TFullBackend, TFullClient, TaskManager, }, @@ -41,6 +39,7 @@ use { sc_utils::mpsc::TracingUnboundedSender, sp_api::ConstructRuntimeApi, sp_block_builder::BlockBuilder, + sp_offchain::OffchainWorkerApi, sp_transaction_pool::runtime_api::TaggedTransactionQueue, std::sync::Arc, }; @@ -67,7 +66,6 @@ macro_rules! 
T { pub struct CumulusNetwork { pub network: Arc>, pub system_rpc_tx: TracingUnboundedSender>, - pub tx_handler_controller: TransactionsHandlerController, pub start_network: NetworkStarter, pub sync_service: Arc>, } @@ -82,6 +80,9 @@ pub struct NodeBuilder< // caller create the `import_queue` using systems contained in `NodeBuilder`, // then call `build_cumulus_network` with it to generate the cumulus systems. Cumulus = (), + // The `TxHandler` is constructed in `build_cumulus_network` + // and is then consumed when calling `spawn_common_tasks`. + TxHandler = (), > where Block: cumulus_primitives_core::BlockT, ParachainNativeExecutor: NativeExecutionDispatch + 'static, @@ -100,10 +101,11 @@ pub struct NodeBuilder< pub collator_key: Option, pub cumulus: Cumulus, + pub tx_handler_controller: TxHandler, } impl - NodeBuilder + NodeBuilder where Block: cumulus_primitives_core::BlockT, ParachainNativeExecutor: NativeExecutionDispatch + 'static, @@ -112,6 +114,7 @@ where + BlockBuilder + cumulus_primitives_core::CollectCollationInfo, { + // Refactor: old new_partial + build_relay_chain_interface pub async fn new( parachain_config: &Configuration, polkadot_config: Configuration, @@ -197,16 +200,25 @@ where relay_chain_interface, collator_key, cumulus: (), + tx_handler_controller: (), }) } + /// Given an import queue, calls `cumulus_client_service::build_network` and + /// stores the returned objects in `self.cumulus` and `self.tx_handler_controller`. pub async fn build_cumulus_network( self, parachain_config: &Configuration, para_id: ParaId, import_queue: impl ImportQueue + 'static, ) -> sc_service::error::Result< - NodeBuilder>, + NodeBuilder< + Block, + RuntimeApi, + ParachainNativeExecutor, + CumulusNetwork, + TransactionsHandlerController, + >, > { let Self { client, @@ -219,6 +231,7 @@ where relay_chain_interface, collator_key, cumulus: (), + tx_handler_controller: (), } = self; let net_config = FullNetworkConfiguration::new(¶chain_config.network); @@ -250,48 +263,121 @@ where cumulus: CumulusNetwork { network, system_rpc_tx, - tx_handler_controller, start_network, sync_service, }, + tx_handler_controller, }) } } impl - NodeBuilder> + NodeBuilder< + Block, + RuntimeApi, + ParachainNativeExecutor, + CumulusNetwork, + TransactionsHandlerController, + > where Block: cumulus_primitives_core::BlockT, + Block::Hash: Unpin, + Block::Header: Unpin, ParachainNativeExecutor: NativeExecutionDispatch + 'static, RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static, - T![ConstructedRuntimeApi]: - TaggedTransactionQueue + BlockBuilder + OffchainWorkerApi, + T![ConstructedRuntimeApi]: TaggedTransactionQueue + + BlockBuilder + + OffchainWorkerApi + + sp_api::Metadata + + sp_session::SessionKeys, { - pub fn spawn_common_tasks( - &mut self, - parachain_config: &Configuration, - ) -> sc_service::error::Result<()> { + /// Given an `rpc_builder`, spawns the common tasks of a Substrate + Cumulus + /// node. It consumes `self.tx_handler_controller` in the process. 
+ pub fn spawn_common_tasks( + self, + parachain_config: Configuration, + rpc_builder: Box< + dyn Fn( + DenyUnsafe, + SubscriptionTaskExecutor, + ) -> Result, sc_service::Error>, + >, + ) -> sc_service::error::Result< + NodeBuilder, ()>, + > { + let NodeBuilder { + client, + backend, + transaction_pool, + mut telemetry, + telemetry_worker_handle, + mut task_manager, + keystore_container, + relay_chain_interface, + collator_key, + cumulus: + CumulusNetwork { + network, + system_rpc_tx, + start_network, + sync_service, + }, + tx_handler_controller, + } = self; + if parachain_config.offchain_worker.enabled { - self.task_manager.spawn_handle().spawn( + task_manager.spawn_handle().spawn( "offchain-workers-runner", "offchain-work", sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { - runtime_api_provider: self.client.clone(), - keystore: Some(self.keystore_container.keystore()), - offchain_db: self.backend.offchain_storage(), + runtime_api_provider: client.clone(), + keystore: Some(keystore_container.keystore()), + offchain_db: backend.offchain_storage(), transaction_pool: Some(OffchainTransactionPoolFactory::new( - self.transaction_pool.clone(), + transaction_pool.clone(), )), - network_provider: self.cumulus.network.clone(), + network_provider: network.clone(), is_validator: parachain_config.role.is_authority(), enable_http_requests: false, custom_extensions: move |_| vec![], }) - .run(self.client.clone(), self.task_manager.spawn_handle()) + .run(client.clone(), task_manager.spawn_handle()) .boxed(), ); } - Ok(()) + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + rpc_builder, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + config: parachain_config, + keystore: keystore_container.keystore(), + backend: backend.clone(), + network: network.clone(), + system_rpc_tx: system_rpc_tx.clone(), + tx_handler_controller, + telemetry: telemetry.as_mut(), + sync_service: sync_service.clone(), + })?; + + Ok(NodeBuilder { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + task_manager, + keystore_container, + relay_chain_interface, + collator_key, + cumulus: CumulusNetwork { + network, + system_rpc_tx, + start_network, + sync_service, + }, + tx_handler_controller: (), + }) } } diff --git a/node/src/service.rs b/node/src/service.rs index ba368deb4..dc46b0d84 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -345,7 +345,24 @@ async fn start_node_impl2( .build_cumulus_network(¶chain_config, para_id, import_queue) .await?; - node_builder.spawn_common_tasks(¶chain_config)?; + let rpc_builder = { + let client = node_builder.client.clone(); + let transaction_pool = node_builder.transaction_pool.clone(); + + Box::new(move |deny_unsafe, _| { + let deps = crate::rpc::FullDeps { + client: client.clone(), + pool: transaction_pool.clone(), + deny_unsafe, + command_sink: None, + xcm_senders: None, + }; + + crate::rpc::create_full(deps).map_err(Into::into) + }) + }; + + node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; // let maybe_select_chain = Some(sc_consensus::LongestChain::new( // node_builder.backend.clone(), From c3fca0931a4a19f8afda5a22259a109e64835e21 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Mon, 6 Nov 2023 14:30:18 +0100 Subject: [PATCH 09/29] hwbench --- client/node-common/src/service.rs | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/client/node-common/src/service.rs 
b/client/node-common/src/service.rs index 04633032f..5de2b9295 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -13,11 +13,13 @@ // You should have received a copy of the GNU General Public License // along with Tanssi. If not, see . + use { cumulus_client_cli::CollatorOptions, cumulus_client_service::{build_relay_chain_interface, CollatorSybilResistance}, cumulus_primitives_core::ParaId, cumulus_relay_chain_interface::RelayChainInterface, + frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE, futures::FutureExt, jsonrpsee::RpcModule, polkadot_primitives::CollatorPair, @@ -99,6 +101,7 @@ pub struct NodeBuilder< pub relay_chain_interface: Arc, pub collator_key: Option, + pub hwbench: Option, pub cumulus: Cumulus, pub tx_handler_controller: TxHandler, @@ -199,6 +202,7 @@ where keystore_container, relay_chain_interface, collator_key, + hwbench, cumulus: (), tx_handler_controller: (), }) @@ -230,6 +234,7 @@ where keystore_container, relay_chain_interface, collator_key, + hwbench, cumulus: (), tx_handler_controller: (), } = self; @@ -260,6 +265,7 @@ where keystore_container, relay_chain_interface, collator_key, + hwbench, cumulus: CumulusNetwork { network, system_rpc_tx, @@ -315,6 +321,7 @@ where keystore_container, relay_chain_interface, collator_key, + hwbench, cumulus: CumulusNetwork { network, @@ -325,6 +332,8 @@ where tx_handler_controller, } = self; + let collator = parachain_config.role.is_authority(); + if parachain_config.offchain_worker.enabled { task_manager.spawn_handle().spawn( "offchain-workers-runner", @@ -361,6 +370,27 @@ where sync_service: sync_service.clone(), })?; + if let Some(hwbench) = &hwbench { + sc_sysinfo::print_hwbench(&hwbench); + // Here you can check whether the hardware meets your chains' requirements. Putting a link + // in there and swapping out the requirements for your own are probably a good idea. The + // requirements for a para-chain are dictated by its relay-chain. + if collator && !SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) { + log::warn!( + "⚠️ The hardware does not meet the minimal requirements for role 'Authority'." 
+ ); + } + + if let Some(ref mut telemetry) = telemetry { + let telemetry_handle = telemetry.handle(); + task_manager.spawn_handle().spawn( + "telemetry_hwbench", + None, + sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench.clone()), + ); + } + } + Ok(NodeBuilder { client, backend, @@ -371,6 +401,7 @@ where keystore_container, relay_chain_interface, collator_key, + hwbench, cumulus: CumulusNetwork { network, system_rpc_tx, From bc397ca845ea37418237c42990d13f6dbf2a3c78 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Mon, 6 Nov 2023 15:56:31 +0100 Subject: [PATCH 10/29] use `core_extensions::TypeIdentity` to simplify code --- Cargo.lock | 7 ++ Cargo.toml | 1 + client/node-common/Cargo.toml | 1 + client/node-common/src/service.rs | 119 ++++++++++++++++++------------ 4 files changed, 80 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1d3742b80..fa368431a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1887,6 +1887,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "core_extensions" +version = "1.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92c71dc07c9721607e7a16108336048ee978c3a8b129294534272e8bac96c0ee" + [[package]] name = "cpp_demangle" version = "0.3.5" @@ -6846,6 +6852,7 @@ dependencies = [ "async-io", "async-trait", "clap", + "core_extensions", "cumulus-client-cli", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", diff --git a/Cargo.toml b/Cargo.toml index 225699beb..47c9caec4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -234,6 +234,7 @@ smallvec = "1.10.0" async-io = "1.3" async-trait = "0.1" clap = { version = "4.1.6", default-features = false, features = [ "derive" ] } +core_extensions = "1.5.3" exit-future = { version = "0.2.0" } flume = "0.10.9" futures = { version = "0.3.1" } diff --git a/client/node-common/Cargo.toml b/client/node-common/Cargo.toml index a251f5143..f31f7a275 100644 --- a/client/node-common/Cargo.toml +++ b/client/node-common/Cargo.toml @@ -15,6 +15,7 @@ jsonrpsee = { workspace = true, features = [ "server" ] } log = { workspace = true } parity-scale-codec = { workspace = true } serde = { workspace = true, features = [ "derive" ] } +core_extensions = { workspace = true, features = [ "type_identity" ]} # Local tc-consensus = { workspace = true } diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index 5de2b9295..3596b265a 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -15,6 +15,7 @@ // along with Tanssi. If not, see . use { + core_extensions::TypeIdentity, cumulus_client_cli::CollatorOptions, cumulus_client_service::{build_relay_chain_interface, CollatorSybilResistance}, cumulus_primitives_core::ParaId, @@ -72,6 +73,19 @@ pub struct CumulusNetwork { pub sync_service: Arc>, } +// `Cumulus` and `TxHandler` are types that will change during the life of +// a `NodeBuilder` because they are generated and consumed when calling +// certain functions, with absence of data represented with `()`. Some +// function are implemented only for a given concrete type, which ensure it +// can only be called if the required data is available (generated and not yet +// consumed). 
+// +// While this could be implemented with multiple impl blocks with concrete types, +// we use here `core_extensions::TypeIdentity` which allow to express type +// identity/equality as a trait bound on each function as it removes the +// boilerplate of many impl block with duplicated trait bounds. 2 impl blocks +// are still required since Rust can't infer the types in the `new` function +// that doesn't take `self`. pub struct NodeBuilder< Block, RuntimeApi, @@ -107,6 +121,9 @@ pub struct NodeBuilder< pub tx_handler_controller: TxHandler, } +// `new` function doesn't take self, and the Rust compiler cannot infer that +// only one type T implements `TypeIdentity`. With thus need a separate impl +// block with concrete types `()`. impl NodeBuilder where @@ -115,15 +132,19 @@ where RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static, T![ConstructedRuntimeApi]: TaggedTransactionQueue + BlockBuilder - + cumulus_primitives_core::CollectCollationInfo, { - // Refactor: old new_partial + build_relay_chain_interface + /// Create a new `NodeBuilder` which prepare objects required to launch a + /// node. However it doesn't start anything, and doesn't provide any + /// cumulus-dependent objects (as it requires an import queue, which usually + /// is different for each node). pub async fn new( parachain_config: &Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, hwbench: Option, ) -> Result { + // Refactor: old new_partial + build_relay_chain_interface + let telemetry = parachain_config .telemetry_endpoints .clone() @@ -203,13 +224,27 @@ where relay_chain_interface, collator_key, hwbench, - cumulus: (), - tx_handler_controller: (), + cumulus: TypeIdentity::from_type(()), + tx_handler_controller: TypeIdentity::from_type(()), }) } +} +impl + NodeBuilder +where + Block: cumulus_primitives_core::BlockT, + ParachainNativeExecutor: NativeExecutionDispatch + 'static, + RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static, + T![ConstructedRuntimeApi]: TaggedTransactionQueue + + BlockBuilder + + cumulus_primitives_core::CollectCollationInfo, +{ /// Given an import queue, calls `cumulus_client_service::build_network` and /// stores the returned objects in `self.cumulus` and `self.tx_handler_controller`. + /// + /// Can only be called once on a `NodeBuilder` that doesn't have yet cumulus + /// data. pub async fn build_cumulus_network( self, parachain_config: &Configuration, @@ -223,7 +258,11 @@ where CumulusNetwork, TransactionsHandlerController, >, - > { + > + where + Cumulus: TypeIdentity, + TxHandler: TypeIdentity, + { let Self { client, backend, @@ -235,8 +274,8 @@ where relay_chain_interface, collator_key, hwbench, - cumulus: (), - tx_handler_controller: (), + cumulus: _, + tx_handler_controller: _, } = self; let net_config = FullNetworkConfiguration::new(¶chain_config.network); @@ -275,30 +314,11 @@ where tx_handler_controller, }) } -} -impl - NodeBuilder< - Block, - RuntimeApi, - ParachainNativeExecutor, - CumulusNetwork, - TransactionsHandlerController, - > -where - Block: cumulus_primitives_core::BlockT, - Block::Hash: Unpin, - Block::Header: Unpin, - ParachainNativeExecutor: NativeExecutionDispatch + 'static, - RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static, - T![ConstructedRuntimeApi]: TaggedTransactionQueue - + BlockBuilder - + OffchainWorkerApi - + sp_api::Metadata - + sp_session::SessionKeys, -{ /// Given an `rpc_builder`, spawns the common tasks of a Substrate + Cumulus - /// node. It consumes `self.tx_handler_controller` in the process. + /// node. 
It consumes `self.tx_handler_controller` in the process, which means + /// it can only be called once, and any other code that would need this + /// controller should interact with it before calling this function. pub fn spawn_common_tasks( self, parachain_config: Configuration, @@ -310,7 +330,18 @@ where >, ) -> sc_service::error::Result< NodeBuilder, ()>, - > { + > + where + Cumulus: TypeIdentity>, + TxHandler: TypeIdentity>, + Block::Hash: Unpin, + Block::Header: Unpin, + T![ConstructedRuntimeApi]: TaggedTransactionQueue + + BlockBuilder + + OffchainWorkerApi + + sp_api::Metadata + + sp_session::SessionKeys, + { let NodeBuilder { client, backend, @@ -322,16 +353,13 @@ where relay_chain_interface, collator_key, hwbench, - cumulus: - CumulusNetwork { - network, - system_rpc_tx, - start_network, - sync_service, - }, + cumulus, tx_handler_controller, } = self; + let cumulus = TypeIdentity::into_type(cumulus); + let tx_handler_controller = TypeIdentity::into_type(tx_handler_controller); + let collator = parachain_config.role.is_authority(); if parachain_config.offchain_worker.enabled { @@ -345,7 +373,7 @@ where transaction_pool: Some(OffchainTransactionPoolFactory::new( transaction_pool.clone(), )), - network_provider: network.clone(), + network_provider: cumulus.network.clone(), is_validator: parachain_config.role.is_authority(), enable_http_requests: false, custom_extensions: move |_| vec![], @@ -363,11 +391,11 @@ where config: parachain_config, keystore: keystore_container.keystore(), backend: backend.clone(), - network: network.clone(), - system_rpc_tx: system_rpc_tx.clone(), + network: cumulus.network.clone(), + system_rpc_tx: cumulus.system_rpc_tx.clone(), tx_handler_controller, telemetry: telemetry.as_mut(), - sync_service: sync_service.clone(), + sync_service: cumulus.sync_service.clone(), })?; if let Some(hwbench) = &hwbench { @@ -402,13 +430,8 @@ where relay_chain_interface, collator_key, hwbench, - cumulus: CumulusNetwork { - network, - system_rpc_tx, - start_network, - sync_service, - }, - tx_handler_controller: (), + cumulus: TypeIdentity::from_type(cumulus), + tx_handler_controller: TypeIdentity::from_type(()), }) } } From 5df2e955da65cf167f0b82ab7032cd1959f3730e Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:22:44 +0100 Subject: [PATCH 11/29] support substrate/cumulus network + manual_seal --- Cargo.lock | 1 + client/node-common/Cargo.toml | 3 +- client/node-common/src/service.rs | 315 +++++++++++++++--- .../templates/frontier/node/src/service.rs | 10 +- node/src/cli.rs | 3 +- node/src/service.rs | 136 +++++--- 6 files changed, 357 insertions(+), 111 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fa368431a..e24a199ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6861,6 +6861,7 @@ dependencies = [ "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-relay-chain-interface", + "flume", "frame-benchmarking", "frame-benchmarking-cli", "futures 0.3.28", diff --git a/client/node-common/Cargo.toml b/client/node-common/Cargo.toml index f31f7a275..78b7c44d5 100644 --- a/client/node-common/Cargo.toml +++ b/client/node-common/Cargo.toml @@ -10,12 +10,13 @@ version = "0.1.0" async-io = { workspace = true } async-trait = { workspace = true } clap = { workspace = true, features = [ "derive" ] } +core_extensions = { workspace = true, features = [ "type_identity" ] } +flume = { workspace = true } futures = { workspace = true } jsonrpsee = { workspace = true, features = [ "server" ] } log = { workspace 
= true } parity-scale-codec = { workspace = true } serde = { workspace = true, features = [ "derive" ] } -core_extensions = { workspace = true, features = [ "type_identity" ]} # Local tc-consensus = { workspace = true } diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index 3596b265a..832c3c0b3 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -15,17 +15,22 @@ // along with Tanssi. If not, see . use { + async_io::Timer, + core::time::Duration, core_extensions::TypeIdentity, cumulus_client_cli::CollatorOptions, cumulus_client_service::{build_relay_chain_interface, CollatorSybilResistance}, cumulus_primitives_core::ParaId, cumulus_relay_chain_interface::RelayChainInterface, frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE, - futures::FutureExt, + futures::{channel::mpsc, FutureExt, Stream, StreamExt}, jsonrpsee::RpcModule, polkadot_primitives::CollatorPair, sc_client_api::Backend, - sc_consensus::ImportQueue, + sc_consensus::{block_import, BlockImport, ImportQueue}, + sc_consensus_manual_seal::{ + run_manual_seal, ConsensusDataProvider, EngineCommand, ManualSealParams, + }, sc_executor::{ HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, @@ -42,9 +47,13 @@ use { sc_utils::mpsc::TracingUnboundedSender, sp_api::ConstructRuntimeApi, sp_block_builder::BlockBuilder, + sp_consensus::{EnableProofRecording, SelectChain}, + sp_core::H256, + sp_inherents::CreateInherentDataProviders, sp_offchain::OffchainWorkerApi, + sp_runtime::Percent, sp_transaction_pool::runtime_api::TaggedTransactionQueue, - std::sync::Arc, + std::{str::FromStr, sync::Arc}, }; /// Functions in this module are generic over `Block`, `RuntimeApi`, and @@ -66,7 +75,7 @@ macro_rules! T { } } -pub struct CumulusNetwork { +pub struct Network { pub network: Arc>, pub system_rpc_tx: TracingUnboundedSender>, pub start_network: NetworkStarter, @@ -90,13 +99,13 @@ pub struct NodeBuilder< Block, RuntimeApi, ParachainNativeExecutor, - // `cumulus_client_service::build_network` returns many important systems, + // `(cumulus_client_service/sc_service)::build_network` returns many important systems, // but can only be called with an `import_queue` which can be different in // each node. For that reason it is a `()` when calling `new`, then the // caller creates the `import_queue` using systems contained in `NodeBuilder`, // then calls `build_cumulus_network` with it to generate the cumulus systems. - Cumulus = (), - // The `TxHandler` is constructed in `build_cumulus_network` + Network = (), - // The `TxHandler` is constructed in `build_cumulus_network` + // The `TxHandler` is constructed in `build_X_network` // and is then consumed when calling `spawn_common_tasks`. TxHandler = (), > where @@ -113,11 +122,10 @@ pub struct NodeBuilder< pub telemetry: Option, pub telemetry_worker_handle: Option, - pub relay_chain_interface: Arc, - pub collator_key: Option, pub hwbench: Option, + pub prometheus_registry: Option, - pub cumulus: Cumulus, + pub network: Network, pub tx_handler_controller: TxHandler, } @@ -130,8 +138,7 @@ where Block: cumulus_primitives_core::BlockT, ParachainNativeExecutor: NativeExecutionDispatch + 'static, RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static, - T![ConstructedRuntimeApi]: TaggedTransactionQueue - + BlockBuilder + T![ConstructedRuntimeApi]: TaggedTransactionQueue + BlockBuilder, { /// Create a new `NodeBuilder` which prepares objects required to launch a /// node.
However it doesn't start anything, and doesn't provide any @@ -139,8 +146,6 @@ where /// is different for each node). pub async fn new( parachain_config: &Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, hwbench: Option, ) -> Result { // Refactor: old new_partial + build_relay_chain_interface @@ -177,7 +182,7 @@ where let executor = ::new_with_wasm_executor(wasm); - let (client, backend, keystore_container, mut task_manager) = + let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( parachain_config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), @@ -202,17 +207,6 @@ where client.clone(), ); - let (relay_chain_interface, collator_key) = build_relay_chain_interface( - polkadot_config, - &parachain_config, - telemetry_worker_handle.clone(), - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - Ok(Self { client, backend, @@ -221,17 +215,16 @@ where telemetry_worker_handle, task_manager, keystore_container, - relay_chain_interface, - collator_key, hwbench, - cumulus: TypeIdentity::from_type(()), + prometheus_registry: parachain_config.prometheus_registry().cloned(), + network: TypeIdentity::from_type(()), tx_handler_controller: TypeIdentity::from_type(()), }) } } -impl - NodeBuilder +impl + NodeBuilder where Block: cumulus_primitives_core::BlockT, ParachainNativeExecutor: NativeExecutionDispatch + 'static, @@ -240,28 +233,51 @@ where + BlockBuilder + cumulus_primitives_core::CollectCollationInfo, { + pub async fn build_relay_chain_interface( + &mut self, + parachain_config: &Configuration, + polkadot_config: Configuration, + collator_options: CollatorOptions, + ) -> sc_service::error::Result<( + Arc<(dyn RelayChainInterface + 'static)>, + Option, + )> { + build_relay_chain_interface( + polkadot_config, + &parachain_config, + self.telemetry_worker_handle.clone(), + &mut self.task_manager, + collator_options.clone(), + self.hwbench.clone(), + ) + .await + .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>)) + } + /// Given an import queue, calls `cumulus_client_service::build_network` and - /// stores the returned objects in `self.cumulus` and `self.tx_handler_controller`. + /// stores the returned objects in `self.network` and `self.tx_handler_controller`. /// - /// Can only be called once on a `NodeBuilder` that doesn't have yet cumulus + /// Can only be called once on a `NodeBuilder` that doesn't yet have network /// data.
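As an aside for readers unfamiliar with the pattern: the "can only be called once" guarantee above comes from type-state, not from a runtime flag. The network slot starts as `()` and the method is only available while the slot is still `()`, returning a builder whose slot is filled. A minimal, self-contained sketch of the idea (illustrative names only, not the actual `NodeBuilder` API):

    // The `network` slot starts as `()`; the build method exists only on
    // the `()` state and moves the builder into the "network built" state.
    struct Builder<NetworkSlot> {
        network: NetworkSlot,
    }

    struct BuiltNetwork {
        peer_count: u32,
    }

    impl Builder<()> {
        fn new() -> Self {
            Builder { network: () }
        }

        // Consumes `self`, so a second call on the result cannot compile.
        fn build_network(self) -> Builder<BuiltNetwork> {
            Builder {
                network: BuiltNetwork { peer_count: 0 },
            }
        }
    }

    fn main() {
        let builder = Builder::new().build_network();
        // builder.build_network(); // error: no such method on Builder<BuiltNetwork>
        println!("peers: {}", builder.network.peer_count);
    }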
- pub async fn build_cumulus_network( + pub async fn build_cumulus_network( self, parachain_config: &Configuration, para_id: ParaId, import_queue: impl ImportQueue + 'static, + relay_chain_interface: RCInterface, ) -> sc_service::error::Result< NodeBuilder< Block, RuntimeApi, ParachainNativeExecutor, - CumulusNetwork, + Network, TransactionsHandlerController, >, > where - Cumulus: TypeIdentity, + NetworkT: TypeIdentity, TxHandler: TypeIdentity, + RCInterface: RelayChainInterface + Clone + 'static, { let Self { client, backend, transaction_pool, telemetry, telemetry_worker_handle, task_manager, keystore_container, - relay_chain_interface, - collator_key, hwbench, - cumulus: _, + prometheus_registry, + network: _, tx_handler_controller: _, } = self; @@ -288,7 +303,7 @@ where spawn_handle: task_manager.spawn_handle(), import_queue: import_queue, para_id, - relay_chain_interface: relay_chain_interface.clone(), + relay_chain_interface: relay_chain_interface, net_config, sybil_resistance_level: CollatorSybilResistance::Resistant, }) @@ -302,10 +317,79 @@ where telemetry_worker_handle, task_manager, keystore_container, - relay_chain_interface, - collator_key, hwbench, - cumulus: CumulusNetwork { + prometheus_registry, + network: Network { network, system_rpc_tx, start_network, sync_service, }, tx_handler_controller, }) } + + /// Given an import queue, calls `cumulus_client_service::build_network` and + /// stores the returned objects in `self.network` and `self.tx_handler_controller`. + /// + /// Can only be called once on a `NodeBuilder` that doesn't yet have network + /// data. + pub fn build_substrate_network( + self, + parachain_config: &Configuration, + import_queue: impl ImportQueue + 'static, + ) -> sc_service::error::Result< + NodeBuilder< + Block, + RuntimeApi, + ParachainNativeExecutor, + Network, + TransactionsHandlerController, + >, + > + where + NetworkT: TypeIdentity, + TxHandler: TypeIdentity, + { + let Self { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + task_manager, + keystore_container, + hwbench, + prometheus_registry, + network: _, + tx_handler_controller: _, + } = self; + + let net_config = FullNetworkConfiguration::new(&parachain_config.network); + + let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: parachain_config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue: import_queue, + warp_sync_params: None, + block_announce_validator_builder: None, + net_config, + })?; + + Ok(NodeBuilder { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + task_manager, + keystore_container, + hwbench, + prometheus_registry, + network: Network { network, system_rpc_tx, start_network, sync_service, }, + tx_handler_controller, }) } /// Given an `rpc_builder`, spawns the common tasks of a Substrate + Cumulus /// node. It consumes `self.tx_handler_controller` in the process, which means /// it can only be called once, and any other code that would need this /// controller should interact with it before calling this function.
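The move semantics described in that comment are worth spelling out: because the method takes the controller by value, the compiler itself enforces the "interact with it before calling this function" rule. A hedged, std-only illustration (the `Controller` type and function name here are made up for the example):

    struct Controller;

    impl Controller {
        fn configure(&mut self) {
            // Interact with the controller while it is still owned here.
        }
    }

    // Takes the controller by value, mirroring how the builder method
    // consumes `self.tx_handler_controller`.
    fn spawn_common_tasks(_controller: Controller) {}

    fn main() {
        let mut controller = Controller;
        controller.configure(); // fine: happens before the move
        spawn_common_tasks(controller); // `controller` is moved here
        // controller.configure(); // would fail to compile: use after move
    }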
+ #[must_use] pub fn spawn_common_tasks( self, parachain_config: Configuration, @@ -329,10 +414,10 @@ where ) -> Result, sc_service::Error>, >, ) -> sc_service::error::Result< - NodeBuilder, ()>, + NodeBuilder, ()>, > where - Cumulus: TypeIdentity>, + NetworkT: TypeIdentity>, TxHandler: TypeIdentity>, Block::Hash: Unpin, Block::Header: Unpin, @@ -350,10 +435,9 @@ where telemetry_worker_handle, mut task_manager, keystore_container, - relay_chain_interface, - collator_key, hwbench, - cumulus, + prometheus_registry, + network: cumulus, tx_handler_controller, } = self; @@ -383,7 +467,7 @@ where ); } - sc_service::spawn_tasks(sc_service::SpawnTasksParams { + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { rpc_builder, client: client.clone(), transaction_pool: transaction_pool.clone(), @@ -427,11 +511,138 @@ where telemetry_worker_handle, task_manager, keystore_container, - relay_chain_interface, - collator_key, hwbench, - cumulus: TypeIdentity::from_type(cumulus), + prometheus_registry, + network: TypeIdentity::from_type(cumulus), tx_handler_controller: TypeIdentity::from_type(()), }) } + + pub fn install_manual_seal( + &mut self, + manual_seal_config: ManualSealConfiguration, + ) -> sc_service::error::Result>>> + where + BI: BlockImport + Send + Sync + 'static, + SC: SelectChain + 'static, + CIDP: CreateInherentDataProviders + 'static, + { + let ManualSealConfiguration { + sealing, + soft_deadline, + block_import, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + } = manual_seal_config; + + let prometheus_registry = self.prometheus_registry.clone(); + + let mut env = sc_basic_authorship::ProposerFactory::new( + self.task_manager.spawn_handle(), + self.client.clone(), + self.transaction_pool.clone(), + prometheus_registry.as_ref(), + self.telemetry.as_ref().map(|x| x.handle()), + ); + + // // Create channels for mocked XCM messages. + // let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::>(100); + // let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec)>(100); + // let xcm_senders = Some((downward_xcm_sender, hrmp_xcm_sender)); + let mut command_sink = None; + + if let Some(deadline) = soft_deadline { + env.set_soft_deadline(deadline); + } + + let commands_stream: Box< + dyn Stream> + Send + Sync + Unpin, + > = match sealing { + Sealing::Instant => { + Box::new( + // This bit cribbed from the implementation of instant seal. + self.transaction_pool + .pool() + .validated_pool() + .import_notification_stream() + .map(|_| EngineCommand::SealNewBlock { + create_empty: false, + finalize: false, + parent_hash: None, + sender: None, + }), + ) + } + Sealing::Manual => { + let (sink, stream) = futures::channel::mpsc::channel(1000); + // Keep a reference to the other end of the channel. It goes to the RPC. 
+ command_sink = Some(sink); + Box::new(stream) + } + Sealing::Interval(millis) => Box::new(futures::StreamExt::map( + Timer::interval(Duration::from_millis(millis)), + |_| EngineCommand::SealNewBlock { + create_empty: true, + finalize: false, + parent_hash: None, + sender: None, + }, + )), + }; + + self.task_manager.spawn_essential_handle().spawn_blocking( + "authorship_task", + Some("block-authoring"), + run_manual_seal(ManualSealParams { + block_import, + env, + client: self.client.clone(), + pool: self.transaction_pool.clone(), + commands_stream, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }), + ); + + Ok(command_sink) + } +} + +/// Block authoring scheme to be used by the dev service. +#[derive(Debug, Copy, Clone)] +pub enum Sealing { + /// Author a block immediately upon receiving a transaction into the transaction pool + Instant, + /// Author a block upon receiving an RPC command + Manual, + /// Author blocks at a regular interval specified in milliseconds + Interval(u64), +} + +impl FromStr for Sealing { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(match s { + "instant" => Self::Instant, + "manual" => Self::Manual, + s => { + let millis = s + .parse::() + .map_err(|_| "couldn't decode sealing param")?; + Self::Interval(millis) + } + }) + } +} + +pub struct ManualSealConfiguration { + pub sealing: Sealing, + pub block_import: BI, + pub soft_deadline: Option, + pub select_chain: SC, + pub consensus_data_provider: Option>>, + pub create_inherent_data_providers: CIDP, } diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs index 4b79a51be..9314d9797 100644 --- a/container-chains/templates/frontier/node/src/service.rs +++ b/container-chains/templates/frontier/node/src/service.rs @@ -134,8 +134,8 @@ struct MockTimestampInherentDataProvider; /// Use this macro if you don't actually need the full service, but just the builder in order to /// be able to perform chain operations. pub fn new_partial( - config: &mut Configuration, - dev_service: bool, + _config: &mut Configuration, + _dev_service: bool, ) -> Result< PartialComponents< ParachainClient, @@ -158,11 +158,11 @@ pub fn new_partial( >, sc_service::Error, > { - // Use ethereum style for subscription ids - config.rpc_id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider)); - todo!() + // Use ethereum style for subscription ids + // config.rpc_id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider)); + // let NodeBuilder { // client, // backend, diff --git a/node/src/cli.rs b/node/src/cli.rs index 0310ac28e..6e22566fb 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -15,7 +15,8 @@ // along with Tanssi. If not, see . use { - crate::{chain_spec::RawGenesisConfig, service::Sealing}, + crate::chain_spec::RawGenesisConfig, + node_common::service::Sealing, pallet_registrar_runtime_api::ContainerChainGenesisData, sc_cli::{CliConfiguration, NodeKeyParams, SharedParams}, sc_network::config::MultiaddrWithPeerId, diff --git a/node/src/service.rs b/node/src/service.rs index dc46b0d84..11b116a0f 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -16,8 +16,6 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
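Before moving on to the node-side changes, a quick, runnable sketch of the `--sealing` parsing introduced above may help. This is a condensed copy of the `Sealing` enum and its `FromStr` impl (with an added `PartialEq` derive purely so the asserts compile; the real type lives in `node_common::service`):

    use std::str::FromStr;

    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    enum Sealing {
        Instant,
        Manual,
        Interval(u64),
    }

    impl FromStr for Sealing {
        type Err = String;

        fn from_str(s: &str) -> Result<Self, Self::Err> {
            Ok(match s {
                "instant" => Self::Instant,
                "manual" => Self::Manual,
                // Anything else must be a number of milliseconds; `?`
                // converts the &str error into a String via `From`.
                s => {
                    let millis = s
                        .parse::<u64>()
                        .map_err(|_| "couldn't decode sealing param")?;
                    Self::Interval(millis)
                }
            })
        }
    }

    fn main() {
        assert_eq!("instant".parse::<Sealing>(), Ok(Sealing::Instant));
        assert_eq!("manual".parse::<Sealing>(), Ok(Sealing::Manual));
        assert_eq!("6000".parse::<Sealing>(), Ok(Sealing::Interval(6000)));
        assert!("six".parse::<Sealing>().is_err());
    }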
-use node_common::service::NodeBuilder; - #[allow(deprecated)] use { crate::{ @@ -45,8 +43,9 @@ use { cumulus_relay_chain_interface::RelayChainInterface, dancebox_runtime::{opaque::Block, RuntimeApi}, frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE, - futures::{channel::mpsc, StreamExt}, + futures::{channel::mpsc, FutureExt, StreamExt}, nimbus_primitives::NimbusPair, + node_common::service::{ManualSealConfiguration, NodeBuilder, Sealing}, pallet_registrar_runtime_api::RegistrarApi, polkadot_cli::ProvideRuntimeApi, polkadot_service::Handle, @@ -63,6 +62,7 @@ use { TaskManager, }, sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorkerHandle}, + sc_transaction_pool_api::OffchainTransactionPoolFactory, sp_api::StorageProof, sp_consensus::SyncOracle, sp_core::{ @@ -80,7 +80,6 @@ use { }, tokio::sync::mpsc::{unbounded_channel, UnboundedSender}, }; -use {futures::FutureExt, sc_transaction_pool_api::OffchainTransactionPoolFactory}; type FullBackend = TFullBackend; type MaybeSelectChain = Option>; @@ -308,24 +307,17 @@ pub fn new_partial_dev( /// /// This is the actual implementation that is abstract over the executor and the runtime api. #[sc_tracing::logging::prefix_logs_with("Orchestrator")] -async fn start_node_impl2( +async fn start_dev_node_impl2( orchestrator_config: Configuration, - polkadot_config: Configuration, - container_chain_config: Option<(ContainerChainCli, tokio::runtime::Handle)>, - collator_options: CollatorOptions, - para_id: ParaId, + sealing: Sealing, hwbench: Option, + para_id: ParaId, ) -> sc_service::error::Result<(TaskManager, Arc)> { let parachain_config = prepare_node_config(orchestrator_config); // Create a `NodeBuilder` which helps set up parachain nodes common systems. - let node_builder = node_common::service::NodeBuilder::new( - &parachain_config, - polkadot_config, - collator_options.clone(), - hwbench.clone(), - ) - .await?; + let node_builder = + node_common::service::NodeBuilder::new(&parachain_config, hwbench.clone()).await?; // This node block import. let block_import = DevParachainBlockImport::new(node_builder.client.clone()); let import_queue = build_manual_seal_import_queue( node_builder.client.clone(), block_import.clone(), &parachain_config, node_builder .telemetry .as_ref() .map(|telemetry| telemetry.handle()), &node_builder.task_manager, )?; - // Upgrade the NodeBuilder with cumulus capabilities using our block import. - let mut node_builder = node_builder - .build_cumulus_network(&parachain_config, para_id, import_queue) - .await?; + // Build a Substrate Network.
(not cumulus since it is a dev node, it mocks + // the relay chain) + let mut node_builder = node_builder.build_substrate_network(&parachain_config, import_queue)?; let rpc_builder = { let client = node_builder.client.clone(); @@ -362,11 +353,80 @@ }) }; - node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; + let is_authority = parachain_config.role.is_authority(); + let mut node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; - // let maybe_select_chain = Some(sc_consensus::LongestChain::new( - // node_builder.backend.clone(), - // )); + let mut command_sink = None; + let mut xcm_senders = None; + if is_authority { + let client = node_builder.client.clone(); + let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::>(100); + let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec)>(100); + xcm_senders = Some((downward_xcm_sender, hrmp_xcm_sender)); + + command_sink = node_builder.install_manual_seal(ManualSealConfiguration { + block_import, + sealing, + soft_deadline: Some(SOFT_DEADLINE_PERCENT), + select_chain: sc_consensus::LongestChain::new(node_builder.backend.clone()), + consensus_data_provider: Some(Box::new( + tc_consensus::OrchestratorManualSealAuraConsensusDataProvider::new( + node_builder.client.clone(), + node_builder.keystore_container.keystore(), + para_id, + ), + )), + create_inherent_data_providers: move |block: H256, ()| { + let current_para_block = client + .number(block) + .expect("Header lookup should succeed") + .expect("Header passed in as parent should be present in backend."); + + let para_ids = client + .runtime_api() + .registered_paras(block) + .expect("registered_paras runtime API should exist") + .into_iter() + .collect(); + + let downward_xcm_receiver = downward_xcm_receiver.clone(); + let hrmp_xcm_receiver = hrmp_xcm_receiver.clone(); + + let client_for_xcm = client.clone(); + async move { + let mocked_author_noting = + tp_author_noting_inherent::MockAuthorNotingInherentDataProvider { + current_para_block, + relay_offset: 1000, + relay_blocks_per_para_block: 2, + para_ids, + slots_per_para_block: 1, + }; + + let time = MockTimestampInherentDataProvider; + let mocked_parachain = MockValidationDataInherentDataProvider { + current_para_block, + relay_offset: 1000, + relay_blocks_per_para_block: 2, + // TODO: Recheck + para_blocks_per_relay_epoch: 10, + relay_randomness_config: (), + xcm_config: MockXcmConfig::new( + &*client_for_xcm, + block, + para_id, + Default::default(), + ), + raw_downward_messages: downward_xcm_receiver.drain().collect(), + raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(), + additional_key_values: Some(mocked_author_noting.get_key_values().clone()), + }; + + Ok((time, mocked_parachain, mocked_author_noting)) + } + }, + })?; + } todo!() } /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. /// /// This is the actual implementation that is abstract over the executor and the runtime api. @@ -1602,34 +1662,6 @@ impl IdentifyVariant for Box { } } -/// Block authoring scheme to be used by the dev service.
-#[derive(Debug, Copy, Clone)] -pub enum Sealing { - /// Author a block immediately upon receiving a transaction into the transaction pool - Instant, - /// Author a block upon receiving an RPC command - Manual, - /// Author blocks at a regular interval specified in milliseconds - Interval(u64), -} - -impl FromStr for Sealing { - type Err = String; - - fn from_str(s: &str) -> Result { - Ok(match s { - "instant" => Self::Instant, - "manual" => Self::Manual, - s => { - let millis = s - .parse::() - .map_err(|_| "couldn't decode sealing param")?; - Self::Interval(millis) - } - }) - } -} - /// Orchestrator Parachain Block import. We cannot use the one in cumulus as it overrides the best /// chain selection rule #[derive(Clone)] From f3e7dbc201bcb21a3c25e03498940c9e597b69fc Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Tue, 7 Nov 2023 13:43:56 +0100 Subject: [PATCH 12/29] use sink/senders in rpc builder --- node/src/service.rs | 93 ++++++++++++++++++++++++--------------------- 1 file changed, 49 insertions(+), 44 deletions(-) diff --git a/node/src/service.rs b/node/src/service.rs index 11b116a0f..543c7e3ce 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -114,6 +114,27 @@ thread_local!(static TIMESTAMP: std::cell::RefCell = std::cell::RefCell::ne /// Provide a mock duration starting at 0 in millisecond for timestamp inherent. /// Each call will increment timestamp by slot_duration making Aura think time has passed. struct MockTimestampInherentDataProvider; +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider { + async fn provide_inherent_data( + &self, + inherent_data: &mut sp_inherents::InherentData, + ) -> Result<(), sp_inherents::Error> { + TIMESTAMP.with(|x| { + *x.borrow_mut() += dancebox_runtime::SLOT_DURATION; + inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow()) + }) + } + + async fn try_handle_error( + &self, + _identifier: &sp_inherents::InherentIdentifier, + _error: &[u8], + ) -> Option> { + // The pallet never reports error. + None + } +} /// Starts a `ServiceBuilder` for a full service. /// @@ -336,29 +357,11 @@ async fn start_dev_node_impl2( // the relay chain) let mut node_builder = node_builder.build_substrate_network(&parachain_config, import_queue)?; - let rpc_builder = { - let client = node_builder.client.clone(); - let transaction_pool = node_builder.transaction_pool.clone(); - - Box::new(move |deny_unsafe, _| { - let deps = crate::rpc::FullDeps { - client: client.clone(), - pool: transaction_pool.clone(), - deny_unsafe, - command_sink: None, - xcm_senders: None, - }; - - crate::rpc::create_full(deps).map_err(Into::into) - }) - }; - - let is_authority = parachain_config.role.is_authority(); - let mut node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; - + // If we're running a collator dev node, we must install manual seal block + // production. let mut command_sink = None; let mut xcm_senders = None; - if is_authority { + if parachain_config.role.is_authority() { let client = node_builder.client.clone(); let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::>(100); let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec)>(100); xcm_senders = Some((downward_xcm_sender, hrmp_xcm_sender)); @@ -428,7 +431,31 @@ })?; } - todo!() + // This node RPC builder.
+ let rpc_builder = { + let client = node_builder.client.clone(); + let transaction_pool = node_builder.transaction_pool.clone(); + + Box::new(move |deny_unsafe, _| { + let deps = crate::rpc::FullDeps { + client: client.clone(), + pool: transaction_pool.clone(), + deny_unsafe, + command_sink: command_sink.clone(), + xcm_senders: xcm_senders.clone(), + }; + + crate::rpc::create_full(deps).map_err(Into::into) + }) + }; + + // We spawn all the common substrate tasks to properly run a node. + let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; + + // We start the networking part. + node_builder.network.start_network.start_network(); + + Ok((node_builder.task_manager, node_builder.client)) } /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. @@ -1503,28 +1530,6 @@ pub fn new_dev( let client_set_aside_for_cidp = client.clone(); - #[async_trait::async_trait] - impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider { - async fn provide_inherent_data( - &self, - inherent_data: &mut sp_inherents::InherentData, - ) -> Result<(), sp_inherents::Error> { - TIMESTAMP.with(|x| { - *x.borrow_mut() += dancebox_runtime::SLOT_DURATION; - inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow()) - }) - } - - async fn try_handle_error( - &self, - _identifier: &sp_inherents::InherentIdentifier, - _error: &[u8], - ) -> Option> { - // The pallet never reports error. - None - } - } - task_manager.spawn_essential_handle().spawn_blocking( "authorship_task", Some("block-authoring"), From 03555956589a7889262bdfb4eb268211f3e7c7ab Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Tue, 7 Nov 2023 14:11:22 +0100 Subject: [PATCH 13/29] replace new_dev by start_dev_node + remove async --- client/node-common/src/service.rs | 14 +- node/src/command.rs | 2 +- node/src/service.rs | 478 ++++++------------------------ 3 files changed, 101 insertions(+), 393 deletions(-) diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index 832c3c0b3..3c214252d 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -142,13 +142,14 @@ where { /// Create a new `NodeBuilder` which prepares objects required to launch a /// node. However it doesn't start anything, and doesn't provide any - /// cumulus-dependent objects (as it requires an import queue, which usually + /// network-dependent objects (as it requires an import queue, which usually /// is different for each node). - pub async fn new( + #[must_use] + pub fn new( parachain_config: &Configuration, hwbench: Option, ) -> Result { - // Refactor: old new_partial + build_relay_chain_interface + // Refactor: old new_partial let telemetry = parachain_config .telemetry_endpoints @@ -233,6 +234,7 @@ where + BlockBuilder + cumulus_primitives_core::CollectCollationInfo, { + #[must_use] pub async fn build_relay_chain_interface( &mut self, parachain_config: &Configuration, @@ -259,6 +261,7 @@ where /// /// Can only be called once on a `NodeBuilder` that doesn't yet have network /// data. + #[must_use] pub async fn build_cumulus_network( self, parachain_config: &Configuration, @@ -329,11 +332,12 @@ where }) } - /// Given an import queue, calls `cumulus_client_service::build_network` and + /// Given an import queue, calls `sc_service::build_network` and /// stores the returned objects in `self.network` and `self.tx_handler_controller`.
/// /// Can only be called once on a `NodeBuilder` that doesn't yet have network /// data. + #[must_use] pub fn build_substrate_network( self, parachain_config: &Configuration, @@ -399,7 +403,7 @@ where }) } - /// Given an `rpc_builder`, spawns the common tasks of a Substrate + Cumulus + /// Given an `rpc_builder`, spawns the common tasks of a Substrate /// node. It consumes `self.tx_handler_controller` in the process, which means /// it can only be called once, and any other code that would need this /// controller should interact with it before calling this function. diff --git a/node/src/command.rs b/node/src/command.rs index 14abd72e3..3c95cd581 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -466,7 +466,7 @@ pub fn run() -> Result<()> { config.chain_spec.is_dev() || relay_chain_id == Some("dev-service".to_string()) || cli.run.dev_service; if dev_service { - return crate::service::new_dev(config, cli.run.sealing, hwbench, id).map_err(Into::into) + return crate::service::start_dev_node(config, cli.run.sealing, hwbench, id).map_err(Into::into) } let parachain_account = diff --git a/node/src/service.rs b/node/src/service.rs index 543c7e3ce..20fb9836b 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -272,192 +272,6 @@ fn check_assigned_para_id( Ok(()) } -/// Starts a `ServiceBuilder` for a dev service. -pub fn new_partial_dev( - config: &Configuration, -) -> Result< - PartialComponents< - ParachainClient, - ParachainBackend, - MaybeSelectChain, - sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, - ( - DevParachainBlockImport, - Option, - Option, - ), - >, - sc_service::Error, -> { - todo!() - // let NodeBuilder { - // client, - // backend, - // transaction_pool, - // telemetry, - // telemetry_worker_handle, - // task_manager, - // keystore_container, - // } = node_common::service::NodeBuilder::new(config)?; - - // let block_import = DevParachainBlockImport::new(client.clone()); - // let import_queue = build_manual_seal_import_queue( - // client.clone(), - // block_import.clone(), - // config, - // telemetry.as_ref().map(|telemetry| telemetry.handle()), - // &task_manager, - // )?; - - // let maybe_select_chain = Some(sc_consensus::LongestChain::new(backend.clone())); - - // Ok(PartialComponents { - // backend, - // client, - // import_queue, - // keystore_container, - // task_manager, - // transaction_pool, - // select_chain: maybe_select_chain, - // other: (block_import, telemetry, telemetry_worker_handle), - // }) -} - -/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. -/// -/// This is the actual implementation that is abstract over the executor and the runtime api. -#[sc_tracing::logging::prefix_logs_with("Orchestrator")] -async fn start_dev_node_impl2( - orchestrator_config: Configuration, - sealing: Sealing, - hwbench: Option, - para_id: ParaId, -) -> sc_service::error::Result<(TaskManager, Arc)> { - let parachain_config = prepare_node_config(orchestrator_config); - - // Create a `NodeBuilder` which helps set up parachain nodes common systems. - let node_builder = - node_common::service::NodeBuilder::new(&parachain_config, hwbench.clone()).await?; - - // This node block import.
- let block_import = DevParachainBlockImport::new(node_builder.client.clone()); - let import_queue = build_manual_seal_import_queue( - node_builder.client.clone(), - block_import.clone(), - &parachain_config, - node_builder - .telemetry - .as_ref() - .map(|telemetry| telemetry.handle()), - &node_builder.task_manager, - )?; - - // Build a Substrate Network. (not cumulus since it is a dev node, it mocks - // the relay chain) - let mut node_builder = node_builder.build_substrate_network(&parachain_config, import_queue)?; - - // If we're running a collator dev node, we must install manual seal block - // production. - let mut command_sink = None; - let mut xcm_senders = None; - if parachain_config.role.is_authority() { - let client = node_builder.client.clone(); - let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::>(100); - let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec)>(100); - xcm_senders = Some((downward_xcm_sender, hrmp_xcm_sender)); - - command_sink = node_builder.install_manual_seal(ManualSealConfiguration { - block_import, - sealing, - soft_deadline: Some(SOFT_DEADLINE_PERCENT), - select_chain: sc_consensus::LongestChain::new(node_builder.backend.clone()), - consensus_data_provider: Some(Box::new( - tc_consensus::OrchestratorManualSealAuraConsensusDataProvider::new( - node_builder.client.clone(), - node_builder.keystore_container.keystore(), - para_id, - ), - )), - create_inherent_data_providers: move |block: H256, ()| { - let current_para_block = client - .number(block) - .expect("Header lookup should succeed") - .expect("Header passed in as parent should be present in backend."); - - let para_ids = client - .runtime_api() - .registered_paras(block) - .expect("registered_paras runtime API should exist") - .into_iter() - .collect(); - - let downward_xcm_receiver = downward_xcm_receiver.clone(); - let hrmp_xcm_receiver = hrmp_xcm_receiver.clone(); - - let client_for_xcm = client.clone(); - async move { - let mocked_author_noting = - tp_author_noting_inherent::MockAuthorNotingInherentDataProvider { - current_para_block, - relay_offset: 1000, - relay_blocks_per_para_block: 2, - para_ids, - slots_per_para_block: 1, - }; - - let time = MockTimestampInherentDataProvider; - let mocked_parachain = MockValidationDataInherentDataProvider { - current_para_block, - relay_offset: 1000, - relay_blocks_per_para_block: 2, - // TODO: Recheck - para_blocks_per_relay_epoch: 10, - relay_randomness_config: (), - xcm_config: MockXcmConfig::new( - &*client_for_xcm, - block, - para_id, - Default::default(), - ), - raw_downward_messages: downward_xcm_receiver.drain().collect(), - raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(), - additional_key_values: Some(mocked_author_noting.get_key_values().clone()), - }; - - Ok((time, mocked_parachain, mocked_author_noting)) - } - }, - })?; - } - - // This node RPC builder. - let rpc_builder = { - let client = node_builder.client.clone(); - let transaction_pool = node_builder.transaction_pool.clone(); - - Box::new(move |deny_unsafe, _| { - let deps = crate::rpc::FullDeps { - client: client.clone(), - pool: transaction_pool.clone(), - deny_unsafe, - command_sink: command_sink.clone(), - xcm_senders: xcm_senders.clone(), - }; - - crate::rpc::create_full(deps).map_err(Into::into) - }) - }; - - // We spawn all the common substrate tasks to properly run a node. - let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; - - // We start the networking part.
- node_builder.network.start_network.start_network(); - - Ok((node_builder.task_manager, node_builder.client)) -} - /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. /// /// This is the actual implementation that is abstract over the executor and the runtime api. @@ -1409,202 +1223,116 @@ pub async fn start_parachain_node( pub const SOFT_DEADLINE_PERCENT: sp_runtime::Percent = sp_runtime::Percent::from_percent(100); -/// Builds a new development service. This service uses manual seal, and mocks -/// the parachain inherent. -pub fn new_dev( - config: Configuration, +/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. +/// +/// This is the actual implementation that is abstract over the executor and the runtime api. +#[sc_tracing::logging::prefix_logs_with("Orchestrator Dev Node")] +pub fn start_dev_node( + orchestrator_config: Configuration, sealing: Sealing, hwbench: Option, para_id: ParaId, -) -> Result { - use { - async_io::Timer, - futures::Stream, - sc_consensus_manual_seal::{run_manual_seal, EngineCommand, ManualSealParams}, - }; - - let sc_service::PartialComponents { - client, - backend, - mut task_manager, - import_queue, - keystore_container, - select_chain: maybe_select_chain, - transaction_pool, - other: (block_import, mut telemetry, _telemetry_worker_handle), - } = new_partial_dev(&config)?; +) -> sc_service::error::Result { + let parachain_config = prepare_node_config(orchestrator_config); - let net_config = FullNetworkConfiguration::new(&config.network); + // Create a `NodeBuilder` which helps set up parachain nodes common systems. + let node_builder = node_common::service::NodeBuilder::new(&parachain_config, hwbench.clone())?; - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - block_announce_validator_builder: None, - warp_sync_params: None, - net_config, - })?; + // This node block import. + let block_import = DevParachainBlockImport::new(node_builder.client.clone()); + let import_queue = build_manual_seal_import_queue( + node_builder.client.clone(), + block_import.clone(), + &parachain_config, + node_builder + .telemetry + .as_ref() + .map(|telemetry| telemetry.handle()), + &node_builder.task_manager, + )?; - if config.offchain_worker.enabled { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", - sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { - runtime_api_provider: client.clone(), - keystore: Some(keystore_container.keystore()), - offchain_db: backend.offchain_storage(), - transaction_pool: Some(OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - )), - network_provider: network.clone(), - is_validator: config.role.is_authority(), - enable_http_requests: false, - custom_extensions: move |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), - ); - } + // Build a Substrate Network. (not cumulus since it is a dev node, it mocks + // the relay chain) + let mut node_builder = node_builder.build_substrate_network(&parachain_config, import_queue)?; - let prometheus_registry = config.prometheus_registry().cloned(); - let collator = config.role.is_authority(); + // If we're running a collator dev node, we must install manual seal block + // production.
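The mocked-XCM plumbing set up in the authority branch just below follows a simple pattern: bounded `flume` channels whose senders are handed to the RPC layer, while the receivers are drained into each block's mock inherent. A small, self-contained sketch of that flow (payloads are placeholder bytes; the real code sends SCALE-encoded messages):

    // Requires the `flume` crate. Senders live in the RPC layer; the
    // receiver is drained when the next block's mock inherent is built.
    fn main() {
        let (downward_tx, downward_rx) = flume::bounded::<Vec<u8>>(100);

        // An RPC call would enqueue a raw downward message here.
        downward_tx.send(vec![1, 2, 3]).expect("channel is open");

        // Block production takes everything queued so far, exactly like
        // `downward_xcm_receiver.drain().collect()` below.
        let raw_downward_messages: Vec<Vec<u8>> = downward_rx.drain().collect();
        assert_eq!(raw_downward_messages.len(), 1);
    }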
let mut command_sink = None; let mut xcm_senders = None; - - if collator { - let mut env = sc_basic_authorship::ProposerFactory::new( - task_manager.spawn_handle(), - client.clone(), - transaction_pool.clone(), - prometheus_registry.as_ref(), - telemetry.as_ref().map(|x| x.handle()), - ); - // Create channels for mocked XCM messages. + if parachain_config.role.is_authority() { + let client = node_builder.client.clone(); let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::>(100); let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec)>(100); xcm_senders = Some((downward_xcm_sender, hrmp_xcm_sender)); - env.set_soft_deadline(SOFT_DEADLINE_PERCENT); - let commands_stream: Box> + Send + Sync + Unpin> = - match sealing { - Sealing::Instant => { - Box::new( - // This bit cribbed from the implementation of instant seal. - transaction_pool - .pool() - .validated_pool() - .import_notification_stream() - .map(|_| EngineCommand::SealNewBlock { - create_empty: false, - finalize: false, - parent_hash: None, - sender: None, - }), - ) - } - Sealing::Manual => { - let (sink, stream) = futures::channel::mpsc::channel(1000); - // Keep a reference to the other end of the channel. It goes to the RPC. - command_sink = Some(sink); - Box::new(stream) - } - Sealing::Interval(millis) => Box::new(futures::StreamExt::map( - Timer::interval(Duration::from_millis(millis)), - |_| EngineCommand::SealNewBlock { - create_empty: true, - finalize: false, - parent_hash: None, - sender: None, - }, - )), - }; + command_sink = node_builder.install_manual_seal(ManualSealConfiguration { + block_import, + sealing, + soft_deadline: Some(SOFT_DEADLINE_PERCENT), + select_chain: sc_consensus::LongestChain::new(node_builder.backend.clone()), + consensus_data_provider: Some(Box::new( + tc_consensus::OrchestratorManualSealAuraConsensusDataProvider::new( + node_builder.client.clone(), + node_builder.keystore_container.keystore(), + para_id, + ), + )), + create_inherent_data_providers: move |block: H256, ()| { + let current_para_block = client + .number(block) + .expect("Header lookup should succeed") + .expect("Header passed in as parent should be present in backend."); - let select_chain = maybe_select_chain.expect( - "`new_partial` builds a `LongestChainRule` when building dev service.\ - We specified the dev service when calling `new_partial`.\ - Therefore, a `LongestChainRule` is present. 
qed.", - ); + let para_ids = client + .runtime_api() + .registered_paras(block) + .expect("registered_paras runtime API should exist") + .into_iter() + .collect(); - let client_set_aside_for_cidp = client.clone(); + let downward_xcm_receiver = downward_xcm_receiver.clone(); + let hrmp_xcm_receiver = hrmp_xcm_receiver.clone(); - task_manager.spawn_essential_handle().spawn_blocking( - "authorship_task", - Some("block-authoring"), - run_manual_seal(ManualSealParams { - block_import, - env, - client: client.clone(), - pool: transaction_pool.clone(), - commands_stream, - select_chain, - consensus_data_provider: Some(Box::new( - tc_consensus::OrchestratorManualSealAuraConsensusDataProvider::new( - client.clone(), - keystore_container.keystore(), - para_id, - ), - )), - create_inherent_data_providers: move |block: H256, ()| { - let current_para_block = client_set_aside_for_cidp - .number(block) - .expect("Header lookup should succeed") - .expect("Header passed in as parent should be present in backend."); - - let para_ids = client_set_aside_for_cidp - .runtime_api() - .registered_paras(block) - .expect("registered_paras runtime API should exist") - .into_iter() - .collect(); - - let downward_xcm_receiver = downward_xcm_receiver.clone(); - let hrmp_xcm_receiver = hrmp_xcm_receiver.clone(); - - let client_for_xcm = client_set_aside_for_cidp.clone(); - async move { - let mocked_author_noting = - tp_author_noting_inherent::MockAuthorNotingInherentDataProvider { - current_para_block, - relay_offset: 1000, - relay_blocks_per_para_block: 2, - para_ids, - slots_per_para_block: 1, - }; - - let time = MockTimestampInherentDataProvider; - let mocked_parachain = MockValidationDataInherentDataProvider { + let client_for_xcm = client.clone(); + async move { + let mocked_author_noting = + tp_author_noting_inherent::MockAuthorNotingInherentDataProvider { current_para_block, relay_offset: 1000, relay_blocks_per_para_block: 2, - // TODO: Recheck - para_blocks_per_relay_epoch: 10, - relay_randomness_config: (), - xcm_config: MockXcmConfig::new( - &*client_for_xcm, - block, - para_id, - Default::default(), - ), - raw_downward_messages: downward_xcm_receiver.drain().collect(), - raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(), - additional_key_values: Some( - mocked_author_noting.get_key_values().clone(), - ), + para_ids, + slots_per_para_block: 1, }; - Ok((time, mocked_parachain, mocked_author_noting)) - } - }, - }), - ); + let time = MockTimestampInherentDataProvider; + let mocked_parachain = MockValidationDataInherentDataProvider { + current_para_block, + relay_offset: 1000, + relay_blocks_per_para_block: 2, + // TODO: Recheck + para_blocks_per_relay_epoch: 10, + relay_randomness_config: (), + xcm_config: MockXcmConfig::new( + &*client_for_xcm, + block, + para_id, + Default::default(), + ), + raw_downward_messages: downward_xcm_receiver.drain().collect(), + raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(), + additional_key_values: Some(mocked_author_noting.get_key_values().clone()), + }; + + Ok((time, mocked_parachain, mocked_author_noting)) + } + }, + })?; } + // This node RPC builder. 
let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); + let client = node_builder.client.clone(); + let transaction_pool = node_builder.transaction_pool.clone(); Box::new(move |deny_unsafe, _| { let deps = crate::rpc::FullDeps { @@ -1619,39 +1347,15 @@ pub fn new_dev( }) }; - let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client, - transaction_pool, - task_manager: &mut task_manager, - config, - keystore: keystore_container.keystore(), - backend, - network, - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - sync_service, - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } + // We spawn all the common substrate tasks to properly run a node. + let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; log::info!("Development Service Ready"); - start_network.start_network(); + // We start the networking part. + node_builder.network.start_network.start_network(); - Ok(task_manager) + Ok(node_builder.task_manager) } /// Can be called for a `Configuration` to check if it is a configuration for From 83b963a7d8cc6af8e506f7bde0470d86800db31c Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Tue, 7 Nov 2023 18:06:32 +0100 Subject: [PATCH 14/29] NodeBuilder::start_full_node --- client/node-common/src/service.rs | 177 ++++++++++--- node/src/service.rs | 422 ++++++++++++++++-------------- 2 files changed, 369 insertions(+), 230 deletions(-) diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index 3c214252d..8a000e084 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -19,7 +19,9 @@ use { core::time::Duration, core_extensions::TypeIdentity, cumulus_client_cli::CollatorOptions, - cumulus_client_service::{build_relay_chain_interface, CollatorSybilResistance}, + cumulus_client_service::{ + build_relay_chain_interface, CollatorSybilResistance, StartFullNodeParams, + }, cumulus_primitives_core::ParaId, cumulus_relay_chain_interface::RelayChainInterface, frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE, @@ -27,7 +29,7 @@ use { jsonrpsee::RpcModule, polkadot_primitives::CollatorPair, sc_client_api::Backend, - sc_consensus::{block_import, BlockImport, ImportQueue}, + sc_consensus::{import_queue::ImportQueueService, BlockImport, ImportQueue}, sc_consensus_manual_seal::{ run_manual_seal, ConsensusDataProvider, EngineCommand, ManualSealParams, }, @@ -35,7 +37,7 @@ use { HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY, }, - sc_network::{config::FullNetworkConfiguration, NetworkService}, + sc_network::{config::FullNetworkConfiguration, NetworkBlock, NetworkService}, sc_network_sync::SyncingService, sc_network_transactions::TransactionsHandlerController, sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}, @@ -67,19 +69,7 @@ macro_rules! 
T { [ConstructedRuntimeApi] => { >::RuntimeApi }; - [Where] => { - Block: cumulus_primitives_core::BlockT, - ParachainNativeExecutor: NativeExecutionDispatch + 'static, - RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static, - T![ConstructedRuntimeApi]: TaggedTransactionQueue + BlockBuilder, - } -} - -pub struct Network { - pub network: Arc>, - pub system_rpc_tx: TracingUnboundedSender>, - pub start_network: NetworkStarter, - pub sync_service: Arc>, + [ImportQueueService] => { Box> } } // `Cumulus` and `TxHandler` are types that will change during the life of @@ -90,10 +94,16 @@ pub struct NodeBuilder< Block, RuntimeApi, ParachainNativeExecutor, // `(cumulus_client_service/sc_service)::build_network` returns many important systems, // but can only be called with an `import_queue` which can be different in // each node. For that reason it is a `()` when calling `new`, then the // caller creates the `import_queue` using systems contained in `NodeBuilder`, // then calls `build_cumulus_network` with it to generate the cumulus systems. - Network = (), + SNetwork = (), // The `TxHandler` is constructed in `build_X_network` // and is then consumed when calling `spawn_common_tasks`. - TxHandler = (), + STxHandler = (), + // The import queue service is obtained from the import queue in + // `build_cumulus_network` or `build_substrate_network`, which also + // consumes the import queue. Neither of them is clonable, so we need + // to store the service here to be able to consume it later in + // `start_full_node`. + SImportQueueService = (), > where Block: cumulus_primitives_core::BlockT, ParachainNativeExecutor: NativeExecutionDispatch + 'static, @@ -125,15 +121,23 @@ pub struct NodeBuilder< pub telemetry: Option, pub telemetry_worker_handle: Option, pub hwbench: Option, pub prometheus_registry: Option, - pub network: Network, - pub tx_handler_controller: TxHandler, + pub network: SNetwork, + pub tx_handler_controller: STxHandler, + pub import_queue_service: SImportQueueService, +} + +pub struct Network { + pub network: Arc>, + pub system_rpc_tx: TracingUnboundedSender>, + pub start_network: NetworkStarter, + pub sync_service: Arc>, } // `new` function doesn't take self, and the Rust compiler cannot infer that // only one type T implements `TypeIdentity`. We thus need a separate impl // block with concrete types `()`.
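For context, `core_extensions::TypeIdentity` is what lets these impl blocks "prove" to the compiler that a generic slot is a concrete type. A simplified local reimplementation of the idea (not the crate's exact definition, but the same shape as the `into_type`/`from_type` calls used throughout this file):

    // The blanket impl is the only impl, so a bound like
    // `S: Identity<Type = ()>` convinces the compiler that `S` *is* `()`.
    trait Identity {
        type Type;
        fn into_type(self) -> Self::Type;
        fn from_type(value: Self::Type) -> Self;
    }

    impl<T> Identity for T {
        type Type = T;
        fn into_type(self) -> T {
            self
        }
        fn from_type(value: T) -> T {
            value
        }
    }

    // Generic code can now open up a slot once a bound pins its type:
    fn consume_unit_slot<S: Identity<Type = ()>>(slot: S) {
        let _unit: () = slot.into_type();
    }

    fn main() {
        consume_unit_slot(());
    }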
impl - NodeBuilder + NodeBuilder where Block: cumulus_primitives_core::BlockT, ParachainNativeExecutor: NativeExecutionDispatch + 'static, @@ -220,12 +224,20 @@ where prometheus_registry: parachain_config.prometheus_registry().cloned(), network: TypeIdentity::from_type(()), tx_handler_controller: TypeIdentity::from_type(()), + import_queue_service: TypeIdentity::from_type(()), }) } } -impl - NodeBuilder +impl + NodeBuilder< + Block, + RuntimeApi, + ParachainNativeExecutor, + SNetwork, + STxHandler, + SImportQueueService, + > where Block: cumulus_primitives_core::BlockT, ParachainNativeExecutor: NativeExecutionDispatch + 'static, @@ -240,28 +252,32 @@ where + BlockBuilder + cumulus_primitives_core::CollectCollationInfo, { @@ -275,11 +287,13 @@ where ParachainNativeExecutor, Network, TransactionsHandlerController, + T![ImportQueueService], >, > where - NetworkT: TypeIdentity, - TxHandler: TypeIdentity, + SNetwork: TypeIdentity, + STxHandler: TypeIdentity, + SImportQueueService: TypeIdentity, RCInterface: RelayChainInterface + Clone + 'static, { let Self { @@ -294,9 +308,11 @@ where prometheus_registry, network: _, tx_handler_controller: _, + import_queue_service: _, } = self; let net_config = FullNetworkConfiguration::new(&parachain_config.network); + let import_queue_service = import_queue.service(); let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = cumulus_client_service::build_network(cumulus_client_service::BuildNetworkParams { @@ -329,6 +345,7 @@ where sync_service, }, tx_handler_controller, + import_queue_service, }) } @@ -349,11 +366,13 @@ where ParachainNativeExecutor, Network, TransactionsHandlerController, + T![ImportQueueService], >, > where - NetworkT: TypeIdentity, - TxHandler: TypeIdentity, + SNetwork: TypeIdentity, + STxHandler: TypeIdentity, + SImportQueueService: TypeIdentity, { let Self { client, @@ -367,9 +386,11 @@ where prometheus_registry, network: _, tx_handler_controller: _, + import_queue_service: _, } = self; let net_config = FullNetworkConfiguration::new(&parachain_config.network); + let import_queue_service = import_queue.service(); let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { @@ -400,6 +421,7 @@ where sync_service, }, tx_handler_controller, + import_queue_service, }) } @@ -418,11 +440,18 @@ where ) -> Result, sc_service::Error>, >, ) -> sc_service::error::Result< - NodeBuilder, ()>, + NodeBuilder< + Block, + RuntimeApi, + ParachainNativeExecutor, + Network, + (), + SImportQueueService, + >, > where - NetworkT: TypeIdentity>, - TxHandler: TypeIdentity>, + SNetwork: TypeIdentity>, + STxHandler: TypeIdentity>, Block::Hash: Unpin, Block::Header: Unpin, T![ConstructedRuntimeApi]: TaggedTransactionQueue @@ -441,11 +470,12 @@ where keystore_container, hwbench, prometheus_registry, - network: cumulus, + network, tx_handler_controller, + import_queue_service, } = self; - let cumulus = TypeIdentity::into_type(cumulus); + let network = TypeIdentity::into_type(network); let tx_handler_controller = TypeIdentity::into_type(tx_handler_controller); let collator = parachain_config.role.is_authority(); @@ -461,7 +491,7 @@ where transaction_pool: Some(OffchainTransactionPoolFactory::new( transaction_pool.clone(), )), - network_provider: cumulus.network.clone(), + network_provider: network.network.clone(), is_validator: parachain_config.role.is_authority(), enable_http_requests: false, custom_extensions: move |_| vec![], @@ -479,11 +509,11 @@ where config: parachain_config, keystore: keystore_container.keystore(), backend: backend.clone(), - network:
cumulus.network.clone(), - system_rpc_tx: cumulus.system_rpc_tx.clone(), + network: network.network.clone(), + system_rpc_tx: network.system_rpc_tx.clone(), tx_handler_controller, telemetry: telemetry.as_mut(), - sync_service: cumulus.sync_service.clone(), + sync_service: network.sync_service.clone(), })?; if let Some(hwbench) = &hwbench { @@ -517,8 +547,9 @@ where keystore_container, hwbench, prometheus_registry, - network: TypeIdentity::from_type(cumulus), + network: TypeIdentity::from_type(network), tx_handler_controller: TypeIdentity::from_type(()), + import_queue_service, }) } @@ -612,6 +643,78 @@ where Ok(command_sink) } + + pub fn start_full_node<'a, RCInterface>( + self, + para_id: ParaId, + relay_chain_interface: RCInterface, + relay_chain_slot_duration: Duration, + ) -> sc_service::error::Result< + NodeBuilder, + > + where + SNetwork: TypeIdentity>, + SImportQueueService: TypeIdentity, + RCInterface: RelayChainInterface + Clone + 'static, + { + let NodeBuilder { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + mut task_manager, + keystore_container, + hwbench, + prometheus_registry, + network, + tx_handler_controller, + import_queue_service, + } = self; + + let network = TypeIdentity::into_type(network); + let import_queue_service = TypeIdentity::into_type(import_queue_service); + + let announce_block = { + let sync_service = network.sync_service.clone(); + Arc::new(move |hash, data| sync_service.announce_block(hash, data)) + }; + + let overseer_handle = relay_chain_interface + .overseer_handle() + .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + + let params = StartFullNodeParams { + client: client.clone(), + announce_block, + task_manager: &mut task_manager, + para_id, + relay_chain_interface: relay_chain_interface.clone(), + relay_chain_slot_duration, + import_queue: import_queue_service, + recovery_handle: Box::new(overseer_handle), + sync_service: network.sync_service.clone(), + }; + + // TODO: change for async backing + #[allow(deprecated)] + cumulus_client_service::start_full_node(params)?; + + Ok(NodeBuilder { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + task_manager, + keystore_container, + hwbench, + prometheus_registry, + network: TypeIdentity::from_type(network), + tx_handler_controller, + import_queue_service: (), + }) + } } /// Block authoring scheme to be used by the dev service. diff --git a/node/src/service.rs b/node/src/service.rs index 20fb9836b..fb2679e20 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -293,69 +293,73 @@ async fn start_node_impl( // Channel to send messages to start/stop container chains let (cc_spawn_tx, cc_spawn_rx) = unbounded_channel(); - let params = new_partial(&parachain_config)?; - let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - let client = params.client.clone(); - let backend = params.backend.clone(); - let mut task_manager = params.task_manager; + // Create a `NodeBuilder` which helps set up parachain nodes common systems.
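One detail from `start_full_node` above deserves a call-out: `announce_block` is nothing more than a clonable `Arc`-wrapped closure capturing the sync service. A std-only sketch of the shape, with stand-in types for the hash and payload (the real signature uses the block hash type and an `Option<Vec<u8>>` of announcement data):

    use std::sync::Arc;

    // Stand-in for the sync service that announces imported blocks to peers.
    struct SyncService;

    impl SyncService {
        fn announce_block(&self, hash: u64, _data: Option<Vec<u8>>) {
            println!("announcing block {hash}");
        }
    }

    fn main() {
        let sync_service = Arc::new(SyncService);

        // Same shape as the closure handed to `StartFullNodeParams`:
        // capture a clone of the Arc and forward the call.
        let announce_block: Arc<dyn Fn(u64, Option<Vec<u8>>) + Send + Sync> = {
            let sync_service = sync_service.clone();
            Arc::new(move |hash, data| sync_service.announce_block(hash, data))
        };

        announce_block(42, None);
    }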
+ let mut node_builder = + node_common::service::NodeBuilder::new(&parachain_config, hwbench.clone())?; + + // The nimbus import queue ONLY checks the signature correctness + // Any other checks corresponding to the author-correctness should be done + // in the runtime + let block_import = + ParachainBlockImport::new(node_builder.client.clone(), node_builder.backend.clone()); + let import_queue = nimbus_consensus::import_queue( + node_builder.client.clone(), + block_import.clone(), + move |_, _| async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); - let (relay_chain_interface, collator_key) = build_relay_chain_interface( - polkadot_config, - &parachain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; + Ok((time,)) + }, + &node_builder.task_manager.spawn_essential_handle(), + parachain_config.prometheus_registry(), + false, + )?; - let force_authoring = parachain_config.force_authoring; - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::new(&parachain_config.network); + // let params = new_partial(&parachain_config)?; + // let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - cumulus_client_service::build_network(cumulus_client_service::BuildNetworkParams { - parachain_config: &parachain_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue: params.import_queue, - para_id, - relay_chain_interface: relay_chain_interface.clone(), - net_config, - sybil_resistance_level: CollatorSybilResistance::Resistant, - }) + // let client = params.client.clone(); + // let backend = params.backend.clone(); + // let mut task_manager = params.task_manager; + + let (relay_chain_interface, collator_key) = node_builder + .build_relay_chain_interface(&parachain_config, polkadot_config, collator_options.clone()) .await?; - if parachain_config.offchain_worker.enabled { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", - sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { - runtime_api_provider: client.clone(), - keystore: Some(params.keystore_container.keystore()), - offchain_db: backend.offchain_storage(), - transaction_pool: Some(OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - )), - network_provider: network.clone(), - is_validator: parachain_config.role.is_authority(), - enable_http_requests: false, - custom_extensions: move |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), - ); - } + // let force_authoring = parachain_config.force_authoring; + let validator = parachain_config.role.is_authority(); + // let prometheus_registry = parachain_config.prometheus_registry().cloned(); + // let transaction_pool = params.transaction_pool.clone(); + // let import_queue_service = params.import_queue.service(); + // let net_config = FullNetworkConfiguration::new(&parachain_config.network); + + // let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = //
cumulus_client_service::build_network(cumulus_client_service::BuildNetworkParams { + // parachain_config: ¶chain_config, + // client: client.clone(), + // transaction_pool: transaction_pool.clone(), + // spawn_handle: task_manager.spawn_handle(), + // import_queue: params.import_queue, + // para_id, + // relay_chain_interface: relay_chain_interface.clone(), + // net_config, + // sybil_resistance_level: CollatorSybilResistance::Resistant, + // }) + // .await?; + + let node_builder = node_builder + .build_cumulus_network( + ¶chain_config, + para_id, + import_queue, + relay_chain_interface.clone(), + ) + .await?; let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); + let client = node_builder.client.clone(); + let transaction_pool = node_builder.transaction_pool.clone(); Box::new(move |deny_unsafe, _| { let deps = crate::rpc::FullDeps { @@ -370,46 +374,69 @@ async fn start_node_impl( }) }; - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend: backend.clone(), - network: network.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - sync_service: sync_service.clone(), - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - // Here you can check whether the hardware meets your chains' requirements. Putting a link - // in there and swapping out the requirements for your own are probably a good idea. The - // requirements for a para-chain are dictated by its relay-chain. - if !SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) && validator { - log::warn!( - "⚠️ The hardware does not meet the minimal requirements for role 'Authority'." 
- ); - } - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } + let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; + // if parachain_config.offchain_worker.enabled { + // node_builder.task_manager.spawn_handle().spawn( + // "offchain-workers-runner", + // "offchain-work", + // sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { + // runtime_api_provider: node_builder.client.clone(), + // keystore: Some(node_builder.keystore_container.keystore()), + // offchain_db: node_builder.backend.offchain_storage(), + // transaction_pool: Some(OffchainTransactionPoolFactory::new( + // node_builder.transaction_pool.clone(), + // )), + // network_provider: node_builder.network.network.clone(), + // is_validator: parachain_config.role.is_authority(), + // enable_http_requests: false, + // custom_extensions: move |_| vec![], + // }) + // .run(node_builder.client.clone(), node_builder.task_manager.spawn_handle()) + // .boxed(), + // ); + // } + + // sc_service::spawn_tasks(sc_service::SpawnTasksParams { + // rpc_builder, + // client: client.clone(), + // transaction_pool: transaction_pool.clone(), + // task_manager: &mut task_manager, + // config: parachain_config, + // keystore: params.keystore_container.keystore(), + // backend: backend.clone(), + // network: network.clone(), + // system_rpc_tx, + // tx_handler_controller, + // telemetry: telemetry.as_mut(), + // sync_service: sync_service.clone(), + // })?; + + // if let Some(hwbench) = hwbench { + // sc_sysinfo::print_hwbench(&hwbench); + // // Here you can check whether the hardware meets your chains' requirements. Putting a link + // // in there and swapping out the requirements for your own are probably a good idea. The + // // requirements for a para-chain are dictated by its relay-chain. + // if !SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) && validator { + // log::warn!( + // "⚠️ The hardware does not meet the minimal requirements for role 'Authority'." 
+ // ); + // } + + // if let Some(ref mut telemetry) = telemetry { + // let telemetry_handle = telemetry.handle(); + // task_manager.spawn_handle().spawn( + // "telemetry_hwbench", + // None, + // sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), + // ); + // } + // } + + // let announce_block = { + // let sync_service = sync_service.clone(); + // Arc::new(move |hash, data| sync_service.announce_block(hash, data)) + // }; let relay_chain_slot_duration = Duration::from_secs(6); @@ -417,104 +444,112 @@ async fn start_node_impl( .overseer_handle() .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - let orchestrator_chain_interface_builder = OrchestratorChainInProcessInterfaceBuilder { - client: client.clone(), - backend: backend.clone(), - sync_oracle: sync_service.clone(), - overseer_handle: overseer_handle.clone(), - }; - - let sync_keystore = params.keystore_container.keystore(); - let mut collate_on_tanssi = None; - - if validator { - let parachain_consensus = build_consensus_orchestrator( - client.clone(), - block_import, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), - transaction_pool, - sync_service.clone(), - params.keystore_container.keystore(), - force_authoring, - para_id, - )?; - - // Start task which detects para id assignment, and starts/stops container chains. - // Note that if this node was started without a `container_chain_config`, we don't - // support collation on container chains, so there is no need to detect changes to assignment - if container_chain_config.is_some() { - build_check_assigned_para_id( - client.clone(), - sync_keystore.clone(), - cc_spawn_tx.clone(), - task_manager.spawn_essential_handle(), - ); - } - - let spawner = task_manager.spawn_handle(); - let params = StartCollatorParams { - para_id, - block_status: client.clone(), - announce_block: announce_block.clone(), - client: client.clone(), - task_manager: &mut task_manager, - relay_chain_interface: relay_chain_interface.clone(), - spawner: spawner.clone(), - parachain_consensus: parachain_consensus.clone(), - import_queue: import_queue_service, - collator_key: collator_key - .clone() - .expect("Command line arguments do not allow this. qed"), - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle.clone()), - sync_service, - }; - - let client = client.clone(); - let collator_key = collator_key.clone(); - // TODO: change for async backing - collate_on_tanssi = Some(move || async move { - #[allow(deprecated)] - cumulus_client_collator::start_collator(cumulus_client_collator::StartCollatorParams { - runtime_api: client.clone(), - block_status: client.clone(), - announce_block, - overseer_handle, - spawner, - para_id, - key: collator_key - .clone() - .expect("Command line arguments do not allow this. 
qed"), - parachain_consensus, - }) - .await; - }); - - // TODO: change for async backing - #[allow(deprecated)] - start_collator(params).await?; + // let sync_keystore = node_builder.keystore_container.keystore(); + // let mut collate_on_tanssi = None; + + let mut node_builder = if validator { + todo!("node_builder.start_collator"); + + // let parachain_consensus = build_consensus_orchestrator( + // node_builder.client.clone(), + // block_import, + // node_builder.prometheus_registry.as_ref(), + // node_builder.telemetry.as_ref().map(|t| t.handle()), + // &node_builder.task_manager, + // relay_chain_interface.clone(), + // node_builder.transaction_pool, + // node_builder.network.sync_service.clone(), + // node_builder.keystore_container.keystore(), + // force_authoring, + // para_id, + // )?; + + // // Start task which detects para id assignment, and starts/stops container chains. + // // Note that if this node was started without a `container_chain_config`, we don't + // // support collation on container chains, so there is no need to detect changes to assignment + // if container_chain_config.is_some() { + // build_check_assigned_para_id( + // node_builder.client.clone(), + // sync_keystore.clone(), + // cc_spawn_tx.clone(), + // node_builder.ask_manager.spawn_essential_handle(), + // ); + // } + + // let spawner = task_manager.spawn_handle(); + // let params = StartCollatorParams { + // para_id, + // block_status: node_builder.client.clone(), + // announce_block: announce_block.clone(), + // client: node_builder.client.clone(), + // task_manager: &mut node_builder.task_manager, + // relay_chain_interface: relay_chain_interface.clone(), + // spawner: spawner.clone(), + // parachain_consensus: parachain_consensus.clone(), + // import_queue: import_queue_service, + // collator_key: collator_key + // .clone() + // .expect("Command line arguments do not allow this. qed"), + // relay_chain_slot_duration, + // recovery_handle: Box::new(overseer_handle.clone()), + // sync_service, + // }; + + // let client = client.clone(); + // let collator_key = collator_key.clone(); + // // TODO: change for async backing + // collate_on_tanssi = Some(move || async move { + // #[allow(deprecated)] + // cumulus_client_collator::start_collator(cumulus_client_collator::StartCollatorParams { + // runtime_api: client.clone(), + // block_status: client.clone(), + // announce_block, + // overseer_handle, + // spawner, + // para_id, + // key: collator_key + // .clone() + // .expect("Command line arguments do not allow this. qed"), + // parachain_consensus, + // }) + // .await; + // }); + + // // TODO: change for async backing + // #[allow(deprecated)] + // start_collator(params).await?; } else { - let params = StartFullNodeParams { - client: client.clone(), - announce_block, - task_manager: &mut task_manager, + node_builder.start_full_node( para_id, - relay_chain_interface: relay_chain_interface.clone(), + relay_chain_interface.clone(), relay_chain_slot_duration, - import_queue: import_queue_service, - recovery_handle: Box::new(overseer_handle), - sync_service, - }; + )? 
+ // let params = StartFullNodeParams { + // client: client.clone(), + // announce_block, + // task_manager: &mut task_manager, + // para_id, + // relay_chain_interface: relay_chain_interface.clone(), + // relay_chain_slot_duration, + // import_queue: import_queue_service, + // recovery_handle: Box::new(overseer_handle), + // sync_service, + // }; + + // // TODO: change for async backing + // #[allow(deprecated)] + // start_full_node(params)?; + }; - // TODO: change for async backing - #[allow(deprecated)] - start_full_node(params)?; - } + node_builder.network.start_network.start_network(); - start_network.start_network(); + let sync_keystore = node_builder.keystore_container.keystore(); + let orchestrator_chain_interface_builder = OrchestratorChainInProcessInterfaceBuilder { + client: node_builder.client.clone(), + backend: node_builder.backend.clone(), + sync_oracle: node_builder.network.sync_service.clone(), + overseer_handle: overseer_handle.clone(), + }; if let Some((container_chain_cli, tokio_handle)) = container_chain_config { // If the orchestrator chain is running as a full-node, we start a full node for the @@ -533,8 +568,8 @@ async fn start_node_impl( } // Start container chain spawner task. This will start and stop container chains on demand. - let orchestrator_client = client.clone(); - let spawn_handle = task_manager.spawn_handle(); + let orchestrator_client = node_builder.client.clone(); + let spawn_handle = node_builder.task_manager.spawn_handle(); let container_chain_spawner = ContainerChainSpawner { orchestrator_chain_interface: orchestrator_chain_interface_builder.build(), orchestrator_client, @@ -549,24 +584,25 @@ async fn start_node_impl( validator, spawn_handle, state: Default::default(), - collate_on_tanssi: Arc::new(move || Box::pin((collate_on_tanssi.clone().unwrap())())), + collate_on_tanssi: todo!(), + // collate_on_tanssi: Arc::new(move || Box::pin((collate_on_tanssi.clone().unwrap())())), }; let state = container_chain_spawner.state.clone(); - task_manager.spawn_essential_handle().spawn( + node_builder.task_manager.spawn_essential_handle().spawn( "container-chain-spawner-rx-loop", None, container_chain_spawner.rx_loop(cc_spawn_rx), ); - task_manager.spawn_essential_handle().spawn( + node_builder.task_manager.spawn_essential_handle().spawn( "container-chain-spawner-debug-state", None, crate::container_chain_monitor::monitor_task(state), ) } - Ok((task_manager, client)) + Ok((node_builder.task_manager, node_builder.client)) } // Log string that will be shown for the container chain: `[Container-2000]`. 
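The refactor above leans on a type-state builder: the network and import-queue slots start as `()` and are only populated once the corresponding build step has run, so misordered calls fail at compile time rather than at runtime. A minimal, self-contained sketch of the idea (illustrative names only, not the actual `node-common` API):

/// Stand-in for the bundle of network services the real builder stores.
struct Network;

/// The `SNetwork` slot records whether the network step has run: it is `()`
/// right after construction and `Network` afterwards.
struct Builder<SNetwork = ()> {
    network: SNetwork,
}

impl Builder<()> {
    fn new() -> Self {
        Builder { network: () }
    }

    /// Consumes the network-less builder, returns one that owns a network.
    fn build_network(self) -> Builder<Network> {
        Builder { network: Network }
    }
}

impl Builder<Network> {
    /// Only callable once `build_network` has run; the compiler enforces it.
    fn start_full_node(self) {
        let _network = self.network;
    }
}

fn main() {
    // Builder::new().start_full_node(); // does not compile: network is `()`
    Builder::new().build_network().start_full_node();
}

In the real builder the same role is played by the `SNetwork` and `SImportQueueService` parameters together with the `TypeIdentity` bounds.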
From ea51604e55d92f4d35afafbeba40edffef09f91f Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Wed, 8 Nov 2023 11:12:05 +0100 Subject: [PATCH 15/29] replace macro with config trait+type --- client/node-common/src/service.rs | 160 +++++++++++++----------------- node/src/service.rs | 12 ++- 2 files changed, 79 insertions(+), 93 deletions(-) diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index 8a000e084..18782baeb 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -49,8 +49,7 @@ use { sc_utils::mpsc::TracingUnboundedSender, sp_api::ConstructRuntimeApi, sp_block_builder::BlockBuilder, - sp_consensus::{EnableProofRecording, SelectChain}, - sp_core::H256, + sp_consensus::SelectChain, sp_inherents::CreateInherentDataProviders, sp_offchain::OffchainWorkerApi, sp_runtime::Percent, @@ -58,20 +57,24 @@ use { std::{str::FromStr, sync::Arc}, }; -/// Functions in this module are generic over `Block`, `RuntimeApi`, and -/// `ParachainNativeExecutor`. Using type aliases requires them to be -/// generic too, which makes them still verbose to use. For that reason we use -/// a macro that expect the above types to already be in scope. -macro_rules! T { - [Executor] => { NativeElseWasmExecutor }; - [Client] => { TFullClient }; - [Backend] => { TFullBackend }; - [ConstructedRuntimeApi] => { - >::RuntimeApi - }; - [ImportQueueService] => { Box> } +pub trait Config { + type Block; + type RuntimeApi; + type ParachainNativeExecutor; } +pub type BlockOf = ::Block; +pub type BlockHashOf = as cumulus_primitives_core::BlockT>::Hash; +pub type BlockHeaderOf = as cumulus_primitives_core::BlockT>::Header; +pub type RuntimeApiOf = ::RuntimeApi; +pub type ParachainNativeExecutorOf = ::ParachainNativeExecutor; +pub type ExecutorOf = NativeElseWasmExecutor>; +pub type ClientOf = TFullClient, RuntimeApiOf, ExecutorOf>; +pub type BackendOf = TFullBackend>; +pub type ConstructedRuntimeApiOf = + as ConstructRuntimeApi, ClientOf>>::RuntimeApi; +pub type ImportQueueServiceOf = Box>>; + // `Cumulus` and `TxHandler` are types that will change during the life of // a `NodeBuilder` because they are generated and consumed when calling // certain functions, with absence of data represented with `()`. Some @@ -86,9 +89,7 @@ macro_rules! T { // are still required since Rust can't infer the types in the `new` function // that doesn't take `self`. pub struct NodeBuilder< - Block, - RuntimeApi, - ParachainNativeExecutor, + T: Config, // `(cumulus_client_service/sc_service)::build_network` returns many important systems, // but can only be called with an `import_queue` which can be different in // each node. For that reason it is a `()` when calling `new`, then the @@ -105,16 +106,16 @@ pub struct NodeBuilder< // `start_full_node`. 
SImportQueueService = (), > where - Block: cumulus_primitives_core::BlockT, - ParachainNativeExecutor: NativeExecutionDispatch + 'static, - RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static, - T![ConstructedRuntimeApi]: TaggedTransactionQueue + BlockBuilder, + BlockOf: cumulus_primitives_core::BlockT, + ParachainNativeExecutorOf: NativeExecutionDispatch + 'static, + RuntimeApiOf: ConstructRuntimeApi, ClientOf> + Sync + Send + 'static, + ConstructedRuntimeApiOf: TaggedTransactionQueue> + BlockBuilder>, { - pub client: Arc, - pub backend: Arc, + pub client: Arc>, + pub backend: Arc>, pub task_manager: TaskManager, pub keystore_container: KeystoreContainer, - pub transaction_pool: Arc>, + pub transaction_pool: Arc, ClientOf>>, pub telemetry: Option, pub telemetry_worker_handle: Option, @@ -136,13 +137,12 @@ pub struct Network { // `new` function doesn't take self, and the Rust compiler cannot infer that // only one type T implements `TypeIdentity`. With thus need a separate impl // block with concrete types `()`. -impl - NodeBuilder +impl NodeBuilder where - Block: cumulus_primitives_core::BlockT, - ParachainNativeExecutor: NativeExecutionDispatch + 'static, - RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static, - T![ConstructedRuntimeApi]: TaggedTransactionQueue + BlockBuilder, + BlockOf: cumulus_primitives_core::BlockT, + ParachainNativeExecutorOf: NativeExecutionDispatch + 'static, + RuntimeApiOf: ConstructRuntimeApi, ClientOf> + Sync + Send + 'static, + ConstructedRuntimeApiOf: TaggedTransactionQueue> + BlockBuilder>, { /// Create a new `NodeBuilder` which prepare objects required to launch a /// node. However it doesn't start anything, and doesn't provide any @@ -185,10 +185,10 @@ where .with_runtime_cache_size(parachain_config.runtime_cache_size) .build(); - let executor = ::new_with_wasm_executor(wasm); + let executor = ExecutorOf::::new_with_wasm_executor(wasm); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( + sc_service::new_full_parts::, RuntimeApiOf, _>( parachain_config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, @@ -229,22 +229,15 @@ where } } -impl - NodeBuilder< - Block, - RuntimeApi, - ParachainNativeExecutor, - SNetwork, - STxHandler, - SImportQueueService, - > +impl + NodeBuilder where - Block: cumulus_primitives_core::BlockT, - ParachainNativeExecutor: NativeExecutionDispatch + 'static, - RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static, - T![ConstructedRuntimeApi]: TaggedTransactionQueue - + BlockBuilder - + cumulus_primitives_core::CollectCollationInfo, + BlockOf: cumulus_primitives_core::BlockT, + ParachainNativeExecutorOf: NativeExecutionDispatch + 'static, + RuntimeApiOf: ConstructRuntimeApi, ClientOf> + Sync + Send + 'static, + ConstructedRuntimeApiOf: TaggedTransactionQueue> + + BlockBuilder> + + cumulus_primitives_core::CollectCollationInfo>, { #[must_use] pub async fn build_relay_chain_interface( @@ -278,16 +271,14 @@ where self, parachain_config: &Configuration, para_id: ParaId, - import_queue: impl ImportQueue + 'static, + import_queue: impl ImportQueue> + 'static, relay_chain_interface: RCInterface, ) -> sc_service::error::Result< NodeBuilder< - Block, - RuntimeApi, - ParachainNativeExecutor, - Network, - TransactionsHandlerController, - T![ImportQueueService], + T, + Network>, + TransactionsHandlerController>, + ImportQueueServiceOf, >, > where @@ -358,15 +349,13 @@ where pub fn build_substrate_network( self, parachain_config: &Configuration, - import_queue: impl ImportQueue 
+ 'static, + import_queue: impl ImportQueue> + 'static, ) -> sc_service::error::Result< NodeBuilder< - Block, - RuntimeApi, - ParachainNativeExecutor, - Network, - TransactionsHandlerController, - T![ImportQueueService], + T, + Network>, + TransactionsHandlerController>, + ImportQueueServiceOf, >, > where @@ -439,26 +428,17 @@ where SubscriptionTaskExecutor, ) -> Result, sc_service::Error>, >, - ) -> sc_service::error::Result< - NodeBuilder< - Block, - RuntimeApi, - ParachainNativeExecutor, - Network, - (), - SImportQueueService, - >, - > + ) -> sc_service::error::Result>, (), SImportQueueService>> where - SNetwork: TypeIdentity>, - STxHandler: TypeIdentity>, - Block::Hash: Unpin, - Block::Header: Unpin, - T![ConstructedRuntimeApi]: TaggedTransactionQueue - + BlockBuilder - + OffchainWorkerApi - + sp_api::Metadata - + sp_session::SessionKeys, + SNetwork: TypeIdentity>>, + STxHandler: TypeIdentity>>, + BlockHashOf: Unpin, + BlockHeaderOf: Unpin, + ConstructedRuntimeApiOf: TaggedTransactionQueue> + + BlockBuilder> + + OffchainWorkerApi> + + sp_api::Metadata> + + sp_session::SessionKeys>, { let NodeBuilder { client, @@ -555,12 +535,12 @@ where pub fn install_manual_seal( &mut self, - manual_seal_config: ManualSealConfiguration, - ) -> sc_service::error::Result>>> + manual_seal_config: ManualSealConfiguration, BI, SC, CIDP>, + ) -> sc_service::error::Result>>>> where - BI: BlockImport + Send + Sync + 'static, - SC: SelectChain + 'static, - CIDP: CreateInherentDataProviders + 'static, + BI: BlockImport, Error = sp_consensus::Error> + Send + Sync + 'static, + SC: SelectChain> + 'static, + CIDP: CreateInherentDataProviders, ()> + 'static, { let ManualSealConfiguration { sealing, @@ -592,7 +572,7 @@ where } let commands_stream: Box< - dyn Stream> + Send + Sync + Unpin, + dyn Stream>> + Send + Sync + Unpin, > = match sealing { Sealing::Instant => { Box::new( @@ -649,12 +629,10 @@ where para_id: ParaId, relay_chain_interface: RCInterface, relay_chain_slot_duration: Duration, - ) -> sc_service::error::Result< - NodeBuilder, - > + ) -> sc_service::error::Result> where - SNetwork: TypeIdentity>, - SImportQueueService: TypeIdentity, + SNetwork: TypeIdentity>>, + SImportQueueService: TypeIdentity>, RCInterface: RelayChainInterface + Clone + 'static, { let NodeBuilder { diff --git a/node/src/service.rs b/node/src/service.rs index fb2679e20..ce3cc6efc 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -99,6 +99,13 @@ impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { } } +struct ClientConfig; +impl node_common::service::Config for ClientConfig { + type Block = Block; + type RuntimeApi = RuntimeApi; + type ParachainNativeExecutor = ParachainNativeExecutor; +} + type ParachainExecutor = NativeElseWasmExecutor; pub type ParachainClient = TFullClient; @@ -296,7 +303,7 @@ async fn start_node_impl( // Create a `NodeBuilder` which helps setup parachain nodes common systems. let mut node_builder = - node_common::service::NodeBuilder::new(¶chain_config, hwbench.clone())?; + node_common::service::NodeBuilder::::new(¶chain_config, hwbench.clone())?; // The nimbus import queue ONLY checks the signature correctness // Any other checks corresponding to the author-correctness should be done @@ -1272,7 +1279,8 @@ pub fn start_dev_node( let parachain_config = prepare_node_config(orchestrator_config); // Create a `NodeBuilder` which helps setup parachain nodes common systems. 
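    // With the `Config` trait introduced above, a single type parameter now
    // bundles `Block`, `RuntimeApi` and the native executor, replacing the
    // three separate generics used before.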
- let node_builder = node_common::service::NodeBuilder::new(¶chain_config, hwbench.clone())?; + let node_builder = + node_common::service::NodeBuilder::::new(¶chain_config, hwbench.clone())?; // This node block import. let block_import = DevParachainBlockImport::new(node_builder.client.clone()); From 3a3297852048959c99a1cf1125cfbf847817ac99 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Wed, 8 Nov 2023 11:30:12 +0100 Subject: [PATCH 16/29] new_builder trait fn --- client/node-common/src/service.rs | 19 ++++++++++++++++++- node/src/service.rs | 11 +++++------ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index 18782baeb..1e4648c20 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -61,6 +61,23 @@ pub trait Config { type Block; type RuntimeApi; type ParachainNativeExecutor; + + #[must_use] + fn new_builder( + parachain_config: &Configuration, + hwbench: Option, + ) -> Result, sc_service::Error> + where + Self: Sized, + BlockOf: cumulus_primitives_core::BlockT, + ParachainNativeExecutorOf: NativeExecutionDispatch + 'static, + RuntimeApiOf: + ConstructRuntimeApi, ClientOf> + Sync + Send + 'static, + ConstructedRuntimeApiOf: + TaggedTransactionQueue> + BlockBuilder>, + { + NodeBuilder::::new(parachain_config, hwbench) + } } pub type BlockOf = ::Block; @@ -149,7 +166,7 @@ where /// network-dependent objects (as it requires an import queue, which usually /// is different for each node). #[must_use] - pub fn new( + fn new( parachain_config: &Configuration, hwbench: Option, ) -> Result { diff --git a/node/src/service.rs b/node/src/service.rs index ce3cc6efc..f86a32351 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -45,6 +45,7 @@ use { frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE, futures::{channel::mpsc, FutureExt, StreamExt}, nimbus_primitives::NimbusPair, + node_common::service::Config as NodeBuilderConfig, node_common::service::{ManualSealConfiguration, NodeBuilder, Sealing}, pallet_registrar_runtime_api::RegistrarApi, polkadot_cli::ProvideRuntimeApi, @@ -99,8 +100,8 @@ impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { } } -struct ClientConfig; -impl node_common::service::Config for ClientConfig { +struct NodeConfig; +impl NodeBuilderConfig for NodeConfig { type Block = Block; type RuntimeApi = RuntimeApi; type ParachainNativeExecutor = ParachainNativeExecutor; @@ -302,8 +303,7 @@ async fn start_node_impl( let (cc_spawn_tx, cc_spawn_rx) = unbounded_channel(); // Create a `NodeBuilder` which helps setup parachain nodes common systems. - let mut node_builder = - node_common::service::NodeBuilder::::new(¶chain_config, hwbench.clone())?; + let mut node_builder = NodeConfig::new_builder(¶chain_config, hwbench.clone())?; // The nimbus import queue ONLY checks the signature correctness // Any other checks corresponding to the author-correctness should be done @@ -1279,8 +1279,7 @@ pub fn start_dev_node( let parachain_config = prepare_node_config(orchestrator_config); // Create a `NodeBuilder` which helps setup parachain nodes common systems. - let node_builder = - node_common::service::NodeBuilder::::new(¶chain_config, hwbench.clone())?; + let node_builder = NodeConfig::new_builder(¶chain_config, hwbench.clone())?; // This node block import. 
let block_import = DevParachainBlockImport::new(node_builder.client.clone()); From e47a688270d5889e2da2e28829b984a274177f30 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Wed, 8 Nov 2023 16:42:27 +0100 Subject: [PATCH 17/29] start_collator --- Cargo.lock | 1 + client/node-common/Cargo.toml | 1 + client/node-common/src/service.rs | 121 +++++++++++++++- node/src/service.rs | 222 ++++++++---------------------- 4 files changed, 183 insertions(+), 162 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e24a199ae..56d54396c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6854,6 +6854,7 @@ dependencies = [ "clap", "core_extensions", "cumulus-client-cli", + "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", "cumulus-client-network", diff --git a/client/node-common/Cargo.toml b/client/node-common/Cargo.toml index 78b7c44d5..1da6af0cc 100644 --- a/client/node-common/Cargo.toml +++ b/client/node-common/Cargo.toml @@ -74,6 +74,7 @@ polkadot-service = { workspace = true } # Cumulus cumulus-client-cli = { workspace = true } +cumulus-client-collator = { workspace = true } cumulus-client-consensus-aura = { workspace = true } cumulus-client-consensus-common = { workspace = true } cumulus-client-network = { workspace = true } diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index 1e4648c20..15c8b4342 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -14,11 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Tanssi. If not, see . +use sc_service::SpawnTaskHandle; + use { async_io::Timer, core::time::Duration, core_extensions::TypeIdentity, cumulus_client_cli::CollatorOptions, + cumulus_client_consensus_common::ParachainConsensus, cumulus_client_service::{ build_relay_chain_interface, CollatorSybilResistance, StartFullNodeParams, }, @@ -50,6 +53,7 @@ use { sp_api::ConstructRuntimeApi, sp_block_builder::BlockBuilder, sp_consensus::SelectChain, + sp_core::traits::SpawnEssentialNamed, sp_inherents::CreateInherentDataProviders, sp_offchain::OffchainWorkerApi, sp_runtime::Percent, @@ -91,6 +95,7 @@ pub type BackendOf = TFullBackend>; pub type ConstructedRuntimeApiOf = as ConstructRuntimeApi, ClientOf>>::RuntimeApi; pub type ImportQueueServiceOf = Box>>; +pub type ParachainConsensusOf = Box>>; // `Cumulus` and `TxHandler` are types that will change during the life of // a `NodeBuilder` because they are generated and consumed when calling @@ -641,7 +646,7 @@ where Ok(command_sink) } - pub fn start_full_node<'a, RCInterface>( + pub fn start_full_node( self, para_id: ParaId, relay_chain_interface: RCInterface, @@ -710,6 +715,120 @@ where import_queue_service: (), }) } + + pub async fn start_collator( + self, + para_id: ParaId, + relay_chain_interface: RCInterface, + relay_chain_slot_duration: Duration, + parachain_consensus: ParachainConsensusOf, + collator_key: CollatorPair, + ) -> sc_service::error::Result> + where + SNetwork: TypeIdentity>>, + SImportQueueService: TypeIdentity>, + RCInterface: RelayChainInterface + Clone + 'static, + { + let NodeBuilder { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + mut task_manager, + keystore_container, + hwbench, + prometheus_registry, + network, + tx_handler_controller, + import_queue_service, + } = self; + + let network = TypeIdentity::into_type(network); + let import_queue_service = TypeIdentity::into_type(import_queue_service); + + let 
spawner = task_manager.spawn_handle(); + let announce_block = { + let sync_service = network.sync_service.clone(); + Arc::new(move |hash, data| sync_service.announce_block(hash, data)) + }; + let overseer_handle = relay_chain_interface + .overseer_handle() + .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + + let params = cumulus_client_service::StartCollatorParams { + para_id, + block_status: client.clone(), + announce_block: announce_block.clone(), + client: client.clone(), + task_manager: &mut task_manager, + relay_chain_interface: relay_chain_interface.clone(), + spawner: spawner.clone(), + parachain_consensus: parachain_consensus, + import_queue: import_queue_service, + collator_key, + relay_chain_slot_duration, + recovery_handle: Box::new(overseer_handle.clone()), + sync_service: network.sync_service.clone(), + }; + + // TODO: change for async backing + #[allow(deprecated)] + cumulus_client_service::start_collator(params).await?; + + Ok(NodeBuilder { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + task_manager, + keystore_container, + hwbench, + prometheus_registry, + network: TypeIdentity::from_type(network), + tx_handler_controller, + import_queue_service: (), + }) + } + + pub fn cumulus_client_collator_params_generator( + &self, + para_id: ParaId, + overseer_handle: cumulus_relay_chain_interface::OverseerHandle, + collator_key: CollatorPair, + parachain_consensus: ParachainConsensusOf, + ) -> impl Fn() -> cumulus_client_collator::StartCollatorParams< + BlockOf, + ClientOf, + ClientOf, + SpawnTaskHandle, + > + Send + + Clone + + 'static + where + SNetwork: TypeIdentity>>, + { + let network = TypeIdentity::as_type(&self.network); + + let client = self.client.clone(); + let announce_block = { + let sync_service = network.sync_service.clone(); + Arc::new(move |hash, data| sync_service.announce_block(hash, data)) + }; + let spawner = self.task_manager.spawn_handle(); + + move || cumulus_client_collator::StartCollatorParams { + runtime_api: client.clone(), + block_status: client.clone(), + announce_block: announce_block.clone(), + overseer_handle: overseer_handle.clone(), + spawner: spawner.clone(), + para_id: para_id.clone(), + key: collator_key.clone(), + parachain_consensus: parachain_consensus.clone(), + } + } } /// Block authoring scheme to be used by the dev service. 
diff --git a/node/src/service.rs b/node/src/service.rs index f86a32351..f09aed6b6 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -323,19 +323,13 @@ async fn start_node_impl( false, )?; - // let params = new_partial(¶chain_config)?; - // let (block_import, mut telemetry, telemetry_worker_handle) = params.other; - - // let client = params.client.clone(); - // let backend = params.backend.clone(); - // let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = node_builder .build_relay_chain_interface(¶chain_config, polkadot_config, collator_options.clone()) .await?; - // let force_authoring = parachain_config.force_authoring; let validator = parachain_config.role.is_authority(); + let force_authoring = parachain_config.force_authoring; + // let prometheus_registry = parachain_config.prometheus_registry().cloned(); // let transaction_pool = params.transaction_pool.clone(); // let import_queue_service = params.import_queue.service(); @@ -383,169 +377,76 @@ async fn start_node_impl( let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; - // if parachain_config.offchain_worker.enabled { - // node_builder.task_manager.spawn_handle().spawn( - // "offchain-workers-runner", - // "offchain-work", - // sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { - // runtime_api_provider: node_builder.client.clone(), - // keystore: Some(node_builder.keystore_container.keystore()), - // offchain_db: node_builder.backend.offchain_storage(), - // transaction_pool: Some(OffchainTransactionPoolFactory::new( - // node_builder.transaction_pool.clone(), - // )), - // network_provider: node_builder.network.network.clone(), - // is_validator: parachain_config.role.is_authority(), - // enable_http_requests: false, - // custom_extensions: move |_| vec![], - // }) - // .run(node_builder.client.clone(), node_builder.task_manager.spawn_handle()) - // .boxed(), - // ); - // } - - // sc_service::spawn_tasks(sc_service::SpawnTasksParams { - // rpc_builder, - // client: client.clone(), - // transaction_pool: transaction_pool.clone(), - // task_manager: &mut task_manager, - // config: parachain_config, - // keystore: params.keystore_container.keystore(), - // backend: backend.clone(), - // network: network.clone(), - // system_rpc_tx, - // tx_handler_controller, - // telemetry: telemetry.as_mut(), - // sync_service: sync_service.clone(), - // })?; - - // if let Some(hwbench) = hwbench { - // sc_sysinfo::print_hwbench(&hwbench); - // // Here you can check whether the hardware meets your chains' requirements. Putting a link - // // in there and swapping out the requirements for your own are probably a good idea. The - // // requirements for a para-chain are dictated by its relay-chain. - // if !SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) && validator { - // log::warn!( - // "⚠️ The hardware does not meet the minimal requirements for role 'Authority'." 
- // ); - // } - - // if let Some(ref mut telemetry) = telemetry { - // let telemetry_handle = telemetry.handle(); - // task_manager.spawn_handle().spawn( - // "telemetry_hwbench", - // None, - // sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - // ); - // } - // } - - // let announce_block = { - // let sync_service = sync_service.clone(); - // Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - // }; - let relay_chain_slot_duration = Duration::from_secs(6); - let overseer_handle = relay_chain_interface .overseer_handle() .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - - // let sync_keystore = node_builder.keystore_container.keystore(); - // let mut collate_on_tanssi = None; + let sync_keystore = node_builder.keystore_container.keystore(); + let mut collate_on_tanssi = None; let mut node_builder = if validator { - todo!("node_builder.start_collator"); - - // let parachain_consensus = build_consensus_orchestrator( - // node_builder.client.clone(), - // block_import, - // node_builder.prometheus_registry.as_ref(), - // node_builder.telemetry.as_ref().map(|t| t.handle()), - // &node_builder.task_manager, - // relay_chain_interface.clone(), - // node_builder.transaction_pool, - // node_builder.network.sync_service.clone(), - // node_builder.keystore_container.keystore(), - // force_authoring, - // para_id, - // )?; - - // // Start task which detects para id assignment, and starts/stops container chains. - // // Note that if this node was started without a `container_chain_config`, we don't - // // support collation on container chains, so there is no need to detect changes to assignment - // if container_chain_config.is_some() { - // build_check_assigned_para_id( - // node_builder.client.clone(), - // sync_keystore.clone(), - // cc_spawn_tx.clone(), - // node_builder.ask_manager.spawn_essential_handle(), - // ); - // } - - // let spawner = task_manager.spawn_handle(); - // let params = StartCollatorParams { - // para_id, - // block_status: node_builder.client.clone(), - // announce_block: announce_block.clone(), - // client: node_builder.client.clone(), - // task_manager: &mut node_builder.task_manager, - // relay_chain_interface: relay_chain_interface.clone(), - // spawner: spawner.clone(), - // parachain_consensus: parachain_consensus.clone(), - // import_queue: import_queue_service, - // collator_key: collator_key - // .clone() - // .expect("Command line arguments do not allow this. qed"), - // relay_chain_slot_duration, - // recovery_handle: Box::new(overseer_handle.clone()), - // sync_service, - // }; - - // let client = client.clone(); - // let collator_key = collator_key.clone(); - // // TODO: change for async backing - // collate_on_tanssi = Some(move || async move { - // #[allow(deprecated)] - // cumulus_client_collator::start_collator(cumulus_client_collator::StartCollatorParams { - // runtime_api: client.clone(), - // block_status: client.clone(), - // announce_block, - // overseer_handle, - // spawner, - // para_id, - // key: collator_key - // .clone() - // .expect("Command line arguments do not allow this. qed"), - // parachain_consensus, - // }) - // .await; - // }); - - // // TODO: change for async backing - // #[allow(deprecated)] - // start_collator(params).await?; + let collator_key = collator_key + .clone() + .expect("Command line arguments do not allow this. 
qed"); + + let overseer_handle = relay_chain_interface + .overseer_handle() + .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + + // Start task which detects para id assignment, and starts/stops container chains. + // Note that if this node was started without a `container_chain_config`, we don't + // support collation on container chains, so there is no need to detect changes to assignment + if container_chain_config.is_some() { + build_check_assigned_para_id( + node_builder.client.clone(), + sync_keystore.clone(), + cc_spawn_tx.clone(), + node_builder.task_manager.spawn_essential_handle(), + ); + } + + let parachain_consensus = build_consensus_orchestrator( + node_builder.client.clone(), + block_import, + node_builder.prometheus_registry.as_ref(), + node_builder.telemetry.as_ref().map(|t| t.handle()), + &node_builder.task_manager, + relay_chain_interface.clone(), + node_builder.transaction_pool.clone(), + node_builder.network.sync_service.clone(), + node_builder.keystore_container.keystore(), + force_authoring, + para_id, + )?; + + let params_generator = node_builder.cumulus_client_collator_params_generator( + para_id, + overseer_handle, + collator_key.clone(), + parachain_consensus.clone(), + ); + + // TODO: change for async backing + collate_on_tanssi = Some(move || async move { + #[allow(deprecated)] + cumulus_client_collator::start_collator(params_generator()).await; + }); + + node_builder + .start_collator( + para_id, + relay_chain_interface.clone(), + relay_chain_slot_duration, + parachain_consensus, + collator_key, + ) + .await? } else { node_builder.start_full_node( para_id, relay_chain_interface.clone(), relay_chain_slot_duration, )? - // let params = StartFullNodeParams { - // client: client.clone(), - // announce_block, - // task_manager: &mut task_manager, - // para_id, - // relay_chain_interface: relay_chain_interface.clone(), - // relay_chain_slot_duration, - // import_queue: import_queue_service, - // recovery_handle: Box::new(overseer_handle), - // sync_service, - // }; - - // // TODO: change for async backing - // #[allow(deprecated)] - // start_full_node(params)?; }; node_builder.network.start_network.start_network(); @@ -591,8 +492,7 @@ async fn start_node_impl( validator, spawn_handle, state: Default::default(), - collate_on_tanssi: todo!(), - // collate_on_tanssi: Arc::new(move || Box::pin((collate_on_tanssi.clone().unwrap())())), + collate_on_tanssi: Arc::new(move || Box::pin((collate_on_tanssi.clone().unwrap())())), }; let state = container_chain_spawner.state.clone(); From 63adfc33422400e1dcb377e8b7499ea5ff836895 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Fri, 10 Nov 2023 15:42:11 +0100 Subject: [PATCH 18/29] update frontier template --- .../templates/frontier/node/src/cli.rs | 3 +- .../templates/frontier/node/src/service.rs | 645 ++++++------------ node/src/service.rs | 19 - 3 files changed, 224 insertions(+), 443 deletions(-) diff --git a/container-chains/templates/frontier/node/src/cli.rs b/container-chains/templates/frontier/node/src/cli.rs index d6d5313b7..e92d65cca 100644 --- a/container-chains/templates/frontier/node/src/cli.rs +++ b/container-chains/templates/frontier/node/src/cli.rs @@ -16,6 +16,7 @@ use { clap::Parser, + node_common::service::Sealing, sc_cli::{CliConfiguration, NodeKeyParams, SharedParams}, std::path::PathBuf, }; @@ -95,7 +96,7 @@ pub struct RunCmd { /// /// Options are "instant", "manual", or timer interval in milliseconds #[arg(long, default_value = "instant")] - pub sealing: 
crate::service::Sealing, + pub sealing: Sealing, } impl std::ops::Deref for RunCmd { diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs index 9314d9797..c6a50fc9b 100644 --- a/container-chains/templates/frontier/node/src/service.rs +++ b/container-chains/templates/frontier/node/src/service.rs @@ -16,61 +16,42 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. +#[allow(deprecated)] use { - cumulus_client_consensus_common::ParachainBlockImport, - sc_network::config::FullNetworkConfiguration, -}; -// std -use { + crate::client::TemplateRuntimeExecutor, + container_chain_template_frontier_runtime::{opaque::Block, RuntimeApi}, cumulus_client_cli::CollatorOptions, + cumulus_client_consensus_common::ParachainBlockImport, + cumulus_client_service::prepare_node_config, + cumulus_primitives_core::ParaId, cumulus_primitives_parachain_inherent::{ MockValidationDataInherentDataProvider, MockXcmConfig, }, fc_consensus::FrontierBlockImport, - futures::FutureExt, + fc_db::DatabaseSource, + fc_rpc_core::types::{FeeHistoryCache, FilterPool}, nimbus_primitives::NimbusId, - sc_client_api::Backend, - sc_transaction_pool_api::OffchainTransactionPoolFactory, + node_common::service::{Config as NodeBuilderConfig, ManualSealConfiguration, Sealing}, + sc_executor::NativeElseWasmExecutor, + sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}, + sc_telemetry::{Telemetry, TelemetryWorkerHandle}, sp_consensus_aura::SlotDuration, - sp_core::Pair, + sp_core::{Pair, H256}, std::{ collections::BTreeMap, sync::{Arc, Mutex}, time::Duration, }, }; -// Local Runtime Types -use { - container_chain_template_frontier_runtime::{opaque::Block, RuntimeApi}, - futures::StreamExt, -}; -// Cumulus Imports -#[allow(deprecated)] -use { - cumulus_client_service::{ - build_relay_chain_interface, prepare_node_config, start_full_node, CollatorSybilResistance, - StartFullNodeParams, - }, - cumulus_primitives_core::ParaId, - cumulus_relay_chain_interface::RelayChainInterface, -}; - -// Substrate Imports -use { - fc_db::DatabaseSource, - fc_rpc_core::types::{FeeHistoryCache, FilterPool}, - sc_consensus::ImportQueue, - sc_executor::NativeElseWasmExecutor, - sc_network::NetworkBlock, - sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}, - sc_telemetry::{Telemetry, TelemetryWorkerHandle}, -}; - -use node_common::service::NodeBuilder; +struct NodeConfig; +impl NodeBuilderConfig for NodeConfig { + type Block = Block; + type RuntimeApi = RuntimeApi; + type ParachainNativeExecutor = TemplateRuntimeExecutor; +} /// Native executor type. -use crate::client::TemplateRuntimeExecutor; pub type ParachainExecutor = NativeElseWasmExecutor; @@ -128,6 +109,27 @@ thread_local!(static TIMESTAMP: std::cell::RefCell = std::cell::RefCell::ne /// Provide a mock duration starting at 0 in millisecond for timestamp inherent. /// Each call will increment timestamp by slot_duration making Aura think time has passed. 
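/// Note: this provider is only used by the dev service below, where blocks
/// are sealed manually instead of on a real slot timer.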
struct MockTimestampInherentDataProvider; +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider { + async fn provide_inherent_data( + &self, + inherent_data: &mut sp_inherents::InherentData, + ) -> Result<(), sp_inherents::Error> { + TIMESTAMP.with(|x| { + *x.borrow_mut() += container_chain_template_frontier_runtime::SLOT_DURATION; + inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow()) + }) + } + + async fn try_handle_error( + &self, + _identifier: &sp_inherents::InherentIdentifier, + _error: &[u8], + ) -> Option> { + // The pallet never reports error. + None + } +} /// Starts a `ServiceBuilder` for a full service. /// @@ -235,53 +237,21 @@ async fn start_node_impl( rpc_config: crate::cli::RpcConfig, hwbench: Option, ) -> sc_service::error::Result<(TaskManager, Arc)> { - let mut parachain_config = prepare_node_config(parachain_config); - - let params = new_partial(&mut parachain_config, false)?; - let ( - _block_import, - filter_pool, - mut telemetry, - telemetry_worker_handle, - frontier_backend, - fee_history_cache, - ) = params.other; - - let client = params.client.clone(); - let backend = params.backend.clone(); - let mut task_manager = params.task_manager; - - let (relay_chain_interface, _collator_key) = build_relay_chain_interface( - polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; - - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let net_config = FullNetworkConfiguration::new(¶chain_config.network); - - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - cumulus_client_service::build_network(cumulus_client_service::BuildNetworkParams { - parachain_config: ¶chain_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue: params.import_queue, - para_id, - relay_chain_interface: relay_chain_interface.clone(), - net_config, - sybil_resistance_level: CollatorSybilResistance::Resistant, - }) - .await?; + let parachain_config = prepare_node_config(parachain_config); + + // Create a `NodeBuilder` which helps setup parachain nodes common systems. 
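+    // `NodeConfig` maps the shared builder onto this template's `Block`,
+    // `RuntimeApi` and `TemplateRuntimeExecutor` types (see the impl above).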
+ let mut node_builder = NodeConfig::new_builder(¶chain_config, hwbench.clone())?; - let overrides = crate::rpc::overrides_handle(client.clone()); + // Frontier specific stuff + let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new()))); + let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new())); + let frontier_backend = fc_db::Backend::KeyValue(open_frontier_backend( + node_builder.client.clone(), + ¶chain_config, + )?); + let frontier_block_import = + FrontierBlockImport::new(node_builder.client.clone(), node_builder.client.clone()); + let overrides = crate::rpc::overrides_handle(node_builder.client.clone()); let fee_history_limit = rpc_config.fee_history_limit; let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks< @@ -289,63 +259,75 @@ async fn start_node_impl( > = Default::default(); let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + // The parachain block import and import queue + let parachain_block_import = cumulus_client_consensus_common::ParachainBlockImport::new( + frontier_block_import, + node_builder.backend.clone(), + ); + let import_queue = nimbus_consensus::import_queue( + node_builder.client.clone(), + parachain_block_import.clone(), + move |_, _| async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); + + Ok((time,)) + }, + &node_builder.task_manager.spawn_essential_handle(), + parachain_config.prometheus_registry(), + false, + )?; + + // Relay chain interface + let (relay_chain_interface, _collator_key) = node_builder + .build_relay_chain_interface(¶chain_config, polkadot_config, collator_options.clone()) + .await?; + + // Build cumulus network, allowing to access network-related services. + let node_builder = node_builder + .build_cumulus_network( + ¶chain_config, + para_id, + import_queue, + relay_chain_interface.clone(), + ) + .await?; + crate::rpc::spawn_essential_tasks(crate::rpc::SpawnTasksParams { - task_manager: &task_manager, - client: client.clone(), - substrate_backend: backend.clone(), + task_manager: &node_builder.task_manager, + client: node_builder.client.clone(), + substrate_backend: node_builder.backend.clone(), frontier_backend: frontier_backend.clone(), filter_pool: filter_pool.clone(), overrides: overrides.clone(), fee_history_limit, fee_history_cache: fee_history_cache.clone(), - sync_service: sync_service.clone(), + sync_service: node_builder.network.sync_service.clone(), pubsub_notification_sinks: pubsub_notification_sinks.clone(), }); - if parachain_config.offchain_worker.enabled { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", - sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { - runtime_api_provider: client.clone(), - keystore: Some(params.keystore_container.keystore()), - offchain_db: backend.offchain_storage(), - transaction_pool: Some(OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - )), - network_provider: network.clone(), - is_validator: parachain_config.role.is_authority(), - enable_http_requests: false, - custom_extensions: move |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), - ); - } - let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new( - task_manager.spawn_handle(), + node_builder.task_manager.spawn_handle(), overrides.clone(), rpc_config.eth_log_block_cache, rpc_config.eth_statuses_cache, - prometheus_registry.clone(), + node_builder.prometheus_registry.clone(), )); let rpc_builder = { - let client = 
client.clone(); - let pool = transaction_pool.clone(); + let client = node_builder.client.clone(); + let pool = node_builder.transaction_pool.clone(); let pubsub_notification_sinks = pubsub_notification_sinks; - let network = network.clone(); - let sync = sync_service.clone(); + let network = node_builder.network.network.clone(); + let sync = node_builder.network.sync_service.clone(); let filter_pool = filter_pool.clone(); let frontier_backend = frontier_backend.clone(); - let backend = backend.clone(); + let backend = node_builder.backend.clone(); let max_past_logs = rpc_config.max_past_logs; let overrides = overrides; let fee_history_cache = fee_history_cache.clone(); let block_data_cache = block_data_cache; - move |deny_unsafe, subscription_task_executor| { + Box::new(move |deny_unsafe, subscription_task_executor| { let deps = crate::rpc::FullDeps { backend: backend.clone(), client: client.clone(), @@ -374,53 +356,21 @@ async fn start_node_impl( pubsub_notification_sinks.clone(), ) .map_err(Into::into) - } + }) }; - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder: Box::new(rpc_builder), - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend, - network, - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - sync_service: sync_service.clone(), - })?; - - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; + let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; let relay_chain_slot_duration = Duration::from_secs(6); - let params = StartFullNodeParams { - client: client.clone(), - announce_block, - task_manager: &mut task_manager, + let node_builder = node_builder.start_full_node( para_id, - relay_chain_interface, + relay_chain_interface.clone(), relay_chain_slot_duration, - import_queue: import_queue_service, - recovery_handle: Box::new(overseer_handle), - sync_service, - }; + )?; - // TODO: change for async backing - #[allow(deprecated)] - start_full_node(params)?; + node_builder.network.start_network.start_network(); - start_network.start_network(); - - Ok((task_manager, client)) + Ok((node_builder.task_manager, node_builder.client)) } /// Start a parachain node. @@ -455,266 +405,170 @@ use {sp_blockchain::HeaderBackend, std::str::FromStr}; /// Builds a new development service. This service uses manual seal, and mocks /// the parachain inherent. pub async fn start_dev_node( - mut config: Configuration, + mut parachain_config: Configuration, sealing: Sealing, rpc_config: crate::cli::RpcConfig, para_id: ParaId, hwbench: Option, ) -> Result { - use { - async_io::Timer, - futures::Stream, - sc_consensus_manual_seal::{run_manual_seal, EngineCommand, ManualSealParams}, - sp_core::H256, - }; + // TODO: Not present before, is this wanted and was forgotten? 
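+    // (For context: `prepare_node_config` swaps Substrate's default block
+    // announcement for the Cumulus one, which a relay-less dev node may not
+    // want; hence the question above.)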
+ // let parachain_config = prepare_node_config(parachain_config); - let sc_service::PartialComponents { - client, - backend, - mut task_manager, - import_queue, - keystore_container, - select_chain: maybe_select_chain, - transaction_pool, - other: - ( - block_import, - filter_pool, - mut telemetry, - _telemetry_worker_handle, - frontier_backend, - fee_history_cache, - ), - } = new_partial(&mut config, true)?; - - let net_config = FullNetworkConfiguration::new(&config.network); - - let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - block_announce_validator_builder: None, - warp_sync_params: None, - net_config, - })?; + // Create a `NodeBuilder` which helps setup parachain nodes common systems. + let node_builder = NodeConfig::new_builder(¶chain_config, hwbench.clone())?; - if config.offchain_worker.enabled { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", - sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { - runtime_api_provider: client.clone(), - keystore: Some(keystore_container.keystore()), - offchain_db: backend.offchain_storage(), - transaction_pool: Some(OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - )), - network_provider: network.clone(), - is_validator: config.role.is_authority(), - enable_http_requests: false, - custom_extensions: move |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), - ); - } - - let prometheus_registry = config.prometheus_registry().cloned(); - let overrides = crate::rpc::overrides_handle(client.clone()); + // Frontier specific stuff + let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new()))); + let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new())); + let frontier_backend = fc_db::Backend::KeyValue(open_frontier_backend( + node_builder.client.clone(), + ¶chain_config, + )?); + let frontier_block_import = + FrontierBlockImport::new(node_builder.client.clone(), node_builder.client.clone()); + let overrides = crate::rpc::overrides_handle(node_builder.client.clone()); let fee_history_limit = rpc_config.fee_history_limit; - let collator = config.role.is_authority(); - let mut command_sink = None; - let mut xcm_senders = None; let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks< fc_mapping_sync::EthereumBlockNotification, > = Default::default(); let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); - if collator { - let env = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool.clone(), - prometheus_registry.as_ref(), - telemetry.as_ref().map(|x| x.handle()), - ); + // The parachain block import and import queue + let parachain_block_import = cumulus_client_consensus_common::ParachainBlockImport::new( + frontier_block_import, + node_builder.backend.clone(), + ); + let import_queue = nimbus_consensus::import_queue( + node_builder.client.clone(), + parachain_block_import.clone(), + move |_, _| async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); + + Ok((time,)) + }, + &node_builder.task_manager.spawn_essential_handle(), + parachain_config.prometheus_registry(), + false, + )?; + + let validator = parachain_config.role.is_authority(); + let 
force_authoring = parachain_config.force_authoring; + + // Build a Substrate network (not cumulus, since it is a dev node: it mocks + // the relay chain) + let mut node_builder = node_builder.build_substrate_network(&parachain_config, import_queue)?; + + let mut command_sink = None; + let mut xcm_senders = None; - // Create channels for mocked XCM messages. + if parachain_config.role.is_authority() { + let client = node_builder.client.clone(); let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::<Vec<u8>>(100); let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec<u8>)>(100); xcm_senders = Some((downward_xcm_sender, hrmp_xcm_sender)); - let commands_stream: Box<dyn Stream<Item = EngineCommand<H256>> + Send + Sync + Unpin> = - match sealing { - Sealing::Instant => { - Box::new( - // This bit cribbed from the implementation of instant seal. - transaction_pool - .pool() - .validated_pool() - .import_notification_stream() - .map(|_| EngineCommand::SealNewBlock { - create_empty: false, - finalize: false, - parent_hash: None, - sender: None, - }), - ) - } - Sealing::Manual => { - let (sink, stream) = futures::channel::mpsc::channel(1000); - // Keep a reference to the other end of the channel. It goes to the RPC. - command_sink = Some(sink); - Box::new(stream) - } - Sealing::Interval(millis) => Box::new(StreamExt::map( - Timer::interval(Duration::from_millis(millis)), - |_| EngineCommand::SealNewBlock { - create_empty: true, - finalize: false, - parent_hash: None, - sender: None, - }, - )), - }; - - let select_chain = maybe_select_chain.expect( - "`new_partial` builds a `LongestChainRule` when building dev service.\ - We specified the dev service when calling `new_partial`.\ - Therefore, a `LongestChainRule` is present. qed.", - ); - - let client_set_aside_for_cidp = client.clone(); - - #[async_trait::async_trait] - impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider { - async fn provide_inherent_data( - &self, - inherent_data: &mut sp_inherents::InherentData, - ) -> Result<(), sp_inherents::Error> { - TIMESTAMP.with(|x| { - *x.borrow_mut() += container_chain_template_frontier_runtime::SLOT_DURATION; - inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow()) - }) - } - - async fn try_handle_error( - &self, - _identifier: &sp_inherents::InherentIdentifier, - _error: &[u8], - ) -> Option<Result<(), sp_inherents::Error>> { - // The pallet never reports errors.
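
An aside on the `Sealing::Manual` arm above: the sink half of that channel ends up in `command_sink` and goes to the RPC layer, which then authors blocks on demand. A minimal sketch of driving such a channel, using only the `futures` crate plus the `EngineCommand` shape shown above (`seal_one_block` is a hypothetical helper, not part of the node):

    use futures::{channel::mpsc, SinkExt};
    use sc_consensus_manual_seal::EngineCommand;
    use sp_core::H256;

    // Hypothetical helper: ask the manual-seal task to author one block now.
    async fn seal_one_block(
        mut sink: mpsc::Sender<EngineCommand<H256>>,
    ) -> Result<(), mpsc::SendError> {
        sink.send(EngineCommand::SealNewBlock {
            create_empty: true, // author even if the transaction pool is empty
            finalize: false,    // finality is handled separately in the dev node
            parent_hash: None,  // extend the current best block
            sender: None,       // fire-and-forget: no result channel needed
        })
        .await
    }

Instant sealing emits the same command on every transaction-pool import, while interval sealing emits it on a timer with `create_empty: true`.
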
- None - } - } - let authorities = vec![get_aura_id_from_seed("alice")]; - task_manager.spawn_essential_handle().spawn_blocking( - "authorship_task", - Some("block-authoring"), - run_manual_seal(ManualSealParams { - block_import, - env, - client: client.clone(), - pool: transaction_pool.clone(), - commands_stream, - select_chain, - consensus_data_provider: Some(Box::new(tc_consensus::ContainerManualSealAuraConsensusDataProvider::new( + command_sink = node_builder.install_manual_seal(ManualSealConfiguration { + block_import: parachain_block_import, + sealing, + soft_deadline: None, + select_chain: sc_consensus::LongestChain::new(node_builder.backend.clone()), + consensus_data_provider: Some(Box::new( + tc_consensus::ContainerManualSealAuraConsensusDataProvider::new( client.clone(), - keystore_container.keystore(), - SlotDuration::from_millis(container_chain_template_frontier_runtime::SLOT_DURATION), + node_builder.keystore_container.keystore(), + SlotDuration::from_millis( + container_chain_template_frontier_runtime::SLOT_DURATION, + ), authorities.clone(), - ))), - create_inherent_data_providers: move |block: H256, ()| { - let current_para_block = client_set_aside_for_cidp - .number(block) - .expect("Header lookup should succeed") - .expect("Header passed in as parent should be present in backend."); - - let client_for_xcm = client_set_aside_for_cidp.clone(); - let authorities_for_cidp = authorities.clone(); - - let downward_xcm_receiver = downward_xcm_receiver.clone(); - let hrmp_xcm_receiver = hrmp_xcm_receiver.clone(); - - async move { - let mocked_authorities_noting = - ccp_authorities_noting_inherent::MockAuthoritiesNotingInherentDataProvider { - current_para_block, - relay_offset: 1000, - relay_blocks_per_para_block: 2, - orchestrator_para_id: crate::chain_spec::ORCHESTRATOR, - container_para_id: para_id, - authorities: authorities_for_cidp - }; - - let time = MockTimestampInherentDataProvider; - let mocked_parachain = MockValidationDataInherentDataProvider { + ), + )), + create_inherent_data_providers: move |block: H256, ()| { + let current_para_block = client + .number(block) + .expect("Header lookup should succeed") + .expect("Header passed in as parent should be present in backend."); + + let client_for_xcm = client.clone(); + let authorities_for_cidp = authorities.clone(); + + let downward_xcm_receiver = downward_xcm_receiver.clone(); + let hrmp_xcm_receiver = hrmp_xcm_receiver.clone(); + + async move { + let mocked_authorities_noting = + ccp_authorities_noting_inherent::MockAuthoritiesNotingInherentDataProvider { current_para_block, relay_offset: 1000, relay_blocks_per_para_block: 2, - // TODO: Recheck - para_blocks_per_relay_epoch: 10, - relay_randomness_config: (), - xcm_config: MockXcmConfig::new( - &*client_for_xcm, - block, - para_id, - Default::default(), - ), - raw_downward_messages: downward_xcm_receiver.drain().collect(), - raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(), - additional_key_values: Some(mocked_authorities_noting.get_key_values()) - }; - - Ok((time, mocked_parachain, mocked_authorities_noting)) - } - }, - }), - ); + orchestrator_para_id: crate::chain_spec::ORCHESTRATOR, + container_para_id: para_id, + authorities: authorities_for_cidp + }; + + let time = MockTimestampInherentDataProvider; + let mocked_parachain = MockValidationDataInherentDataProvider { + current_para_block, + relay_offset: 1000, + relay_blocks_per_para_block: 2, + // TODO: Recheck + para_blocks_per_relay_epoch: 10, + relay_randomness_config: (), + xcm_config: 
MockXcmConfig::new( + &*client_for_xcm, + block, + para_id, + Default::default(), + ), + raw_downward_messages: downward_xcm_receiver.drain().collect(), + raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(), + additional_key_values: Some(mocked_authorities_noting.get_key_values()), + }; + + Ok((time, mocked_parachain, mocked_authorities_noting)) + } + }, + })?; } crate::rpc::spawn_essential_tasks(crate::rpc::SpawnTasksParams { - task_manager: &task_manager, - client: client.clone(), - substrate_backend: backend.clone(), + task_manager: &node_builder.task_manager, + client: node_builder.client.clone(), + substrate_backend: node_builder.backend.clone(), frontier_backend: frontier_backend.clone(), filter_pool: filter_pool.clone(), overrides: overrides.clone(), fee_history_limit, fee_history_cache: fee_history_cache.clone(), - sync_service: sync_service.clone(), + sync_service: node_builder.network.sync_service.clone(), pubsub_notification_sinks: pubsub_notification_sinks.clone(), }); let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new( - task_manager.spawn_handle(), + node_builder.task_manager.spawn_handle(), overrides.clone(), rpc_config.eth_log_block_cache, rpc_config.eth_statuses_cache, - prometheus_registry, + node_builder.prometheus_registry.clone(), )); let rpc_builder = { - let client = client.clone(); - let pool = transaction_pool.clone(); + let client = node_builder.client.clone(); + let pool = node_builder.transaction_pool.clone(); let pubsub_notification_sinks = pubsub_notification_sinks; - let network = network.clone(); - let sync = sync_service.clone(); + let network = node_builder.network.network.clone(); + let sync = node_builder.network.sync_service.clone(); let filter_pool = filter_pool; let frontier_backend = frontier_backend; - let backend = backend.clone(); + let backend = node_builder.backend.clone(); let max_past_logs = rpc_config.max_past_logs; let overrides = overrides; let fee_history_cache = fee_history_cache; let block_data_cache = block_data_cache; - move |deny_unsafe, subscription_task_executor| { + Box::new(move |deny_unsafe, subscription_task_executor| { let deps = crate::rpc::FullDeps { backend: backend.clone(), client: client.clone(), @@ -743,68 +597,13 @@ pub async fn start_dev_node( pubsub_notification_sinks.clone(), ) .map_err(Into::into) - } + }) }; - let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { - network, - client, - keystore: keystore_container.keystore(), - task_manager: &mut task_manager, - transaction_pool, - rpc_builder: Box::new(rpc_builder), - backend, - system_rpc_tx, - sync_service, - config, - tx_handler_controller, - telemetry: None, - })?; - - if let Some(hwbench) = hwbench { - sc_sysinfo::print_hwbench(&hwbench); - - if let Some(ref mut telemetry) = telemetry { - let telemetry_handle = telemetry.handle(); - task_manager.spawn_handle().spawn( - "telemetry_hwbench", - None, - sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), - ); - } - } + let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; log::info!("Development Service Ready"); - network_starter.start_network(); - Ok(task_manager) -} - -/// TODO: move it somewhere common, code duplication -/// Block authoring scheme to be used by the dev service. 
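
Stripped of node types, the mocked XCM transport drained above is just a pair of bounded flume channels: the dev RPC holds the senders, and each block's inherent closure drains whatever arrived since the previous block. A self-contained sketch of that pattern, assuming the flume crate as used above (a plain `u32` stands in for `ParaId`; capacity 100 matches the code):

    fn main() {
        // Mirrors the dev node's mocked downward/HRMP message queues.
        let (downward_tx, downward_rx) = flume::bounded::<Vec<u8>>(100);
        let (hrmp_tx, hrmp_rx) = flume::bounded::<(u32, Vec<u8>)>(100);

        // RPC side: inject one raw downward message and one HRMP message.
        downward_tx.send(vec![0u8; 3]).unwrap();
        hrmp_tx.send((2000, vec![0u8; 2])).unwrap();

        // Inherent side: drain everything received since the last block.
        let raw_downward_messages: Vec<Vec<u8>> = downward_rx.drain().collect();
        let raw_horizontal_messages: Vec<(u32, Vec<u8>)> = hrmp_rx.drain().collect();
        assert_eq!(raw_downward_messages.len(), 1);
        assert_eq!(raw_horizontal_messages.len(), 1);
    }
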
-#[derive(Debug, Copy, Clone)] -pub enum Sealing { - /// Author a block immediately upon receiving a transaction into the transaction pool - Instant, - /// Author a block upon receiving an RPC command - Manual, - /// Author blocks at a regular interval specified in milliseconds - Interval(u64), -} - -impl FromStr for Sealing { - type Err = String; - - fn from_str(s: &str) -> Result { - Ok(match s { - "instant" => Self::Instant, - "manual" => Self::Manual, - s => { - let millis = s - .parse::() - .map_err(|_| "couldn't decode sealing param")?; - Self::Interval(millis) - } - }) - } + node_builder.network.start_network.start_network(); + Ok(node_builder.task_manager) } diff --git a/node/src/service.rs b/node/src/service.rs index f09aed6b6..ca0811ffe 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -330,25 +330,6 @@ async fn start_node_impl( let validator = parachain_config.role.is_authority(); let force_authoring = parachain_config.force_authoring; - // let prometheus_registry = parachain_config.prometheus_registry().cloned(); - // let transaction_pool = params.transaction_pool.clone(); - // let import_queue_service = params.import_queue.service(); - // let net_config = FullNetworkConfiguration::new(¶chain_config.network); - - // let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - // cumulus_client_service::build_network(cumulus_client_service::BuildNetworkParams { - // parachain_config: ¶chain_config, - // client: client.clone(), - // transaction_pool: transaction_pool.clone(), - // spawn_handle: task_manager.spawn_handle(), - // import_queue: params.import_queue, - // para_id, - // relay_chain_interface: relay_chain_interface.clone(), - // net_config, - // sybil_resistance_level: CollatorSybilResistance::Resistant, - // }) - // .await?; - let node_builder = node_builder .build_cumulus_network( ¶chain_config, From bb2323681fb9d15b129a333df1beb61b33117a78 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Thu, 16 Nov 2023 11:46:07 +0100 Subject: [PATCH 19/29] method to not start a node yet --- client/node-common/src/service.rs | 40 +++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index 15c8b4342..57f73cda8 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -53,7 +53,6 @@ use { sp_api::ConstructRuntimeApi, sp_block_builder::BlockBuilder, sp_consensus::SelectChain, - sp_core::traits::SpawnEssentialNamed, sp_inherents::CreateInherentDataProviders, sp_offchain::OffchainWorkerApi, sp_runtime::Percent, @@ -167,7 +166,7 @@ where ConstructedRuntimeApiOf: TaggedTransactionQueue> + BlockBuilder>, { /// Create a new `NodeBuilder` which prepare objects required to launch a - /// node. However it doesn't start anything, and doesn't provide any + /// node. However it only starts telemetry, and doesn't provide any /// network-dependent objects (as it requires an import queue, which usually /// is different for each node). 
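
The `Sealing` parser removed from the template above (and re-homed in node-common, per its TODO) accepts exactly three spellings: "instant", "manual", or a bare millisecond count. A standalone replica of the parsing rule, with the enum copied in so the snippet compiles on its own:

    use std::str::FromStr;

    #[derive(Debug, Copy, Clone, PartialEq)]
    enum Sealing {
        Instant,
        Manual,
        Interval(u64),
    }

    impl FromStr for Sealing {
        type Err = String;

        fn from_str(s: &str) -> Result<Self, Self::Err> {
            Ok(match s {
                "instant" => Self::Instant,
                "manual" => Self::Manual,
                s => {
                    // `?` converts the &str error into String via From.
                    let millis = s.parse::<u64>().map_err(|_| "couldn't decode sealing param")?;
                    Self::Interval(millis)
                }
            })
        }
    }

    fn main() {
        assert_eq!("manual".parse::<Sealing>(), Ok(Sealing::Manual));
        assert_eq!("6000".parse::<Sealing>(), Ok(Sealing::Interval(6000)));
        assert!("sometimes".parse::<Sealing>().is_err());
    }
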
#[must_use] @@ -792,6 +791,43 @@ where }) } + pub fn dont_start_node_yet( + self, + ) -> NodeBuilder + where + SNetwork: TypeIdentity>>, + { + let NodeBuilder { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + task_manager, + keystore_container, + hwbench, + prometheus_registry, + network, + tx_handler_controller, + import_queue_service: _, + } = self; + + NodeBuilder { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + task_manager, + keystore_container, + hwbench, + prometheus_registry, + network, + tx_handler_controller, + import_queue_service: (), + } + } + pub fn cumulus_client_collator_params_generator( &self, para_id: ParaId, From 3934056c18342d286a09aa722ab168ebe7ad8b34 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Thu, 16 Nov 2023 16:19:22 +0100 Subject: [PATCH 20/29] start_node_impl_container + remove new_partial --- client/node-common/src/service.rs | 43 ++- node/src/command.rs | 31 +- node/src/container_chain_spawner.rs | 5 +- node/src/service.rs | 506 ++++++++-------------------- 4 files changed, 192 insertions(+), 393 deletions(-) diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index 57f73cda8..65e5d99ba 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -325,13 +325,14 @@ where let net_config = FullNetworkConfiguration::new(¶chain_config.network); let import_queue_service = import_queue.service(); + let spawn_handle = task_manager.spawn_handle(); let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = cumulus_client_service::build_network(cumulus_client_service::BuildNetworkParams { parachain_config: ¶chain_config, client: client.clone(), transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), + spawn_handle, import_queue: import_queue, para_id, relay_chain_interface: relay_chain_interface, @@ -791,9 +792,12 @@ where }) } - pub fn dont_start_node_yet( + pub fn extract_import_queue_service( self, - ) -> NodeBuilder + ) -> ( + NodeBuilder, + SImportQueueService, + ) where SNetwork: TypeIdentity>>, { @@ -809,23 +813,26 @@ where prometheus_registry, network, tx_handler_controller, - import_queue_service: _, + import_queue_service, } = self; - NodeBuilder { - client, - backend, - transaction_pool, - telemetry, - telemetry_worker_handle, - task_manager, - keystore_container, - hwbench, - prometheus_registry, - network, - tx_handler_controller, - import_queue_service: (), - } + ( + NodeBuilder { + client, + backend, + transaction_pool, + telemetry, + telemetry_worker_handle, + task_manager, + keystore_container, + hwbench, + prometheus_registry, + network, + tx_handler_controller, + import_queue_service: (), + }, + import_queue_service, + ) } pub fn cumulus_client_collator_params_generator( diff --git a/node/src/command.rs b/node/src/command.rs index 3c95cd581..ddffc9b04 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -18,13 +18,14 @@ use { crate::{ chain_spec, cli::{Cli, ContainerChainCli, RelayChainCli, Subcommand}, - service::{new_partial, IdentifyVariant}, + service::{self, IdentifyVariant, NodeConfig}, }, cumulus_client_cli::{extract_genesis_wasm, generate_genesis_block}, cumulus_primitives_core::ParaId, dancebox_runtime::Block, frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}, log::{info, warn}, + node_common::service::Config as _, parity_scale_codec::Encode, sc_cli::{ ChainSpec, CliConfiguration, 
DefaultConfigurationValues, ImportParams, KeystoreParams, @@ -231,9 +232,11 @@ macro_rules! construct_async_run { (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ let runner = $cli.create_runner($cmd)?; runner.async_run(|$config| { - let $components = new_partial(&$config)?; + let $components = NodeConfig::new_builder(&$config, None)?; + let inner = { $( $code )* }; + let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) + inner.map(|v| (v, task_manager)) }) }} } @@ -284,7 +287,8 @@ pub fn run() -> Result<()> { } Some(Subcommand::CheckBlock(cmd)) => { construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let (_, import_queue) = service::import_queue(&config, &components); + Ok(cmd.run(components.client, import_queue)) }) } Some(Subcommand::ExportBlocks(cmd)) => { @@ -299,7 +303,8 @@ pub fn run() -> Result<()> { } Some(Subcommand::ImportBlocks(cmd)) => { construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let (_, import_queue) = service::import_queue(&config, &components); + Ok(cmd.run(components.client, import_queue)) }) } Some(Subcommand::Revert(cmd)) => { @@ -331,8 +336,8 @@ pub fn run() -> Result<()> { Some(Subcommand::ExportGenesisState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| { - let partials = new_partial(&config)?; - cmd.run(&*config.chain_spec, &*partials.client) + let client = NodeConfig::new_builder(&config, None)?.client; + cmd.run(&*config.chain_spec, &*client) }) } Some(Subcommand::ExportGenesisWasm(params)) => { @@ -370,8 +375,8 @@ pub fn run() -> Result<()> { } } BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { - let partials = new_partial(&config)?; - cmd.run(partials.client) + let client = NodeConfig::new_builder(&config, None)?.client; + cmd.run(client) }), #[cfg(not(feature = "runtime-benchmarks"))] BenchmarkCmd::Storage(_) => Err(sc_cli::Error::Input( @@ -381,10 +386,10 @@ pub fn run() -> Result<()> { )), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { - let partials = new_partial(&config)?; - let db = partials.backend.expose_db(); - let storage = partials.backend.expose_storage(); - cmd.run(config, partials.client.clone(), db, storage) + let builder = NodeConfig::new_builder(&config, None)?; + let db = builder.backend.expose_db(); + let storage = builder.backend.expose_storage(); + cmd.run(config, builder.client, db, storage) }), BenchmarkCmd::Machine(cmd) => { runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())) diff --git a/node/src/container_chain_spawner.rs b/node/src/container_chain_spawner.rs index 5d524147d..6db612f8e 100644 --- a/node/src/container_chain_spawner.rs +++ b/node/src/container_chain_spawner.rs @@ -25,13 +25,14 @@ use { crate::{ cli::ContainerChainCli, container_chain_monitor::{SpawnedContainer, SpawnedContainersMonitor}, - service::{start_node_impl_container, ParachainClient}, + service::{start_node_impl_container, NodeConfig, ParachainClient}, }, cumulus_client_cli::generate_genesis_block, cumulus_primitives_core::ParaId, cumulus_relay_chain_interface::RelayChainInterface, dancebox_runtime::{AccountId, Block, BlockNumber}, futures::FutureExt, + node_common::service::Config, pallet_author_noting_runtime_api::AuthorNotingApi, pallet_registrar_runtime_api::RegistrarApi, polkadot_primitives::CollatorPair, @@ -626,7 +627,7 @@ fn 
open_and_maybe_delete_db( container_chain_cli: &ContainerChainCli, keep_db: bool, ) -> sc_service::error::Result<()> { - let temp_cli = crate::service::new_partial(&container_chain_cli_config).unwrap(); + let temp_cli = NodeConfig::new_builder(&container_chain_cli_config, None).unwrap(); // Check block diff, only needed if keep-db is false if !keep_db { diff --git a/node/src/service.rs b/node/src/service.rs index ca0811ffe..72d9dc586 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -29,21 +29,17 @@ use { ParachainConsensus, }, cumulus_client_pov_recovery::{PoVRecovery, RecoveryDelayRange}, - cumulus_client_service::{ - build_relay_chain_interface, prepare_node_config, start_collator, start_full_node, - CollatorSybilResistance, StartCollatorParams, StartFullNodeParams, - }, + cumulus_client_service::prepare_node_config, cumulus_primitives_core::{ relay_chain::{CollatorPair, Hash as PHash}, - CollectCollationInfo, ParaId, + ParaId, }, cumulus_primitives_parachain_inherent::{ MockValidationDataInherentDataProvider, MockXcmConfig, }, cumulus_relay_chain_interface::RelayChainInterface, dancebox_runtime::{opaque::Block, RuntimeApi}, - frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE, - futures::{channel::mpsc, FutureExt, StreamExt}, + futures::{channel::mpsc, StreamExt}, nimbus_primitives::NimbusPair, node_common::service::Config as NodeBuilderConfig, node_common::service::{ManualSealConfiguration, NodeBuilder, Sealing}, @@ -51,29 +47,21 @@ use { polkadot_cli::ProvideRuntimeApi, polkadot_service::Handle, sc_client_api::{ - AuxStore, Backend as BackendT, BlockBackend, BlockchainEvents, Finalizer, HeaderBackend, - UsageProvider, + AuxStore, Backend as BackendT, BlockchainEvents, HeaderBackend, UsageProvider, }, - sc_consensus::{BlockImport, ImportQueue}, + sc_consensus::BasicQueue, + sc_consensus::BlockImport, sc_executor::NativeElseWasmExecutor, - sc_network::{config::FullNetworkConfiguration, NetworkBlock}, + sc_network::NetworkBlock, sc_network_sync::SyncingService, - sc_service::{ - Configuration, Error as ServiceError, PartialComponents, TFullBackend, TFullClient, - TaskManager, - }, - sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorkerHandle}, - sc_transaction_pool_api::OffchainTransactionPoolFactory, + sc_service::{Configuration, TFullBackend, TFullClient, TaskManager}, + sc_telemetry::TelemetryHandle, sp_api::StorageProof, sp_consensus::SyncOracle, - sp_core::{ - traits::{SpawnEssentialNamed, SpawnNamed}, - H256, - }, + sp_core::{traits::SpawnEssentialNamed, H256}, sp_keystore::KeystorePtr, - sp_runtime::traits::Block as BlockT, sp_state_machine::{Backend as StateBackend, StorageValue}, - std::{future::Future, pin::Pin, str::FromStr, sync::Arc, time::Duration}, + std::{future::Future, pin::Pin, sync::Arc, time::Duration}, substrate_prometheus_endpoint::Registry, tc_consensus::{BuildOrchestratorAuraConsensusParams, OrchestratorAuraConsensus}, tc_orchestrator_chain_interface::{ @@ -83,7 +71,6 @@ use { }; type FullBackend = TFullBackend; -type MaybeSelectChain = Option>; /// Native executor type. pub struct ParachainNativeExecutor; @@ -100,7 +87,7 @@ impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { } } -struct NodeConfig; +pub struct NodeConfig; impl NodeBuilderConfig for NodeConfig { type Block = Block; type RuntimeApi = RuntimeApi; @@ -144,68 +131,6 @@ impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider { } } -/// Starts a `ServiceBuilder` for a full service. 
-/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. -pub fn new_partial( - config: &Configuration, -) -> Result< - PartialComponents< - ParachainClient, - ParachainBackend, - MaybeSelectChain, - sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, - ( - ParachainBlockImport, - Option, - Option, - ), - >, - sc_service::Error, -> { - todo!() - // let NodeBuilder { - // client, - // backend, - // transaction_pool, - // telemetry, - // telemetry_worker_handle, - // task_manager, - // keystore_container, - // } = node_common::service::NodeBuilder::new(config)?; - - // let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); - - // // The nimbus import queue ONLY checks the signature correctness - // // Any other checks corresponding to the author-correctness should be done - // // in the runtime - // let import_queue = nimbus_consensus::import_queue( - // client.clone(), - // block_import.clone(), - // move |_, _| async move { - // let time = sp_timestamp::InherentDataProvider::from_system_time(); - - // Ok((time,)) - // }, - // &task_manager.spawn_essential_handle(), - // config.prometheus_registry(), - // false, - // )?; - - // Ok(PartialComponents { - // backend, - // client, - // import_queue, - // keystore_container, - // task_manager, - // transaction_pool, - // select_chain: None, - // other: (block_import, telemetry, telemetry_worker_handle), - // }) -} - /// Background task used to detect changes to container chain assignment, /// and start/stop container chains on demand. The check runs on every new block. pub fn build_check_assigned_para_id( @@ -280,6 +205,33 @@ fn check_assigned_para_id( Ok(()) } +pub fn import_queue( + parachain_config: &Configuration, + node_builder: &NodeBuilder, +) -> (ParachainBlockImport, BasicQueue) { + // The nimbus import queue ONLY checks the signature correctness + // Any other checks corresponding to the author-correctness should be done + // in the runtime + let block_import = + ParachainBlockImport::new(node_builder.client.clone(), node_builder.backend.clone()); + + let import_queue = nimbus_consensus::import_queue( + node_builder.client.clone(), + block_import.clone(), + move |_, _| async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); + + Ok((time,)) + }, + &node_builder.task_manager.spawn_essential_handle(), + parachain_config.prometheus_registry(), + false, + ) + .expect("function never fails"); + + (block_import, import_queue) +} + /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. /// /// This is the actual implementation that is abstract over the executor and the runtime api. @@ -305,23 +257,7 @@ async fn start_node_impl( // Create a `NodeBuilder` which helps setup parachain nodes common systems. 
let mut node_builder = NodeConfig::new_builder(¶chain_config, hwbench.clone())?; - // The nimbus import queue ONLY checks the signature correctness - // Any other checks corresponding to the author-correctness should be done - // in the runtime - let block_import = - ParachainBlockImport::new(node_builder.client.clone(), node_builder.backend.clone()); - let import_queue = nimbus_consensus::import_queue( - node_builder.client.clone(), - block_import.clone(), - move |_, _| async move { - let time = sp_timestamp::InherentDataProvider::from_system_time(); - - Ok((time,)) - }, - &node_builder.task_manager.spawn_essential_handle(), - parachain_config.prometheus_registry(), - false, - )?; + let (block_import, import_queue) = import_queue(¶chain_config, &node_builder); let (relay_chain_interface, collator_key) = node_builder .build_relay_chain_interface(¶chain_config, polkadot_config, collator_options.clone()) @@ -365,7 +301,7 @@ async fn start_node_impl( let sync_keystore = node_builder.keystore_container.keystore(); let mut collate_on_tanssi = None; - let mut node_builder = if validator { + let node_builder = if validator { let collator_key = collator_key .clone() .expect("Command line arguments do not allow this. qed"); @@ -521,80 +457,28 @@ pub async fn start_node_impl_container( Option Pin + Send>> + Send + Sync>>, )> { let parachain_config = prepare_node_config(parachain_config); - let block_import; - let mut telemetry; - let client; - let backend; - let mut task_manager; - let transaction_pool; - let import_queue_service; - let params_import_queue; - let keystore_container; - { - // Some fields of params are not `Send`, and that causes problems with async/await. - // We take all the needed fields here inside a block to ensure that params - // gets dropped before the first instance of `.await`. - // Change this to use the syntax `PartialComponents { client, backend, .. } = params;` - // when this issue is fixed: - // https://github.com/rust-lang/rust/issues/104883 - let params = new_partial(¶chain_config)?; - let (l_block_import, l_telemetry, _telemetry_worker_handle) = params.other; - block_import = l_block_import; - telemetry = l_telemetry; - client = params.client.clone(); - backend = params.backend.clone(); - task_manager = params.task_manager; - transaction_pool = params.transaction_pool.clone(); - import_queue_service = params.import_queue.service(); - params_import_queue = params.import_queue; - keystore_container = params.keystore_container; - } - let spawn_handle = task_manager.spawn_handle(); + // Create a `NodeBuilder` which helps setup parachain nodes common systems. + let node_builder = NodeConfig::new_builder(¶chain_config, None)?; - let force_authoring = parachain_config.force_authoring; - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let net_config = FullNetworkConfiguration::new(¶chain_config.network); + let (block_import, import_queue) = import_queue(¶chain_config, &node_builder); log::info!("are we collators? 
{:?}", collator); - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - cumulus_client_service::build_network(cumulus_client_service::BuildNetworkParams { - parachain_config: ¶chain_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle, - import_queue: params_import_queue, + let node_builder = node_builder + .build_cumulus_network( + ¶chain_config, para_id, - relay_chain_interface: relay_chain_interface.clone(), - net_config, - sybil_resistance_level: CollatorSybilResistance::Resistant, - }) + import_queue, + relay_chain_interface.clone(), + ) .await?; - if parachain_config.offchain_worker.enabled { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", - sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { - runtime_api_provider: client.clone(), - keystore: Some(keystore_container.keystore()), - offchain_db: backend.offchain_storage(), - transaction_pool: Some(OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - )), - network_provider: network.clone(), - is_validator: parachain_config.role.is_authority(), - enable_http_requests: false, - custom_extensions: move |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), - ); - } + let force_authoring = parachain_config.force_authoring; + let prometheus_registry = parachain_config.prometheus_registry().cloned(); let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); + let client = node_builder.client.clone(); + let transaction_pool = node_builder.transaction_pool.clone(); Box::new(move |deny_unsafe, _| { let deps = crate::rpc::FullDeps { @@ -609,231 +493,133 @@ pub async fn start_node_impl_container( }) }; - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: keystore.clone(), - backend: backend.clone(), - network: network.clone(), - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - sync_service: sync_service.clone(), - })?; + let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; let announce_block = { - let sync_service = sync_service.clone(); + let sync_service = node_builder.network.sync_service.clone(); Arc::new(move |hash, data| sync_service.announce_block(hash, data)) }; let relay_chain_slot_duration = Duration::from_secs(6); - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; let mut start_collation: Option< Arc Pin + Send>> + Send + Sync>, > = None; - if collator { + let node_builder = if collator { + let (node_builder, import_queue) = node_builder.extract_import_queue_service(); + + let collator_key = collator_key + .clone() + .expect("Command line arguments do not allow this. 
qed"); + + let overseer_handle = relay_chain_interface + .overseer_handle() + .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + let parachain_consensus = build_consensus_container( - client.clone(), + node_builder.client.clone(), orchestrator_client.clone(), block_import, prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, + node_builder.telemetry.as_ref().map(|t| t.handle()), + &node_builder.task_manager, relay_chain_interface.clone(), orchestrator_chain_interface.clone(), - transaction_pool, - sync_service.clone(), + node_builder.transaction_pool.clone(), + node_builder.network.sync_service.clone(), keystore, force_authoring, para_id, orchestrator_para_id, )?; - let spawner = task_manager.spawn_handle(); - let params = StartCollatorParams { + // Given the sporadic nature of the explicit recovery operation and the + // possibility to retry infinite times this value is more than enough. + // In practice here we expect no more than one queued messages. + const RECOVERY_CHAN_SIZE: usize = 8; + + let (recovery_chan_tx, recovery_chan_rx) = mpsc::channel(RECOVERY_CHAN_SIZE); + + let consensus = cumulus_client_consensus_common::run_parachain_consensus( para_id, - block_status: client.clone(), - announce_block, - client: client.clone(), - task_manager: &mut task_manager, - relay_chain_interface, - spawner, - parachain_consensus, - import_queue: import_queue_service, - collator_key: collator_key.expect("Command line arguments do not allow this. qed"), - relay_chain_slot_duration, - recovery_handle: Box::new(overseer_handle), - sync_service, - }; + node_builder.client.clone(), + relay_chain_interface.clone(), + announce_block.clone(), + Some(recovery_chan_tx), + ); + + node_builder + .task_manager + .spawn_essential_handle() + .spawn_blocking("cumulus-consensus", None, consensus); + + let pov_recovery = PoVRecovery::new( + Box::new(overseer_handle.clone()), + // We want that collators wait at maximum the relay chain slot duration before starting + // to recover blocks. Additionally, we wait at least half the slot time to give the + // relay chain the chance to increase availability. + RecoveryDelayRange { + min: relay_chain_slot_duration / 2, + max: relay_chain_slot_duration, + }, + node_builder.client.clone(), + import_queue, + relay_chain_interface.clone(), + para_id, + recovery_chan_rx, + node_builder.network.sync_service.clone(), + ); + + node_builder.task_manager.spawn_essential_handle().spawn( + "cumulus-pov-recovery", + None, + pov_recovery.run(), + ); - // Need to deconstruct it because `StartCollatorParams` does not implement Clone - let cumulus_client_collator::StartCollatorParams { + let params_generator = node_builder.cumulus_client_collator_params_generator( para_id, - runtime_api, - block_status, - announce_block, overseer_handle, - spawner, - key, + collator_key, parachain_consensus, - } = partial_start_collator(params)?; - - let collate_closure = move || async move { - // Hack to fix logs, if this future is awaited by the ContainerChainSpawner thread, - // the logs will say "Orchestrator" instead of "Container-2000". - // Wrapping the future in this function fixes that. 
- #[sc_tracing::logging::prefix_logs_with(container_log_str(para_id))] - async fn wrap(para_id: ParaId, f: F) -> O - where - F: Future, - { - f.await - } + ); - // TODO: change for async backing - #[allow(deprecated)] - wrap( + // Hack to fix logs, if this future is awaited by the ContainerChainSpawner thread, + // the logs will say "Orchestrator" instead of "Container-2000". + // Wrapping the future in this function fixes that. + #[sc_tracing::logging::prefix_logs_with(container_log_str(para_id))] + async fn wrap(para_id: ParaId, f: F) -> O + where + F: Future, + { + f.await + } + + start_collation = Some(Arc::new(move || { + Box::pin(wrap( para_id, - cumulus_client_collator::start_collator( - cumulus_client_collator::StartCollatorParams { - para_id, - runtime_api, - block_status, - announce_block, - overseer_handle, - spawner, - key, - parachain_consensus, - }, - ), - ) - .await; - }; - start_collation = Some(Arc::new(move || Box::pin((collate_closure.clone())()))); + #[allow(deprecated)] + cumulus_client_collator::start_collator(params_generator()), + )) + })); + + node_builder } else { - let params = StartFullNodeParams { - client: client.clone(), - announce_block, - task_manager: &mut task_manager, + node_builder.start_full_node( para_id, - relay_chain_interface, + relay_chain_interface.clone(), relay_chain_slot_duration, - import_queue: import_queue_service, - recovery_handle: Box::new(overseer_handle), - sync_service, - }; - - // TODO: change for async backing - #[allow(deprecated)] - start_full_node(params)?; - } - - start_network.start_network(); - - Ok((task_manager, client, backend, start_collation)) -} - -// Copy of `cumulus_client_service::start_collator`, that doesn't fully start the collator: it is -// missing the final call to `cumulus_client_collator::start_collator`. -// Returns the params of the call to `cumulus_client_collator::start_collator`. -pub fn partial_start_collator<'a, Block, BS, Client, Backend, RCInterface, Spawner>( - StartCollatorParams { - block_status, - client, - announce_block, - spawner, - para_id, - task_manager, - relay_chain_interface, - parachain_consensus, - import_queue, - collator_key, - relay_chain_slot_duration, - recovery_handle, - sync_service, - }: StartCollatorParams<'a, Block, BS, Client, RCInterface, Spawner>, -) -> sc_service::error::Result< - cumulus_client_collator::StartCollatorParams, -> -where - Block: BlockT, - BS: BlockBackend + Send + Sync + 'static, - Client: Finalizer - + UsageProvider - + HeaderBackend - + Send - + Sync - + BlockBackend - + BlockchainEvents - + ProvideRuntimeApi - + 'static, - Client::Api: CollectCollationInfo, - for<'b> &'b Client: BlockImport, - Spawner: SpawnNamed + Clone + Send + Sync + 'static, - RCInterface: RelayChainInterface + Clone + 'static, - Backend: BackendT + 'static, -{ - // Given the sporadic nature of the explicit recovery operation and the - // possibility to retry infinite times this value is more than enough. - // In practice here we expect no more than one queued messages. 
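
Both the removed helper below and its inlined replacement above compute the PoV recovery window the same way: wait at least half a relay slot so availability has a chance to improve, and at most a full slot. The bound arithmetic in isolation, std-only (the 6-second value matches `relay_chain_slot_duration` above):

    use std::time::Duration;

    fn main() {
        let relay_chain_slot_duration = Duration::from_secs(6);
        // Mirrors the RecoveryDelayRange { min, max } built above.
        let min = relay_chain_slot_duration / 2; // give availability a chance
        let max = relay_chain_slot_duration;     // never delay a full extra slot
        assert_eq!(min, Duration::from_secs(3));
        assert!(min <= max);
    }
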
- const RECOVERY_CHAN_SIZE: usize = 8; - - let (recovery_chan_tx, recovery_chan_rx) = mpsc::channel(RECOVERY_CHAN_SIZE); - - let consensus = cumulus_client_consensus_common::run_parachain_consensus( - para_id, - client.clone(), - relay_chain_interface.clone(), - announce_block.clone(), - Some(recovery_chan_tx), - ); - - task_manager - .spawn_essential_handle() - .spawn_blocking("cumulus-consensus", None, consensus); - - let pov_recovery = PoVRecovery::new( - recovery_handle, - // We want that collators wait at maximum the relay chain slot duration before starting - // to recover blocks. Additionally, we wait at least half the slot time to give the - // relay chain the chance to increase availability. - RecoveryDelayRange { - min: relay_chain_slot_duration / 2, - max: relay_chain_slot_duration, - }, - client.clone(), - import_queue, - relay_chain_interface.clone(), - para_id, - recovery_chan_rx, - sync_service, - ); - - task_manager - .spawn_essential_handle() - .spawn("cumulus-pov-recovery", None, pov_recovery.run()); + )? + }; - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; + node_builder.network.start_network.start_network(); - Ok(cumulus_client_collator::StartCollatorParams { - runtime_api: client, - block_status, - announce_block, - overseer_handle, - spawner, - para_id, - key: collator_key, - parachain_consensus, - }) + Ok(( + node_builder.task_manager, + node_builder.client, + node_builder.backend, + start_collation, + )) } /// Build the import queue for the parachain runtime (manual seal). From edf8a833d5d55420b11dfdd253e0053fb74adbc8 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Thu, 16 Nov 2023 16:19:29 +0100 Subject: [PATCH 21/29] cleanup --- .../templates/frontier/node/src/service.rs | 7 ++----- .../templates/simple/node/src/service.rs | 21 +++++-------------- 2 files changed, 7 insertions(+), 21 deletions(-) diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs index c6a50fc9b..bca6a1547 100644 --- a/container-chains/templates/frontier/node/src/service.rs +++ b/container-chains/templates/frontier/node/src/service.rs @@ -35,6 +35,7 @@ use { sc_executor::NativeElseWasmExecutor, sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}, sc_telemetry::{Telemetry, TelemetryWorkerHandle}, + sp_blockchain::HeaderBackend, sp_consensus_aura::SlotDuration, sp_core::{Pair, H256}, std::{ @@ -401,11 +402,10 @@ fn get_aura_id_from_seed(seed: &str) -> NimbusId { .into() } -use {sp_blockchain::HeaderBackend, std::str::FromStr}; /// Builds a new development service. This service uses manual seal, and mocks /// the parachain inherent. pub async fn start_dev_node( - mut parachain_config: Configuration, + parachain_config: Configuration, sealing: Sealing, rpc_config: crate::cli::RpcConfig, para_id: ParaId, @@ -452,9 +452,6 @@ pub async fn start_dev_node( false, )?; - let validator = parachain_config.role.is_authority(); - let force_authoring = parachain_config.force_authoring; - // Build a Substrate Network. 
(not cumulus since it is a dev node, it mocks // the relaychain) let mut node_builder = node_builder.build_substrate_network(¶chain_config, import_queue)?; diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs index 4be4239e6..2cefdca04 100644 --- a/container-chains/templates/simple/node/src/service.rs +++ b/container-chains/templates/simple/node/src/service.rs @@ -16,19 +16,10 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. -// std -use std::{sync::Arc, time::Duration}; - -use {cumulus_client_cli::CollatorOptions, sc_network::config::FullNetworkConfiguration}; -// Local Runtime Types -use { - container_chain_template_simple_runtime::{opaque::Block, RuntimeApi}, - node_common::service::NodeBuilder, -}; - -// Cumulus Imports #[allow(deprecated)] use { + container_chain_template_simple_runtime::{opaque::Block, RuntimeApi}, + cumulus_client_cli::CollatorOptions, cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport, cumulus_client_service::{ build_relay_chain_interface, prepare_node_config, start_full_node, CollatorSybilResistance, @@ -36,18 +27,16 @@ use { }, cumulus_primitives_core::ParaId, cumulus_relay_chain_interface::RelayChainInterface, -}; - -// Substrate Imports -use { futures::FutureExt, sc_client_api::Backend, sc_consensus::ImportQueue, sc_executor::NativeElseWasmExecutor, + sc_network::config::FullNetworkConfiguration, sc_network::NetworkBlock, sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}, sc_telemetry::{Telemetry, TelemetryWorkerHandle}, sc_transaction_pool_api::OffchainTransactionPoolFactory, + std::{sync::Arc, time::Duration}, }; /// Native executor type. @@ -78,7 +67,7 @@ type ParachainBlockImport = TParachainBlockImport, P /// Use this macro if you don't actually need the full service, but just the builder in order to /// be able to perform chain operations. pub fn new_partial( - config: &Configuration, + _config: &Configuration, ) -> Result< PartialComponents< ParachainClient, From 7bf1fb20e514c1bd698c548cad43a4617779c4d5 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Mon, 20 Nov 2023 11:57:22 +0100 Subject: [PATCH 22/29] clippy --- client/node-common/src/service.rs | 24 +++++++------------ .../templates/frontier/node/src/rpc/mod.rs | 2 +- .../templates/frontier/node/src/service.rs | 2 +- node/src/container_chain_spawner.rs | 6 ++--- node/src/service.rs | 12 ++++------ 5 files changed, 18 insertions(+), 28 deletions(-) diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index 65e5d99ba..b571c65b5 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -65,7 +65,6 @@ pub trait Config { type RuntimeApi; type ParachainNativeExecutor; - #[must_use] fn new_builder( parachain_config: &Configuration, hwbench: Option, @@ -169,7 +168,6 @@ where /// node. However it only starts telemetry, and doesn't provide any /// network-dependent objects (as it requires an import queue, which usually /// is different for each node). 
- #[must_use] fn new( parachain_config: &Configuration, hwbench: Option, @@ -260,7 +258,6 @@ where + BlockBuilder> + cumulus_primitives_core::CollectCollationInfo>, { - #[must_use] pub async fn build_relay_chain_interface( &mut self, parachain_config: &Configuration, @@ -272,7 +269,7 @@ where )> { build_relay_chain_interface( polkadot_config, - ¶chain_config, + parachain_config, self.telemetry_worker_handle.clone(), &mut self.task_manager, collator_options.clone(), @@ -287,7 +284,6 @@ where /// /// Can only be called once on a `NodeBuilder` that doesn't have yet network /// data. - #[must_use] pub async fn build_cumulus_network( self, parachain_config: &Configuration, @@ -329,13 +325,13 @@ where let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = cumulus_client_service::build_network(cumulus_client_service::BuildNetworkParams { - parachain_config: ¶chain_config, + parachain_config, client: client.clone(), transaction_pool: transaction_pool.clone(), spawn_handle, - import_queue: import_queue, + import_queue, para_id, - relay_chain_interface: relay_chain_interface, + relay_chain_interface, net_config, sybil_resistance_level: CollatorSybilResistance::Resistant, }) @@ -367,7 +363,6 @@ where /// /// Can only be called once on a `NodeBuilder` that doesn't have yet network /// data. - #[must_use] pub fn build_substrate_network( self, parachain_config: &Configuration, @@ -409,7 +404,7 @@ where client: client.clone(), transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), - import_queue: import_queue, + import_queue, warp_sync_params: None, block_announce_validator_builder: None, net_config, @@ -440,7 +435,6 @@ where /// node. It consumes `self.tx_handler_controller` in the process, which means /// it can only be called once, and any other code that would need this /// controller should interact with it before calling this function. - #[must_use] pub fn spawn_common_tasks( self, parachain_config: Configuration, @@ -519,11 +513,11 @@ where })?; if let Some(hwbench) = &hwbench { - sc_sysinfo::print_hwbench(&hwbench); + sc_sysinfo::print_hwbench(hwbench); // Here you can check whether the hardware meets your chains' requirements. Putting a link // in there and swapping out the requirements for your own are probably a good idea. The // requirements for a para-chain are dictated by its relay-chain. - if collator && !SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) { + if collator && !SUBSTRATE_REFERENCE_HARDWARE.check_hardware(hwbench) { log::warn!( "⚠️ The hardware does not meet the minimal requirements for role 'Authority'." 
); @@ -689,7 +683,7 @@ where announce_block, task_manager: &mut task_manager, para_id, - relay_chain_interface: relay_chain_interface.clone(), + relay_chain_interface, relay_chain_slot_duration, import_queue: import_queue_service, recovery_handle: Box::new(overseer_handle), @@ -867,7 +861,7 @@ where announce_block: announce_block.clone(), overseer_handle: overseer_handle.clone(), spawner: spawner.clone(), - para_id: para_id.clone(), + para_id: para_id, key: collator_key.clone(), parachain_consensus: parachain_consensus.clone(), } diff --git a/container-chains/templates/frontier/node/src/rpc/mod.rs b/container-chains/templates/frontier/node/src/rpc/mod.rs index 6684bec26..248b281f3 100644 --- a/container-chains/templates/frontier/node/src/rpc/mod.rs +++ b/container-chains/templates/frontier/node/src/rpc/mod.rs @@ -206,7 +206,7 @@ where EthFilter::new( client.clone(), frontier_backend, - graph.clone(), + graph, filter_pool, 500_usize, // max stored filters max_past_logs, diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs index bca6a1547..f4e7983d6 100644 --- a/container-chains/templates/frontier/node/src/service.rs +++ b/container-chains/templates/frontier/node/src/service.rs @@ -415,7 +415,7 @@ pub async fn start_dev_node( // let parachain_config = prepare_node_config(parachain_config); // Create a `NodeBuilder` which helps setup parachain nodes common systems. - let node_builder = NodeConfig::new_builder(¶chain_config, hwbench.clone())?; + let node_builder = NodeConfig::new_builder(¶chain_config, hwbench)?; // Frontier specific stuff let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new()))); diff --git a/node/src/container_chain_spawner.rs b/node/src/container_chain_spawner.rs index 6db612f8e..6486b387a 100644 --- a/node/src/container_chain_spawner.rs +++ b/node/src/container_chain_spawner.rs @@ -648,7 +648,7 @@ fn open_and_maybe_delete_db( > max_block_diff_allowed { // if the diff is big, delete db and restart using warp sync - delete_container_chain_db(&db_path); + delete_container_chain_db(db_path); return Ok(()); } } @@ -678,7 +678,7 @@ fn open_and_maybe_delete_db( "Chain spec genesis {:?} did not match with any container genesis - Restarting...", container_client_genesis_hash ); - delete_container_chain_db(&db_path); + delete_container_chain_db(db_path); return Ok(()); } @@ -691,7 +691,7 @@ fn open_and_maybe_delete_db( // Collator2002-01/data/containers/chains/simple_container_2002 fn delete_container_chain_db(db_path: &Path) { if db_path.exists() { - std::fs::remove_dir_all(&db_path).expect("failed to remove old container chain db"); + std::fs::remove_dir_all(db_path).expect("failed to remove old container chain db"); } } diff --git a/node/src/service.rs b/node/src/service.rs index 72d9dc586..02b6c4d8a 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -306,10 +306,6 @@ async fn start_node_impl( .clone() .expect("Command line arguments do not allow this. qed"); - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - // Start task which detects para id assignment, and starts/stops container chains. 
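
In outline, that detection task is a loop over best-block notifications, re-running the assignment check once per block. A hedged skeleton over any notification stream (in the node the stream comes from the client's import notifications; the callback here is a stand-in for the real check):

    use futures::StreamExt;

    // Run a check on every new block notification until the stream ends.
    async fn check_on_every_block<S, N>(mut blocks: S, mut check: impl FnMut(&N))
    where
        S: futures::Stream<Item = N> + Unpin,
    {
        while let Some(notification) = blocks.next().await {
            check(&notification);
        }
    }

    fn main() {
        let blocks = futures::stream::iter(1u32..=3);
        futures::executor::block_on(check_on_every_block(blocks, |n| {
            println!("re-checking para id assignment at block {n}");
        }));
    }
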
// Note that if this node was started without a `container_chain_config`, we don't // support collation on container chains, so there is no need to detect changes to assignment @@ -338,7 +334,7 @@ async fn start_node_impl( let params_generator = node_builder.cumulus_client_collator_params_generator( para_id, - overseer_handle, + overseer_handle.clone(), collator_key.clone(), parachain_consensus.clone(), ); @@ -545,7 +541,7 @@ pub async fn start_node_impl_container( para_id, node_builder.client.clone(), relay_chain_interface.clone(), - announce_block.clone(), + announce_block, Some(recovery_chan_tx), ); @@ -946,7 +942,7 @@ pub fn start_dev_node( let parachain_config = prepare_node_config(orchestrator_config); // Create a `NodeBuilder` which helps setup parachain nodes common systems. - let node_builder = NodeConfig::new_builder(¶chain_config, hwbench.clone())?; + let node_builder = NodeConfig::new_builder(¶chain_config, hwbench)?; // This node block import. let block_import = DevParachainBlockImport::new(node_builder.client.clone()); @@ -1030,7 +1026,7 @@ pub fn start_dev_node( ), raw_downward_messages: downward_xcm_receiver.drain().collect(), raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(), - additional_key_values: Some(mocked_author_noting.get_key_values().clone()), + additional_key_values: Some(mocked_author_noting.get_key_values()), }; Ok((time, mocked_parachain, mocked_author_noting)) From b39b13d02547a7f56f0f4ace91d69b9beb17ae85 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Mon, 20 Nov 2023 14:47:26 +0100 Subject: [PATCH 23/29] remove new_partial in frontier template --- .../templates/frontier/node/src/command.rs | 30 +-- .../templates/frontier/node/src/service.rs | 194 +++++------------- 2 files changed, 64 insertions(+), 160 deletions(-) diff --git a/container-chains/templates/frontier/node/src/command.rs b/container-chains/templates/frontier/node/src/command.rs index 3fef78d05..dcb94666c 100644 --- a/container-chains/templates/frontier/node/src/command.rs +++ b/container-chains/templates/frontier/node/src/command.rs @@ -18,13 +18,14 @@ use { crate::{ chain_spec, cli::{Cli, RelayChainCli, Subcommand}, - service::{frontier_database_dir, new_partial}, + service::{self, frontier_database_dir, NodeBuilderConfig}, }, container_chain_template_frontier_runtime::Block, cumulus_client_cli::generate_genesis_block, cumulus_primitives_core::ParaId, frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}, log::{info, warn}, + node_common::service::Config as _, parity_scale_codec::Encode, polkadot_cli::IdentifyVariant, sc_cli::{ @@ -41,9 +42,10 @@ use { }; #[cfg(feature = "try-runtime")] -use crate::client::TemplateRuntimeExecutor; -#[cfg(feature = "try-runtime")] -use try_runtime_cli::block_building_info::substrate_info; +use { + crate::client::TemplateRuntimeExecutor, try_runtime_cli::block_building_info::substrate_info, +}; + #[cfg(feature = "try-runtime")] const SLOT_DURATION: u64 = 12; @@ -134,9 +136,11 @@ macro_rules! 
construct_async_run { (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ let runner = $cli.create_runner($cmd)?; runner.async_run(|mut $config| { - let $components = new_partial(&mut $config, false)?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) + let $components = NodeBuilderConfig::new_builder(&mut $config, None)?; + let inner = { $( $code )* }; + + let task_manager = $components.task_manager; + inner.map(|v| (v, task_manager)) }) }} } @@ -169,7 +173,8 @@ pub fn run() -> Result<()> { } Some(Subcommand::CheckBlock(cmd)) => { construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let (_, import_queue) = service::import_queue(&config, &components); + Ok(cmd.run(components.client, import_queue)) }) } Some(Subcommand::ExportBlocks(cmd)) => { @@ -184,7 +189,8 @@ pub fn run() -> Result<()> { } Some(Subcommand::ImportBlocks(cmd)) => { construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let (_, import_queue) = service::import_queue(&config, &components); + Ok(cmd.run(components.client, import_queue)) }) } Some(Subcommand::Revert(cmd)) => { @@ -232,7 +238,7 @@ pub fn run() -> Result<()> { Some(Subcommand::ExportGenesisState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|mut config| { - let partials = new_partial(&mut config, false)?; + let partials = NodeBuilderConfig::new_builder(&mut config, None)?; cmd.run(&*config.chain_spec, &*partials.client) }) } @@ -257,7 +263,7 @@ pub fn run() -> Result<()> { } } BenchmarkCmd::Block(cmd) => runner.sync_run(|mut config| { - let partials = new_partial(&mut config, false)?; + let partials = NodeBuilderConfig::new_builder(&mut config, None)?; cmd.run(partials.client) }), #[cfg(not(feature = "runtime-benchmarks"))] @@ -268,7 +274,7 @@ pub fn run() -> Result<()> { )), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|mut config| { - let partials = new_partial(&mut config, false)?; + let partials = NodeBuilderConfig::new_builder(&mut config, None)?; let db = partials.backend.expose_db(); let storage = partials.backend.expose_storage(); cmd.run(config, partials.client.clone(), db, storage) diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs index f4e7983d6..011742d82 100644 --- a/container-chains/templates/frontier/node/src/service.rs +++ b/container-chains/templates/frontier/node/src/service.rs @@ -21,7 +21,7 @@ use { crate::client::TemplateRuntimeExecutor, container_chain_template_frontier_runtime::{opaque::Block, RuntimeApi}, cumulus_client_cli::CollatorOptions, - cumulus_client_consensus_common::ParachainBlockImport, + cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport, cumulus_client_service::prepare_node_config, cumulus_primitives_core::ParaId, cumulus_primitives_parachain_inherent::{ @@ -31,10 +31,10 @@ use { fc_db::DatabaseSource, fc_rpc_core::types::{FeeHistoryCache, FilterPool}, nimbus_primitives::NimbusId, - node_common::service::{Config as NodeBuilderConfig, ManualSealConfiguration, Sealing}, + node_common::service::{Config as _, ManualSealConfiguration, NodeBuilder, Sealing}, + sc_consensus::BasicQueue, sc_executor::NativeElseWasmExecutor, - sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}, - sc_telemetry::{Telemetry, TelemetryWorkerHandle}, + 
sc_service::{Configuration, TFullBackend, TFullClient, TaskManager}, sp_blockchain::HeaderBackend, sp_consensus_aura::SlotDuration, sp_core::{Pair, H256}, @@ -45,23 +45,22 @@ use { }, }; -struct NodeConfig; -impl NodeBuilderConfig for NodeConfig { +pub type ParachainExecutor = NativeElseWasmExecutor; +type ParachainClient = TFullClient; +type ParachainBackend = TFullBackend; +type ParachainBlockImport = TParachainBlockImport< + Block, + FrontierBlockImport, ParachainClient>, + ParachainBackend, +>; + +pub struct NodeBuilderConfig; +impl node_common::service::Config for NodeBuilderConfig { type Block = Block; type RuntimeApi = RuntimeApi; type ParachainNativeExecutor = TemplateRuntimeExecutor; } -/// Native executor type. - -pub type ParachainExecutor = NativeElseWasmExecutor; - -type ParachainClient = TFullClient; - -type ParachainBackend = TFullBackend; - -type MaybeSelectChain = Option>; - pub fn frontier_database_dir(config: &Configuration, path: &str) -> std::path::PathBuf { let config_dir = config .base_path @@ -132,98 +131,33 @@ impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider { } } -/// Starts a `ServiceBuilder` for a full service. -/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. -pub fn new_partial( - _config: &mut Configuration, - _dev_service: bool, -) -> Result< - PartialComponents< - ParachainClient, - ParachainBackend, - MaybeSelectChain, - sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, - ( - ParachainBlockImport< - Block, - FrontierBlockImport, ParachainClient>, - ParachainBackend, - >, - Option, - Option, - Option, - fc_db::Backend, - FeeHistoryCache, - ), - >, - sc_service::Error, -> { - todo!() - - // Use ethereum style for subscription ids - // config.rpc_id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider)); - - // let NodeBuilder { - // client, - // backend, - // transaction_pool, - // telemetry, - // telemetry_worker_handle, - // task_manager, - // keystore_container, - // } = node_common::service::new_partial(config)?; - - // let maybe_select_chain = if dev_service { - // Some(sc_consensus::LongestChain::new(backend.clone())) - // } else { - // None - // }; - - // let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new()))); - // let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new())); - - // let frontier_backend = fc_db::Backend::KeyValue(open_frontier_backend(client.clone(), config)?); - - // let frontier_block_import = FrontierBlockImport::new(client.clone(), client.clone()); - - // let parachain_block_import = cumulus_client_consensus_common::ParachainBlockImport::new( - // frontier_block_import, - // backend.clone(), - // ); - - // let import_queue = nimbus_consensus::import_queue( - // client.clone(), - // parachain_block_import.clone(), - // move |_, _| async move { - // let time = sp_timestamp::InherentDataProvider::from_system_time(); - - // Ok((time,)) - // }, - // &task_manager.spawn_essential_handle(), - // config.prometheus_registry(), - // !dev_service, - // )?; - - // Ok(PartialComponents { - // backend, - // client, - // import_queue, - // keystore_container, - // task_manager, - // transaction_pool, - // select_chain: maybe_select_chain, - // other: ( - // parachain_block_import, - // filter_pool, - // telemetry, - // telemetry_worker_handle, - // frontier_backend, - // fee_history_cache, - // ), - // }) +pub fn import_queue( + parachain_config: &Configuration, + 
node_builder: &NodeBuilder, +) -> (ParachainBlockImport, BasicQueue) { + let frontier_block_import = + FrontierBlockImport::new(node_builder.client.clone(), node_builder.client.clone()); + + // The parachain block import and import queue + let block_import = cumulus_client_consensus_common::ParachainBlockImport::new( + frontier_block_import, + node_builder.backend.clone(), + ); + let import_queue = nimbus_consensus::import_queue( + node_builder.client.clone(), + block_import.clone(), + move |_, _| async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); + + Ok((time,)) + }, + &node_builder.task_manager.spawn_essential_handle(), + parachain_config.prometheus_registry(), + false, + ) + .expect("function never fails"); + + (block_import, import_queue) } /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. @@ -241,7 +175,7 @@ async fn start_node_impl( let parachain_config = prepare_node_config(parachain_config); // Create a `NodeBuilder` which helps setup parachain nodes common systems. - let mut node_builder = NodeConfig::new_builder(¶chain_config, hwbench.clone())?; + let mut node_builder = NodeBuilderConfig::new_builder(¶chain_config, hwbench.clone())?; // Frontier specific stuff let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new()))); @@ -250,8 +184,6 @@ async fn start_node_impl( node_builder.client.clone(), ¶chain_config, )?); - let frontier_block_import = - FrontierBlockImport::new(node_builder.client.clone(), node_builder.client.clone()); let overrides = crate::rpc::overrides_handle(node_builder.client.clone()); let fee_history_limit = rpc_config.fee_history_limit; @@ -260,23 +192,7 @@ async fn start_node_impl( > = Default::default(); let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); - // The parachain block import and import queue - let parachain_block_import = cumulus_client_consensus_common::ParachainBlockImport::new( - frontier_block_import, - node_builder.backend.clone(), - ); - let import_queue = nimbus_consensus::import_queue( - node_builder.client.clone(), - parachain_block_import.clone(), - move |_, _| async move { - let time = sp_timestamp::InherentDataProvider::from_system_time(); - - Ok((time,)) - }, - &node_builder.task_manager.spawn_essential_handle(), - parachain_config.prometheus_registry(), - false, - )?; + let (_, import_queue) = import_queue(¶chain_config, &node_builder); // Relay chain interface let (relay_chain_interface, _collator_key) = node_builder @@ -415,7 +331,7 @@ pub async fn start_dev_node( // let parachain_config = prepare_node_config(parachain_config); // Create a `NodeBuilder` which helps setup parachain nodes common systems. 
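// As in the commented-out `new_partial` above, the builder bundles the
// `client`, `backend`, `transaction_pool`, `telemetry`,
// `telemetry_worker_handle`, `task_manager` and `keystore_container`.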
- let node_builder = NodeConfig::new_builder(¶chain_config, hwbench)?; + let node_builder = NodeBuilderConfig::new_builder(¶chain_config, hwbench)?; // Frontier specific stuff let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new()))); @@ -424,8 +340,6 @@ pub async fn start_dev_node( node_builder.client.clone(), ¶chain_config, )?); - let frontier_block_import = - FrontierBlockImport::new(node_builder.client.clone(), node_builder.client.clone()); let overrides = crate::rpc::overrides_handle(node_builder.client.clone()); let fee_history_limit = rpc_config.fee_history_limit; @@ -434,23 +348,7 @@ pub async fn start_dev_node( > = Default::default(); let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); - // The parachain block import and import queue - let parachain_block_import = cumulus_client_consensus_common::ParachainBlockImport::new( - frontier_block_import, - node_builder.backend.clone(), - ); - let import_queue = nimbus_consensus::import_queue( - node_builder.client.clone(), - parachain_block_import.clone(), - move |_, _| async move { - let time = sp_timestamp::InherentDataProvider::from_system_time(); - - Ok((time,)) - }, - &node_builder.task_manager.spawn_essential_handle(), - parachain_config.prometheus_registry(), - false, - )?; + let (parachain_block_import, import_queue) = import_queue(¶chain_config, &node_builder); // Build a Substrate Network. (not cumulus since it is a dev node, it mocks // the relaychain) From 084253e3cd0dd88d45084e7deb7b479c398689c9 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Mon, 20 Nov 2023 15:09:24 +0100 Subject: [PATCH 24/29] remove new_partial from substrate template --- .../templates/frontier/node/src/service.rs | 2 +- .../templates/simple/node/src/command.rs | 21 +- .../templates/simple/node/src/service.rs | 242 +++++------------- 3 files changed, 74 insertions(+), 191 deletions(-) diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs index 011742d82..b6bd0d453 100644 --- a/container-chains/templates/frontier/node/src/service.rs +++ b/container-chains/templates/frontier/node/src/service.rs @@ -45,7 +45,7 @@ use { }, }; -pub type ParachainExecutor = NativeElseWasmExecutor; +type ParachainExecutor = NativeElseWasmExecutor; type ParachainClient = TFullClient; type ParachainBackend = TFullBackend; type ParachainBlockImport = TParachainBlockImport< diff --git a/container-chains/templates/simple/node/src/command.rs b/container-chains/templates/simple/node/src/command.rs index 2fce9c243..98d03c325 100644 --- a/container-chains/templates/simple/node/src/command.rs +++ b/container-chains/templates/simple/node/src/command.rs @@ -18,13 +18,14 @@ use { crate::{ chain_spec, cli::{Cli, RelayChainCli, Subcommand}, - service::new_partial, + service::{self, NodeBuilderConfig}, }, container_chain_template_simple_runtime::Block, cumulus_client_cli::generate_genesis_block, cumulus_primitives_core::ParaId, frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}, log::{info, warn}, + node_common::service::Config as _, parity_scale_codec::Encode, sc_cli::{ ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, @@ -130,9 +131,11 @@ macro_rules! 
construct_async_run { (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ let runner = $cli.create_runner($cmd)?; runner.async_run(|$config| { - let $components = new_partial(&$config)?; + let $components = NodeBuilderConfig::new_builder(&$config, None)?; + let inner = { $( $code )* }; + let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) + inner.map(|v| (v, task_manager)) }) }} } @@ -165,7 +168,8 @@ pub fn run() -> Result<()> { } Some(Subcommand::CheckBlock(cmd)) => { construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let (_, import_queue) = service::import_queue(&config, &components); + Ok(cmd.run(components.client, import_queue)) }) } Some(Subcommand::ExportBlocks(cmd)) => { @@ -180,7 +184,8 @@ pub fn run() -> Result<()> { } Some(Subcommand::ImportBlocks(cmd)) => { construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) + let (_, import_queue) = service::import_queue(&config, &components); + Ok(cmd.run(components.client, import_queue)) }) } Some(Subcommand::Revert(cmd)) => { @@ -212,7 +217,7 @@ pub fn run() -> Result<()> { Some(Subcommand::ExportGenesisState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| { - let partials = new_partial(&config)?; + let partials = NodeBuilderConfig::new_builder(&config, None)?; cmd.run(&*config.chain_spec, &*partials.client) }) } @@ -237,7 +242,7 @@ pub fn run() -> Result<()> { } } BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { - let partials = new_partial(&config)?; + let partials = NodeBuilderConfig::new_builder(&config, None)?; cmd.run(partials.client) }), #[cfg(not(feature = "runtime-benchmarks"))] @@ -248,7 +253,7 @@ pub fn run() -> Result<()> { )), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { - let partials = new_partial(&config)?; + let partials = NodeBuilderConfig::new_builder(&config, None)?; let db = partials.backend.expose_db(); let storage = partials.backend.expose_storage(); cmd.run(config, partials.client.clone(), db, storage) diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs index 2cefdca04..74d251d2d 100644 --- a/container-chains/templates/simple/node/src/service.rs +++ b/container-chains/templates/simple/node/src/service.rs @@ -16,26 +16,20 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
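//! With `new_partial` removed, common setup is delegated to
//! `node_common::service::NodeBuilder`; this file keeps only the
//! template-specific `import_queue` construction and the node start logic.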
+use { + node_common::service::{Config as _, NodeBuilder}, + sc_consensus::BasicQueue, +}; + #[allow(deprecated)] use { container_chain_template_simple_runtime::{opaque::Block, RuntimeApi}, cumulus_client_cli::CollatorOptions, cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport, - cumulus_client_service::{ - build_relay_chain_interface, prepare_node_config, start_full_node, CollatorSybilResistance, - StartFullNodeParams, - }, + cumulus_client_service::prepare_node_config, cumulus_primitives_core::ParaId, - cumulus_relay_chain_interface::RelayChainInterface, - futures::FutureExt, - sc_client_api::Backend, - sc_consensus::ImportQueue, sc_executor::NativeElseWasmExecutor, - sc_network::config::FullNetworkConfiguration, - sc_network::NetworkBlock, - sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager}, - sc_telemetry::{Telemetry, TelemetryWorkerHandle}, - sc_transaction_pool_api::OffchainTransactionPoolFactory, + sc_service::{Configuration, TFullBackend, TFullClient, TaskManager}, std::{sync::Arc, time::Duration}, }; @@ -55,77 +49,49 @@ impl sc_executor::NativeExecutionDispatch for ParachainNativeExecutor { } type ParachainExecutor = NativeElseWasmExecutor; - type ParachainClient = TFullClient; - type ParachainBackend = TFullBackend; - type ParachainBlockImport = TParachainBlockImport, ParachainBackend>; -/// Starts a `ServiceBuilder` for a full service. -/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. -pub fn new_partial( - _config: &Configuration, -) -> Result< - PartialComponents< - ParachainClient, - ParachainBackend, - (), - sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, - ( - ParachainBlockImport, - Option, - Option, - ), - >, - sc_service::Error, -> { - todo!() - // let NodeBuilder { - // client, - // backend, - // transaction_pool, - // telemetry, - // telemetry_worker_handle, - // task_manager, - // keystore_container, - // } = node_common::service::NodeBuilder::new(config)?; - - // let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); - - // let import_queue = nimbus_consensus::import_queue( - // client.clone(), - // block_import.clone(), - // move |_, _| async move { - // let time = sp_timestamp::InherentDataProvider::from_system_time(); +pub struct NodeBuilderConfig; +impl node_common::service::Config for NodeBuilderConfig { + type Block = Block; + type RuntimeApi = RuntimeApi; + type ParachainNativeExecutor = ParachainNativeExecutor; +} - // Ok((time,)) - // }, - // &task_manager.spawn_essential_handle(), - // config.prometheus_registry(), - // false, - // )?; +pub fn import_queue( + parachain_config: &Configuration, + node_builder: &NodeBuilder, +) -> (ParachainBlockImport, BasicQueue) { + // The nimbus import queue ONLY checks the signature correctness + // Any other checks corresponding to the author-correctness should be done + // in the runtime + let block_import = + ParachainBlockImport::new(node_builder.client.clone(), node_builder.backend.clone()); + + let import_queue = nimbus_consensus::import_queue( + node_builder.client.clone(), + block_import.clone(), + move |_, _| async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); + + Ok((time,)) + }, + &node_builder.task_manager.spawn_essential_handle(), + parachain_config.prometheus_registry(), + false, + ) + .expect("function never fails"); - // Ok(PartialComponents { - // backend, - // client, - // 
import_queue, - // keystore_container, - // task_manager, - // transaction_pool, - // select_chain: (), - // other: (block_import, telemetry, telemetry_worker_handle), - // }) + (block_import, import_queue) } /// Start a node with the given parachain `Configuration` and relay chain `Configuration`. /// /// This is the actual implementation that is abstract over the executor and the runtime api. #[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_node_impl( +pub async fn start_parachain_node( parachain_config: Configuration, polkadot_config: Configuration, collator_options: CollatorOptions, @@ -134,66 +100,29 @@ async fn start_node_impl( ) -> sc_service::error::Result<(TaskManager, Arc)> { let parachain_config = prepare_node_config(parachain_config); - let params = new_partial(¶chain_config)?; - let (_block_import, mut telemetry, telemetry_worker_handle) = params.other; + // Create a `NodeBuilder` which helps setup parachain nodes common systems. + let mut node_builder = NodeBuilderConfig::new_builder(¶chain_config, hwbench.clone())?; - let client = params.client.clone(); - let backend = params.backend.clone(); - let mut task_manager = params.task_manager; - - let (relay_chain_interface, _collator_key) = build_relay_chain_interface( - polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - collator_options.clone(), - hwbench.clone(), - ) - .await - .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?; + let (_, import_queue) = import_queue(¶chain_config, &node_builder); - let transaction_pool = params.transaction_pool.clone(); - let import_queue_service = params.import_queue.service(); - let net_config = FullNetworkConfiguration::new(¶chain_config.network); + // Relay chain interface + let (relay_chain_interface, _collator_key) = node_builder + .build_relay_chain_interface(¶chain_config, polkadot_config, collator_options.clone()) + .await?; - let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = - cumulus_client_service::build_network(cumulus_client_service::BuildNetworkParams { - parachain_config: ¶chain_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue: params.import_queue, + // Build cumulus network, allowing to access network-related services. 
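// Each build step consumes the builder and returns one with the
// network-related services filled in (see the `Network` struct in
// node-common); the network is later started through
// `node_builder.network.start_network`.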
+ let node_builder = node_builder + .build_cumulus_network( + ¶chain_config, para_id, - relay_chain_interface: relay_chain_interface.clone(), - net_config, - sybil_resistance_level: CollatorSybilResistance::Resistant, - }) + import_queue, + relay_chain_interface.clone(), + ) .await?; - if parachain_config.offchain_worker.enabled { - task_manager.spawn_handle().spawn( - "offchain-workers-runner", - "offchain-work", - sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { - runtime_api_provider: client.clone(), - keystore: Some(params.keystore_container.keystore()), - offchain_db: backend.offchain_storage(), - transaction_pool: Some(OffchainTransactionPoolFactory::new( - transaction_pool.clone(), - )), - network_provider: network.clone(), - is_validator: parachain_config.role.is_authority(), - enable_http_requests: false, - custom_extensions: move |_| vec![], - }) - .run(client.clone(), task_manager.spawn_handle()) - .boxed(), - ); - } - let rpc_builder = { - let client = client.clone(); - let transaction_pool = transaction_pool.clone(); + let client = node_builder.client.clone(); + let transaction_pool = node_builder.transaction_pool.clone(); Box::new(move |deny_unsafe, _| { let deps = crate::rpc::FullDeps { @@ -206,67 +135,16 @@ async fn start_node_impl( }) }; - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.keystore(), - backend, - network, - system_rpc_tx, - tx_handler_controller, - telemetry: telemetry.as_mut(), - sync_service: sync_service.clone(), - })?; - - let overseer_handle = relay_chain_interface - .overseer_handle() - .map_err(|e| sc_service::Error::Application(Box::new(e)))?; - - let announce_block = { - let sync_service = sync_service.clone(); - Arc::new(move |hash, data| sync_service.announce_block(hash, data)) - }; + let node_builder = node_builder.spawn_common_tasks(parachain_config, rpc_builder)?; let relay_chain_slot_duration = Duration::from_secs(6); - - let params = StartFullNodeParams { - client: client.clone(), - announce_block, - task_manager: &mut task_manager, + let node_builder = node_builder.start_full_node( para_id, - relay_chain_interface, + relay_chain_interface.clone(), relay_chain_slot_duration, - import_queue: import_queue_service, - recovery_handle: Box::new(overseer_handle), - sync_service, - }; + )?; - // TODO: change for async backing - #[allow(deprecated)] - start_full_node(params)?; + node_builder.network.start_network.start_network(); - start_network.start_network(); - - Ok((task_manager, client)) -} - -/// Start a parachain node. 
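// The thin `start_parachain_node` wrapper below is removed: `start_node_impl`
// was renamed to `start_parachain_node` and made public above, so callers now
// reach the implementation directly.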
-pub async fn start_parachain_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - para_id: ParaId, - hwbench: Option, -) -> sc_service::error::Result<(TaskManager, Arc)> { - start_node_impl( - parachain_config, - polkadot_config, - collator_options, - para_id, - hwbench, - ) - .await + Ok((node_builder.task_manager, node_builder.client)) } From 0f35386b2007ce79d856a00f88f1fbed963a955c Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Tue, 21 Nov 2023 13:49:42 +0100 Subject: [PATCH 25/29] cleanup --- client/node-common/src/service.rs | 23 +++++++++++-------- .../templates/frontier/node/src/command.rs | 2 +- .../templates/frontier/node/src/service.rs | 4 ++-- .../templates/simple/node/src/command.rs | 2 +- .../templates/simple/node/src/service.rs | 4 ++-- node/src/command.rs | 2 +- node/src/container_chain_spawner.rs | 2 +- node/src/service.rs | 2 +- 8 files changed, 22 insertions(+), 19 deletions(-) diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs index b571c65b5..3c5d9a964 100644 --- a/client/node-common/src/service.rs +++ b/client/node-common/src/service.rs @@ -14,8 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Tanssi. If not, see . -use sc_service::SpawnTaskHandle; - use { async_io::Timer, core::time::Duration, @@ -45,7 +43,8 @@ use { sc_network_transactions::TransactionsHandlerController, sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}, sc_service::{ - Configuration, KeystoreContainer, NetworkStarter, TFullBackend, TFullClient, TaskManager, + Configuration, KeystoreContainer, NetworkStarter, SpawnTaskHandle, TFullBackend, + TFullClient, TaskManager, }, sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle}, sc_transaction_pool_api::OffchainTransactionPoolFactory, @@ -60,11 +59,15 @@ use { std::{str::FromStr, sync::Arc}, }; -pub trait Config { +/// Trait to configure the main types the builder relies on, bundled in a single +/// type to reduce verbosity and the number of type parameters. +pub trait NodeBuilderConfig { type Block; type RuntimeApi; type ParachainNativeExecutor; + /// Create a new `NodeBuilder` using the types of this `NodeBuilderConfig`, along + /// with the parachain `Configuration` and an optional `HwBench`. fn new_builder( parachain_config: &Configuration, hwbench: Option, @@ -82,11 +85,11 @@ pub trait Config { } } -pub type BlockOf = ::Block; +pub type BlockOf = ::Block; pub type BlockHashOf = as cumulus_primitives_core::BlockT>::Hash; pub type BlockHeaderOf = as cumulus_primitives_core::BlockT>::Header; -pub type RuntimeApiOf = ::RuntimeApi; -pub type ParachainNativeExecutorOf = ::ParachainNativeExecutor; +pub type RuntimeApiOf = ::RuntimeApi; +pub type ParachainNativeExecutorOf = ::ParachainNativeExecutor; pub type ExecutorOf = NativeElseWasmExecutor>; pub type ClientOf = TFullClient, RuntimeApiOf, ExecutorOf>; pub type BackendOf = TFullBackend>; @@ -109,7 +112,7 @@ pub type ParachainConsensusOf = Box>>; // are still required since Rust can't infer the types in the `new` function // that doesn't take `self`. pub struct NodeBuilder< - T: Config, + T: NodeBuilderConfig, // `(cumulus_client_service/sc_service)::build_network` returns many important systems, // but can only be called with an `import_queue` which can be different in // each node.
For that reason it is a `()` when calling `new`, then the @@ -157,7 +160,7 @@ pub struct Network { // `new` function doesn't take self, and the Rust compiler cannot infer that // only one type T implements `TypeIdentity`. We thus need a separate impl // block with concrete types `()`. -impl NodeBuilder +impl NodeBuilder where BlockOf: cumulus_primitives_core::BlockT, ParachainNativeExecutorOf: NativeExecutionDispatch + 'static, @@ -248,7 +251,7 @@ where } } -impl +impl NodeBuilder where BlockOf: cumulus_primitives_core::BlockT, diff --git a/container-chains/templates/frontier/node/src/command.rs b/container-chains/templates/frontier/node/src/command.rs index dcb94666c..78ac4ab2f 100644 --- a/container-chains/templates/frontier/node/src/command.rs +++ b/container-chains/templates/frontier/node/src/command.rs @@ -25,7 +25,7 @@ use { cumulus_primitives_core::ParaId, frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}, log::{info, warn}, - node_common::service::Config as _, + node_common::service::NodeBuilderConfig as _, parity_scale_codec::Encode, polkadot_cli::IdentifyVariant, sc_cli::{ diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs index b6bd0d453..7ffa6ff77 100644 --- a/container-chains/templates/frontier/node/src/service.rs +++ b/container-chains/templates/frontier/node/src/service.rs @@ -31,7 +31,7 @@ use { fc_db::DatabaseSource, fc_rpc_core::types::{FeeHistoryCache, FilterPool}, nimbus_primitives::NimbusId, - node_common::service::{Config as _, ManualSealConfiguration, NodeBuilder, Sealing}, + node_common::service::{NodeBuilderConfig as _, ManualSealConfiguration, NodeBuilder, Sealing}, sc_consensus::BasicQueue, sc_executor::NativeElseWasmExecutor, sc_service::{Configuration, TFullBackend, TFullClient, TaskManager}, @@ -55,7 +55,7 @@ type ParachainBlockImport = TParachainBlockImport< >; pub struct NodeBuilderConfig; -impl node_common::service::Config for NodeBuilderConfig { +impl node_common::service::NodeBuilderConfig for NodeBuilderConfig { type Block = Block; type RuntimeApi = RuntimeApi; type ParachainNativeExecutor = TemplateRuntimeExecutor; diff --git a/container-chains/templates/simple/node/src/command.rs b/container-chains/templates/simple/node/src/command.rs index 98d03c325..c5999b438 100644 --- a/container-chains/templates/simple/node/src/command.rs +++ b/container-chains/templates/simple/node/src/command.rs @@ -25,7 +25,7 @@ use { cumulus_primitives_core::ParaId, frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}, log::{info, warn}, - node_common::service::Config as _, + node_common::service::NodeBuilderConfig as _, parity_scale_codec::Encode, sc_cli::{ ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs index 74d251d2d..2e16fe313 100644 --- a/container-chains/templates/simple/node/src/service.rs +++ b/container-chains/templates/simple/node/src/service.rs @@ -17,7 +17,7 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
use { - node_common::service::{Config as _, NodeBuilder}, + node_common::service::{NodeBuilderConfig as _, NodeBuilder}, sc_consensus::BasicQueue, }; @@ -54,7 +54,7 @@ type ParachainBackend = TFullBackend; type ParachainBlockImport = TParachainBlockImport, ParachainBackend>; pub struct NodeBuilderConfig; -impl node_common::service::Config for NodeBuilderConfig { +impl node_common::service::NodeBuilderConfig for NodeBuilderConfig { type Block = Block; type RuntimeApi = RuntimeApi; type ParachainNativeExecutor = ParachainNativeExecutor; diff --git a/node/src/command.rs b/node/src/command.rs index ddffc9b04..851d672f4 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -25,7 +25,7 @@ use { dancebox_runtime::Block, frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE}, log::{info, warn}, - node_common::service::Config as _, + node_common::service::NodeBuilderConfig as _, parity_scale_codec::Encode, sc_cli::{ ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, diff --git a/node/src/container_chain_spawner.rs b/node/src/container_chain_spawner.rs index 6486b387a..3a7b8a592 100644 --- a/node/src/container_chain_spawner.rs +++ b/node/src/container_chain_spawner.rs @@ -32,7 +32,7 @@ use { cumulus_relay_chain_interface::RelayChainInterface, dancebox_runtime::{AccountId, Block, BlockNumber}, futures::FutureExt, - node_common::service::Config, + node_common::service::NodeBuilderConfig, pallet_author_noting_runtime_api::AuthorNotingApi, pallet_registrar_runtime_api::RegistrarApi, polkadot_primitives::CollatorPair, diff --git a/node/src/service.rs b/node/src/service.rs index 02b6c4d8a..32b49994f 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -41,7 +41,7 @@ use { dancebox_runtime::{opaque::Block, RuntimeApi}, futures::{channel::mpsc, StreamExt}, nimbus_primitives::NimbusPair, - node_common::service::Config as NodeBuilderConfig, + node_common::service::NodeBuilderConfig as NodeBuilderConfig, node_common::service::{ManualSealConfiguration, NodeBuilder, Sealing}, pallet_registrar_runtime_api::RegistrarApi, polkadot_cli::ProvideRuntimeApi, From be5f9c13ef0f40aa10f4a45dc562f456d3f0ad30 Mon Sep 17 00:00:00 2001 From: girazoki Date: Wed, 22 Nov 2023 11:52:38 +0100 Subject: [PATCH 26/29] fmt --- container-chains/templates/frontier/node/src/service.rs | 2 +- container-chains/templates/simple/node/src/service.rs | 2 +- node/src/service.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs index 7ffa6ff77..ac0e7b446 100644 --- a/container-chains/templates/frontier/node/src/service.rs +++ b/container-chains/templates/frontier/node/src/service.rs @@ -31,7 +31,7 @@ use { fc_db::DatabaseSource, fc_rpc_core::types::{FeeHistoryCache, FilterPool}, nimbus_primitives::NimbusId, - node_common::service::{NodeBuilderConfig as _, ManualSealConfiguration, NodeBuilder, Sealing}, + node_common::service::{ManualSealConfiguration, NodeBuilder, NodeBuilderConfig as _, Sealing}, sc_consensus::BasicQueue, sc_executor::NativeElseWasmExecutor, sc_service::{Configuration, TFullBackend, TFullClient, TaskManager}, diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs index 2e16fe313..aad3aa939 100644 --- a/container-chains/templates/simple/node/src/service.rs +++ b/container-chains/templates/simple/node/src/service.rs @@ -17,7 +17,7 @@ //! 
Service and ServiceFactory implementation. Specialized wrapper over substrate service. use { - node_common::service::{NodeBuilderConfig as _, NodeBuilder}, + node_common::service::{NodeBuilder, NodeBuilderConfig as _}, sc_consensus::BasicQueue, }; diff --git a/node/src/service.rs b/node/src/service.rs index 32b49994f..0e47cafa3 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -41,7 +41,7 @@ use { dancebox_runtime::{opaque::Block, RuntimeApi}, futures::{channel::mpsc, StreamExt}, nimbus_primitives::NimbusPair, - node_common::service::NodeBuilderConfig as NodeBuilderConfig, + node_common::service::NodeBuilderConfig, node_common::service::{ManualSealConfiguration, NodeBuilder, Sealing}, pallet_registrar_runtime_api::RegistrarApi, polkadot_cli::ProvideRuntimeApi, From aeb34e960681499d0e4d07e91e5798042c089257 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Thu, 23 Nov 2023 11:39:52 +0100 Subject: [PATCH 27/29] rename --- .../templates/frontier/node/src/command.rs | 10 +++++----- .../templates/frontier/node/src/service.rs | 12 ++++++------ .../templates/simple/node/src/command.rs | 10 +++++----- .../templates/simple/node/src/service.rs | 15 ++++++--------- 4 files changed, 22 insertions(+), 25 deletions(-) diff --git a/container-chains/templates/frontier/node/src/command.rs b/container-chains/templates/frontier/node/src/command.rs index 78ac4ab2f..565e095e3 100644 --- a/container-chains/templates/frontier/node/src/command.rs +++ b/container-chains/templates/frontier/node/src/command.rs @@ -18,7 +18,7 @@ use { crate::{ chain_spec, cli::{Cli, RelayChainCli, Subcommand}, - service::{self, frontier_database_dir, NodeBuilderConfig}, + service::{self, frontier_database_dir, NodeConfig}, }, container_chain_template_frontier_runtime::Block, cumulus_client_cli::generate_genesis_block, @@ -136,7 +136,7 @@ macro_rules! 
construct_async_run { (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ let runner = $cli.create_runner($cmd)?; runner.async_run(|mut $config| { - let $components = NodeBuilderConfig::new_builder(&mut $config, None)?; + let $components = NodeConfig::new_builder(&mut $config, None)?; let inner = { $( $code )* }; let task_manager = $components.task_manager; @@ -238,7 +238,7 @@ pub fn run() -> Result<()> { Some(Subcommand::ExportGenesisState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|mut config| { - let partials = NodeBuilderConfig::new_builder(&mut config, None)?; + let partials = NodeConfig::new_builder(&mut config, None)?; cmd.run(&*config.chain_spec, &*partials.client) }) } @@ -263,7 +263,7 @@ pub fn run() -> Result<()> { } } BenchmarkCmd::Block(cmd) => runner.sync_run(|mut config| { - let partials = NodeBuilderConfig::new_builder(&mut config, None)?; + let partials = NodeConfig::new_builder(&mut config, None)?; cmd.run(partials.client) }), #[cfg(not(feature = "runtime-benchmarks"))] @@ -274,7 +274,7 @@ pub fn run() -> Result<()> { )), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|mut config| { - let partials = NodeBuilderConfig::new_builder(&mut config, None)?; + let partials = NodeConfig::new_builder(&mut config, None)?; let db = partials.backend.expose_db(); let storage = partials.backend.expose_storage(); cmd.run(config, partials.client.clone(), db, storage) diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs index ac0e7b446..7a35cecf9 100644 --- a/container-chains/templates/frontier/node/src/service.rs +++ b/container-chains/templates/frontier/node/src/service.rs @@ -31,7 +31,7 @@ use { fc_db::DatabaseSource, fc_rpc_core::types::{FeeHistoryCache, FilterPool}, nimbus_primitives::NimbusId, - node_common::service::{ManualSealConfiguration, NodeBuilder, NodeBuilderConfig as _, Sealing}, + node_common::service::{ManualSealConfiguration, NodeBuilder, NodeBuilderConfig, Sealing}, sc_consensus::BasicQueue, sc_executor::NativeElseWasmExecutor, sc_service::{Configuration, TFullBackend, TFullClient, TaskManager}, @@ -54,8 +54,8 @@ type ParachainBlockImport = TParachainBlockImport< ParachainBackend, >; -pub struct NodeBuilderConfig; -impl node_common::service::NodeBuilderConfig for NodeBuilderConfig { +pub struct NodeConfig; +impl NodeBuilderConfig for NodeConfig { type Block = Block; type RuntimeApi = RuntimeApi; type ParachainNativeExecutor = TemplateRuntimeExecutor; @@ -133,7 +133,7 @@ impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider { pub fn import_queue( parachain_config: &Configuration, - node_builder: &NodeBuilder, + node_builder: &NodeBuilder, ) -> (ParachainBlockImport, BasicQueue) { let frontier_block_import = FrontierBlockImport::new(node_builder.client.clone(), node_builder.client.clone()); @@ -175,7 +175,7 @@ async fn start_node_impl( let parachain_config = prepare_node_config(parachain_config); // Create a `NodeBuilder` which helps setup parachain nodes common systems. 
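// `NodeConfig` is the renamed chain-specific config type; it still implements
// the shared `NodeBuilderConfig` trait, so a caller keeps the same shape
// (hypothetical sketch, hwbench omitted):
//
//     let node_builder = NodeConfig::new_builder(&parachain_config, None)?;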
- let mut node_builder = NodeBuilderConfig::new_builder(¶chain_config, hwbench.clone())?; + let mut node_builder = NodeConfig::new_builder(¶chain_config, hwbench.clone())?; // Frontier specific stuff let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new()))); @@ -331,7 +331,7 @@ pub async fn start_dev_node( // let parachain_config = prepare_node_config(parachain_config); // Create a `NodeBuilder` which helps setup parachain nodes common systems. - let node_builder = NodeBuilderConfig::new_builder(¶chain_config, hwbench)?; + let node_builder = NodeConfig::new_builder(¶chain_config, hwbench)?; // Frontier specific stuff let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new()))); diff --git a/container-chains/templates/simple/node/src/command.rs b/container-chains/templates/simple/node/src/command.rs index c5999b438..82f1358a7 100644 --- a/container-chains/templates/simple/node/src/command.rs +++ b/container-chains/templates/simple/node/src/command.rs @@ -18,7 +18,7 @@ use { crate::{ chain_spec, cli::{Cli, RelayChainCli, Subcommand}, - service::{self, NodeBuilderConfig}, + service::{self, NodeConfig}, }, container_chain_template_simple_runtime::Block, cumulus_client_cli::generate_genesis_block, @@ -131,7 +131,7 @@ macro_rules! construct_async_run { (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ let runner = $cli.create_runner($cmd)?; runner.async_run(|$config| { - let $components = NodeBuilderConfig::new_builder(&$config, None)?; + let $components = NodeConfig::new_builder(&$config, None)?; let inner = { $( $code )* }; let task_manager = $components.task_manager; @@ -217,7 +217,7 @@ pub fn run() -> Result<()> { Some(Subcommand::ExportGenesisState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| { - let partials = NodeBuilderConfig::new_builder(&config, None)?; + let partials = NodeConfig::new_builder(&config, None)?; cmd.run(&*config.chain_spec, &*partials.client) }) } @@ -242,7 +242,7 @@ pub fn run() -> Result<()> { } } BenchmarkCmd::Block(cmd) => runner.sync_run(|config| { - let partials = NodeBuilderConfig::new_builder(&config, None)?; + let partials = NodeConfig::new_builder(&config, None)?; cmd.run(partials.client) }), #[cfg(not(feature = "runtime-benchmarks"))] @@ -253,7 +253,7 @@ pub fn run() -> Result<()> { )), #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| { - let partials = NodeBuilderConfig::new_builder(&config, None)?; + let partials = NodeConfig::new_builder(&config, None)?; let db = partials.backend.expose_db(); let storage = partials.backend.expose_storage(); cmd.run(config, partials.client.clone(), db, storage) diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs index aad3aa939..81b9dd1f2 100644 --- a/container-chains/templates/simple/node/src/service.rs +++ b/container-chains/templates/simple/node/src/service.rs @@ -16,11 +16,6 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
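//! The template-side config type is renamed to `NodeConfig` here, matching
//! the frontier template, while the shared trait keeps the
//! `NodeBuilderConfig` name.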
-use { - node_common::service::{NodeBuilder, NodeBuilderConfig as _}, - sc_consensus::BasicQueue, -}; - #[allow(deprecated)] use { container_chain_template_simple_runtime::{opaque::Block, RuntimeApi}, @@ -28,6 +23,8 @@ use { cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport, cumulus_client_service::prepare_node_config, cumulus_primitives_core::ParaId, + node_common::service::{NodeBuilder, NodeBuilderConfig}, + sc_consensus::BasicQueue, sc_executor::NativeElseWasmExecutor, sc_service::{Configuration, TFullBackend, TFullClient, TaskManager}, std::{sync::Arc, time::Duration}, @@ -53,8 +50,8 @@ type ParachainClient = TFullClient; type ParachainBackend = TFullBackend; type ParachainBlockImport = TParachainBlockImport, ParachainBackend>; -pub struct NodeBuilderConfig; -impl node_common::service::NodeBuilderConfig for NodeBuilderConfig { +pub struct NodeConfig; +impl NodeBuilderConfig for NodeConfig { type Block = Block; type RuntimeApi = RuntimeApi; type ParachainNativeExecutor = ParachainNativeExecutor; @@ -62,7 +59,7 @@ impl node_common::service::NodeBuilderConfig for NodeBuilderConfig { pub fn import_queue( parachain_config: &Configuration, - node_builder: &NodeBuilder, + node_builder: &NodeBuilder, ) -> (ParachainBlockImport, BasicQueue) { // The nimbus import queue ONLY checks the signature correctness // Any other checks corresponding to the author-correctness should be done @@ -101,7 +98,7 @@ pub async fn start_parachain_node( let parachain_config = prepare_node_config(parachain_config); // Create a `NodeBuilder` which helps setup parachain nodes common systems. - let mut node_builder = NodeBuilderConfig::new_builder(¶chain_config, hwbench.clone())?; + let mut node_builder = NodeConfig::new_builder(¶chain_config, hwbench.clone())?; let (_, import_queue) = import_queue(¶chain_config, &node_builder); From aba5f2fe42b294584779f75b3aca8409de8cedd9 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Fri, 24 Nov 2023 15:24:33 +0100 Subject: [PATCH 28/29] fmt --- node/src/service.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/node/src/service.rs b/node/src/service.rs index 395ceb7f4..e1c9db864 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -16,10 +16,6 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
-use dc_orchestrator_chain_interface::OrchestratorChainError; -use dc_orchestrator_chain_interface::OrchestratorChainInterface; -use dc_orchestrator_chain_interface::OrchestratorChainResult; - #[allow(deprecated)] use { crate::{ @@ -43,6 +39,9 @@ use { }, cumulus_relay_chain_interface::RelayChainInterface, dancebox_runtime::{opaque::Block, RuntimeApi}, + dc_orchestrator_chain_interface::{ + OrchestratorChainError, OrchestratorChainInterface, OrchestratorChainResult, + }, futures::{channel::mpsc, StreamExt}, nimbus_primitives::NimbusPair, node_common::service::NodeBuilderConfig, From 7595b1e50019584b19a9d0acf770b9d0ac226518 Mon Sep 17 00:00:00 2001 From: nanocryk <6422796+nanocryk@users.noreply.github.com> Date: Fri, 24 Nov 2023 15:54:32 +0100 Subject: [PATCH 29/29] toml-sort --- Cargo.toml | 24 +++++++-------- .../templates/frontier/runtime/Cargo.toml | 18 ++++++------ node/Cargo.toml | 2 +- pallets/author-noting/Cargo.toml | 6 ++-- pallets/authority-assignment/Cargo.toml | 4 +-- pallets/collator-assignment/Cargo.toml | 4 +-- pallets/inflation-rewards/Cargo.toml | 4 +-- pallets/pooled-staking/Cargo.toml | 4 +-- pallets/services-payment/Cargo.toml | 6 ++-- primitives/author-noting-inherent/Cargo.toml | 29 +++++++++++++++++-- primitives/traits/Cargo.toml | 2 +- runtime/dancebox/Cargo.toml | 14 ++++----- 12 files changed, 71 insertions(+), 46 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 10faa0add..5c4a79432 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,13 +48,13 @@ tp-maths = { path = "primitives/maths", default-features = false } tp-traits = { path = "primitives/traits", default-features = false } # Dancekit (wasm) -ccp-authorities-noting-inherent = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } -ccp-xcm = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } -dp-core = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } -dp-collator-assignment = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } -dp-chain-state-snapshot = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } -pallet-cc-authorities-noting = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } -test-relay-sproof-builder = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } +ccp-authorities-noting-inherent = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } +ccp-xcm = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } +dp-chain-state-snapshot = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } +dp-collator-assignment = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } +dp-core = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } +pallet-cc-authorities-noting = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } +test-relay-sproof-builder = { git = 
"https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0", default-features = false } # Dancekit (client) dc-orchestrator-chain-interface = { git = "https://github.com/moondance-labs/dancekit", branch = "tanssi-polkadot-v1.1.0" } @@ -63,13 +63,13 @@ dc-orchestrator-chain-interface = { git = "https://github.com/moondance-labs/dan nimbus-consensus = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0" } nimbus-primitives = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } pallet-author-inherent = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } -pallet-maintenance-mode = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } -pallet-migrations = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } -xcm-primitives = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } pallet-evm-precompile-balances-erc20 = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } pallet-evm-precompile-batch = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } pallet-evm-precompile-call-permit = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } pallet-evm-precompile-xcm-utils = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } +pallet-maintenance-mode = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } +pallet-migrations = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } +xcm-primitives = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false } # Substrate (wasm) frame-benchmarking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false } @@ -231,12 +231,12 @@ fc-storage = { git = "https://github.com/moondance-labs/frontier", branch = "tan # General (wasm) bounded-collections = { version = "0.1.8", default-features = false } hex-literal = { version = "0.3.4" } +impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } +num_enum = { version = "0.7.1", default-features = false } rand_chacha = { version = "0.3.1", default-features = false } serde = { version = "1.0.152", default-features = false } smallvec = "1.10.0" -num_enum = { version = "0.7.1", default-features = false } -impl-trait-for-tuples = "0.2.2" # General (client) async-io = "1.3" diff --git a/container-chains/templates/frontier/runtime/Cargo.toml b/container-chains/templates/frontier/runtime/Cargo.toml index 225ae9907..075aed67e 100644 --- a/container-chains/templates/frontier/runtime/Cargo.toml +++ b/container-chains/templates/frontier/runtime/Cargo.toml @@ -12,11 +12,11 @@ targets = [ "x86_64-unknown-linux-gnu" ] [dependencies] hex-literal = { workspace = true, optional = true } log = { workspace = true } +num_enum = { workspace = true } parity-scale-codec = { workspace = true, features = [ "derive" ] } scale-info = { workspace = true, features = [ "derive" ] } 
serde = { workspace = true, optional = true, features = [ "derive" ] } smallvec = { workspace = true } -num_enum = { workspace = true } # Local ccp-xcm = { workspace = true } @@ -26,13 +26,13 @@ tp-consensus = { workspace = true } # Moonkit nimbus-primitives = { workspace = true } pallet-author-inherent = { workspace = true } -pallet-maintenance-mode = { workspace = true, features = [ "xcm-support" ] } -pallet-migrations = { workspace = true } -xcm-primitives = { workspace = true } pallet-evm-precompile-balances-erc20 = { workspace = true } pallet-evm-precompile-batch = { workspace = true } pallet-evm-precompile-call-permit = { workspace = true } pallet-evm-precompile-xcm-utils = { workspace = true } +pallet-maintenance-mode = { workspace = true, features = [ "xcm-support" ] } +pallet-migrations = { workspace = true } +xcm-primitives = { workspace = true } # Substrate frame-executive = { workspace = true } @@ -129,14 +129,13 @@ std = [ "pallet-ethereum/std", "pallet-ethereum/std", "pallet-evm-chain-id/std", - "pallet-evm-precompile-modexp/std", - "pallet-evm-precompile-sha3fips/std", - "pallet-evm-precompile-simple/std", "pallet-evm-precompile-balances-erc20/std", "pallet-evm-precompile-batch/std", "pallet-evm-precompile-call-permit/std", + "pallet-evm-precompile-modexp/std", + "pallet-evm-precompile-sha3fips/std", + "pallet-evm-precompile-simple/std", "pallet-evm-precompile-xcm-utils/std", - "precompile-utils/std", "pallet-evm/std", "pallet-hotfix-sufficients/std", "pallet-maintenance-mode/std", @@ -151,6 +150,7 @@ std = [ "parity-scale-codec/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", + "precompile-utils/std", "scale-info/std", "serde", "sp-api/std", @@ -203,8 +203,8 @@ try-runtime = [ "pallet-evm/try-runtime", "pallet-hotfix-sufficients/try-runtime", "pallet-maintenance-mode/try-runtime", - "pallet-proxy/try-runtime", "pallet-migrations/try-runtime", + "pallet-proxy/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "pallet-transaction-payment/try-runtime", diff --git a/node/Cargo.toml b/node/Cargo.toml index ab7bf6a9a..e021abf10 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -33,8 +33,8 @@ pallet-registrar-runtime-api = { workspace = true, features = [ "std" ] } tp-author-noting-inherent = { workspace = true, features = [ "std" ] } tp-container-chain-genesis-data = { workspace = true, features = [ "json", "std" ] } -tc-consensus = { workspace = true } dc-orchestrator-chain-interface = { workspace = true } +tc-consensus = { workspace = true } # Nimbus nimbus-consensus = { workspace = true } diff --git a/pallets/author-noting/Cargo.toml b/pallets/author-noting/Cargo.toml index ab631960f..26179842d 100644 --- a/pallets/author-noting/Cargo.toml +++ b/pallets/author-noting/Cargo.toml @@ -27,9 +27,9 @@ sp-trie = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-primitives-core = { workspace = true } -tp-author-noting-inherent = { workspace = true } dp-chain-state-snapshot = { workspace = true } dp-core = { workspace = true } +tp-author-noting-inherent = { workspace = true } tp-traits = { workspace = true } [dev-dependencies] @@ -49,6 +49,8 @@ std = [ "cumulus-pallet-parachain-system/std", "cumulus-primitives-core/std", "cumulus-primitives-core/std", + "dp-chain-state-snapshot/std", + "dp-core/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", @@ -60,8 +62,6 @@ std = [ "sp-state-machine/std", "sp-trie/std", "tp-author-noting-inherent/std", - "dp-chain-state-snapshot/std", - 
"dp-core/std", "tp-traits/std", ] runtime-benchmarks = [ diff --git a/pallets/authority-assignment/Cargo.toml b/pallets/authority-assignment/Cargo.toml index bc013bd47..128361a16 100644 --- a/pallets/authority-assignment/Cargo.toml +++ b/pallets/authority-assignment/Cargo.toml @@ -9,6 +9,7 @@ version = "0.1.0" [package.metadata.docs.rs] targets = [ "x86_64-unknown-linux-gnu" ] [dependencies] +dp-collator-assignment = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } log = { workspace = true } @@ -18,7 +19,6 @@ serde = { workspace = true, optional = true, features = [ "derive" ] } sp-core = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } -dp-collator-assignment = { workspace = true } tp-traits = { workspace = true } [dev-dependencies] @@ -27,12 +27,12 @@ sp-io = { workspace = true } [features] default = [ "std" ] std = [ + "dp-collator-assignment/std", "frame-support/std", "frame-system/std", "parity-scale-codec/std", "scale-info/std", "serde", - "dp-collator-assignment/std", "tp-traits/std", ] try-runtime = [ "frame-support/try-runtime" ] diff --git a/pallets/collator-assignment/Cargo.toml b/pallets/collator-assignment/Cargo.toml index adc544cd9..1f04aea99 100644 --- a/pallets/collator-assignment/Cargo.toml +++ b/pallets/collator-assignment/Cargo.toml @@ -9,6 +9,7 @@ version = "0.1.0" [package.metadata.docs.rs] targets = [ "x86_64-unknown-linux-gnu" ] [dependencies] +dp-collator-assignment = { workspace = true } frame-benchmarking = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } @@ -21,7 +22,6 @@ serde = { workspace = true, optional = true, features = [ "derive" ] } sp-core = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } -dp-collator-assignment = { workspace = true } tp-traits = { workspace = true } [dev-dependencies] @@ -30,6 +30,7 @@ sp-io = { workspace = true } [features] default = [ "std" ] std = [ + "dp-collator-assignment/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", @@ -38,7 +39,6 @@ std = [ "serde", "sp-runtime/std", "sp-std/std", - "dp-collator-assignment/std", "tp-traits/std", ] runtime-benchmarks = [ diff --git a/pallets/inflation-rewards/Cargo.toml b/pallets/inflation-rewards/Cargo.toml index 9db236cfc..218297011 100644 --- a/pallets/inflation-rewards/Cargo.toml +++ b/pallets/inflation-rewards/Cargo.toml @@ -11,9 +11,9 @@ targets = [ "x86_64-unknown-linux-gnu" ] [dependencies] +dp-core = { workspace = true } log = { workspace = true } serde = { workspace = true, optional = true } -dp-core = { workspace = true } tp-traits = { workspace = true } # Substrate @@ -39,6 +39,7 @@ sp-io = { workspace = true, features = [ "std" ] } [features] default = [ "std" ] std = [ + "dp-core/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", @@ -49,7 +50,6 @@ std = [ "serde", "sp-runtime/std", "sp-std/std", - "dp-core/std", "tp-traits/std", ] runtime-benchmarks = [ "frame-benchmarking" ] diff --git a/pallets/pooled-staking/Cargo.toml b/pallets/pooled-staking/Cargo.toml index afdff6bd9..2088972ff 100644 --- a/pallets/pooled-staking/Cargo.toml +++ b/pallets/pooled-staking/Cargo.toml @@ -10,9 +10,9 @@ version = "0.1.0" targets = [ "x86_64-unknown-linux-gnu" ] [dependencies] +dp-core = { workspace = true } log = { workspace = true } serde = { workspace = true, optional = true } -dp-core = { workspace = true } tp-maths = { workspace = true } tp-traits = { workspace = true } @@ -38,6 +38,7 
@@ sp-io = { workspace = true, features = [ "std" ] } [features] default = [ "std" ] std = [ + "dp-core/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", @@ -47,7 +48,6 @@ std = [ "serde", "sp-runtime/std", "sp-std/std", - "dp-core/std", "tp-maths/std", "tp-traits/std", ] diff --git a/pallets/services-payment/Cargo.toml b/pallets/services-payment/Cargo.toml index 1e4c3870b..8bca212ea 100644 --- a/pallets/services-payment/Cargo.toml +++ b/pallets/services-payment/Cargo.toml @@ -9,6 +9,7 @@ version = "0.1.0" [package.metadata.docs.rs] targets = [ "x86_64-unknown-linux-gnu" ] [dependencies] +cumulus-primitives-core = { workspace = true } frame-benchmarking = { workspace = true, optional = true } frame-support = { workspace = true } frame-system = { workspace = true } @@ -17,9 +18,8 @@ parity-scale-codec = { workspace = true, features = [ "derive", "max-encoded-len scale-info = { workspace = true } serde = { workspace = true, optional = true, features = [ "derive" ] } sp-runtime = { workspace = true } -cumulus-primitives-core = { workspace = true } -tp-traits = { workspace = true } sp-std = { workspace = true } +tp-traits = { workspace = true } [dev-dependencies] pallet-balances = { workspace = true } @@ -38,5 +38,5 @@ std = [ "sp-runtime/std", "sp-std/std", ] +runtime-benchmarks = [ "frame-benchmarking", "tp-traits/runtime-benchmarks" ] try-runtime = [ "frame-support/try-runtime" ] -runtime-benchmarks = [ "frame-benchmarking", "tp-traits/runtime-benchmarks" ] \ No newline at end of file diff --git a/primitives/author-noting-inherent/Cargo.toml b/primitives/author-noting-inherent/Cargo.toml index 42cf94d94..aa5e67dba 100644 --- a/primitives/author-noting-inherent/Cargo.toml +++ b/primitives/author-noting-inherent/Cargo.toml @@ -12,8 +12,8 @@ log = { workspace = true } parity-scale-codec = { workspace = true, features = [ "derive", "max-encoded-len" ] } scale-info = { workspace = true } -test-relay-sproof-builder = { workspace = true, optional = true } dp-core = { workspace = true, optional = true } +test-relay-sproof-builder = { workspace = true, optional = true } # Substrate frame-support = { workspace = true, optional = true } @@ -41,4 +41,29 @@ futures = { workspace = true } [features] default = [ "std" ] -std = [ "async-trait", "cumulus-pallet-parachain-system/std", "cumulus-primitives-core/std", "cumulus-relay-chain-interface", "cumulus-relay-chain-interface", "frame-support", "parity-scale-codec/std", "polkadot-primitives", "sc-client-api", "scale-info/std", "scale-info/std", "sp-api", "sp-consensus-aura", "sp-consensus-aura/std", "sp-core/std", "sp-inherents/std", "sp-io/std", "sp-runtime/std", "sp-state-machine/std", "sp-std/std", "sp-storage", "sp-trie/std", "test-relay-sproof-builder/std", "dp-core/std" ] +std = [ + "async-trait", + "cumulus-pallet-parachain-system/std", + "cumulus-primitives-core/std", + "cumulus-relay-chain-interface", + "cumulus-relay-chain-interface", + "dp-core/std", + "frame-support", + "parity-scale-codec/std", + "polkadot-primitives", + "sc-client-api", + "scale-info/std", + "scale-info/std", + "sp-api", + "sp-consensus-aura", + "sp-consensus-aura/std", + "sp-core/std", + "sp-inherents/std", + "sp-io/std", + "sp-runtime/std", + "sp-state-machine/std", + "sp-std/std", + "sp-storage", + "sp-trie/std", + "test-relay-sproof-builder/std", +] diff --git a/primitives/traits/Cargo.toml b/primitives/traits/Cargo.toml index d75b802b6..39fbe53ec 100644 --- a/primitives/traits/Cargo.toml +++ b/primitives/traits/Cargo.toml @@ -7,8 +7,8 @@ license = 
"GPL-3.0-only" version = "0.1.0" [dependencies] -impl-trait-for-tuples = { workspace = true } frame-support = { workspace = true } +impl-trait-for-tuples = { workspace = true } sp-std = { workspace = true } # Cumulus diff --git a/runtime/dancebox/Cargo.toml b/runtime/dancebox/Cargo.toml index bdd930210..74249d801 100644 --- a/runtime/dancebox/Cargo.toml +++ b/runtime/dancebox/Cargo.toml @@ -18,6 +18,7 @@ serde = { workspace = true, optional = true, features = [ "derive" ] } smallvec = { workspace = true } # Own +dp-core = { workspace = true } pallet-author-noting = { workspace = true } pallet-author-noting-runtime-api = { workspace = true } pallet-authority-assignment = { workspace = true } @@ -32,7 +33,6 @@ pallet-proxy = { workspace = true } pallet-registrar = { workspace = true } pallet-registrar-runtime-api = { workspace = true } pallet-services-payment = { workspace = true } -dp-core = { workspace = true } # Moonkit nimbus-primitives = { workspace = true } @@ -154,8 +154,8 @@ std = [ "pallet-proxy/std", "pallet-registrar-runtime-api/std", "pallet-registrar/std", - "pallet-session/std", "pallet-services-payment/std", + "pallet-session/std", "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", @@ -179,12 +179,12 @@ std = [ "sp-std/std", "sp-transaction-pool/std", "sp-version/std", - "tp-author-noting-inherent/std", - "tp-consensus/std", - "tp-traits/std", "staging-xcm-builder/std", "staging-xcm-executor/std", "staging-xcm/std", + "tp-author-noting-inherent/std", + "tp-consensus/std", + "tp-traits/std", "xcm-primitives/std", ] @@ -204,7 +204,7 @@ runtime-benchmarks = [ "pallet-invulnerables/runtime-benchmarks", "pallet-pooled-staking/runtime-benchmarks", "pallet-registrar/runtime-benchmarks", - "pallet-services-payment/runtime-benchmarks", + "pallet-services-payment/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-xcm-benchmarks/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", @@ -237,8 +237,8 @@ try-runtime = [ "pallet-pooled-staking/try-runtime", "pallet-proxy/try-runtime", "pallet-registrar/try-runtime", - "pallet-services-payment/try-runtime", "pallet-root-testing/try-runtime", + "pallet-services-payment/try-runtime", "pallet-session/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime",