From aef9a7f89d12be399abeb248a486f400c75b72e9 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 11:00:59 +0100
Subject: [PATCH 01/29] new_partial signature
---
Cargo.lock | 65 +++++++++++++++++++++++++
Cargo.toml | 1 +
client/node-common/Cargo.toml | 80 +++++++++++++++++++++++++++++++
client/node-common/src/lib.rs | 17 +++++++
client/node-common/src/service.rs | 78 ++++++++++++++++++++++++++++++
5 files changed, 241 insertions(+)
create mode 100644 client/node-common/Cargo.toml
create mode 100644 client/node-common/src/lib.rs
create mode 100644 client/node-common/src/service.rs
diff --git a/Cargo.lock b/Cargo.lock
index e4ee5fa7a..7fa980140 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6837,6 +6837,71 @@ version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65"
+[[package]]
+name = "node-common"
+version = "0.1.0"
+dependencies = [
+ "async-io",
+ "async-trait",
+ "clap",
+ "cumulus-client-cli",
+ "cumulus-client-consensus-aura",
+ "cumulus-client-consensus-common",
+ "cumulus-client-network",
+ "cumulus-client-service",
+ "cumulus-primitives-core",
+ "cumulus-primitives-parachain-inherent",
+ "cumulus-relay-chain-interface",
+ "frame-benchmarking",
+ "frame-benchmarking-cli",
+ "futures 0.3.28",
+ "jsonrpsee",
+ "log",
+ "nimbus-consensus",
+ "nimbus-primitives",
+ "parity-scale-codec",
+ "polkadot-cli",
+ "polkadot-primitives",
+ "polkadot-service",
+ "sc-basic-authorship",
+ "sc-chain-spec",
+ "sc-cli",
+ "sc-client-api",
+ "sc-consensus",
+ "sc-consensus-manual-seal",
+ "sc-executor",
+ "sc-network",
+ "sc-network-common",
+ "sc-network-sync",
+ "sc-offchain",
+ "sc-rpc",
+ "sc-service",
+ "sc-sysinfo",
+ "sc-telemetry",
+ "sc-tracing",
+ "sc-transaction-pool",
+ "sc-transaction-pool-api",
+ "serde",
+ "sp-api",
+ "sp-block-builder",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-consensus-aura",
+ "sp-core",
+ "sp-inherents",
+ "sp-io",
+ "sp-keystore",
+ "sp-offchain",
+ "sp-runtime",
+ "sp-session",
+ "sp-timestamp",
+ "sp-transaction-pool",
+ "substrate-frame-rpc-system",
+ "substrate-prometheus-endpoint",
+ "tc-consensus",
+ "try-runtime-cli",
+]
+
[[package]]
name = "nodrop"
version = "0.1.14"
diff --git a/Cargo.toml b/Cargo.toml
index 7afb36e84..4b08d6227 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -47,6 +47,7 @@ dancebox-runtime = { path = "runtime/dancebox", default-features = false }
manual-xcm-rpc = { path = "client/manual-xcm" }
tc-consensus = { path = "client/consensus" }
tc-orchestrator-chain-interface = { path = "client/orchestrator-chain-interface" }
+node-common = { path = "client/node-common" }
test-relay-sproof-builder = { path = "test-sproof-builder", default-features = false }
tp-author-noting-inherent = { path = "primitives/author-noting-inherent", default-features = false }
tp-chain-state-snapshot = { path = "primitives/chain-state-snapshot", default-features = false }
diff --git a/client/node-common/Cargo.toml b/client/node-common/Cargo.toml
new file mode 100644
index 000000000..8ac291e2c
--- /dev/null
+++ b/client/node-common/Cargo.toml
@@ -0,0 +1,80 @@
+[package]
+name = "node-common"
+authors = { workspace = true }
+description = "Common code between various nodes"
+edition = "2021"
+license = "GPL-3.0-only"
+version = "0.1.0"
+
+[dependencies]
+async-io = { workspace = true }
+async-trait = { workspace = true }
+clap = { workspace = true, features = [ "derive" ] }
+futures = { workspace = true }
+jsonrpsee = { workspace = true, features = [ "server" ] }
+log = { workspace = true }
+parity-scale-codec = { workspace = true }
+serde = { workspace = true, features = [ "derive" ] }
+
+# Local
+tc-consensus = { workspace = true }
+
+# Nimbus
+nimbus-consensus = { workspace = true }
+nimbus-primitives = { workspace = true, features = [ "std" ] }
+
+# Substrate
+frame-benchmarking = { workspace = true }
+frame-benchmarking-cli = { workspace = true }
+sc-basic-authorship = { workspace = true }
+sc-chain-spec = { workspace = true }
+sc-cli = { workspace = true }
+sc-client-api = { workspace = true }
+sc-consensus = { workspace = true }
+sc-consensus-manual-seal = { workspace = true }
+sc-executor = { workspace = true }
+sc-network = { workspace = true }
+sc-network-common = { workspace = true }
+sc-network-sync = { workspace = true }
+sc-offchain = { workspace = true }
+sc-rpc = { workspace = true }
+sc-service = { workspace = true }
+sc-sysinfo = { workspace = true }
+sc-telemetry = { workspace = true }
+sc-tracing = { workspace = true }
+sc-transaction-pool = { workspace = true }
+sc-transaction-pool-api = { workspace = true }
+sp-api = { workspace = true, features = [ "std" ] }
+sp-block-builder = { workspace = true }
+sp-blockchain = { workspace = true }
+sp-consensus = { workspace = true }
+sp-consensus-aura = { workspace = true }
+sp-core = { workspace = true, features = [ "std" ] }
+sp-inherents = { workspace = true, features = [ "std" ] }
+sp-io = { workspace = true, features = [ "std" ] }
+sp-keystore = { workspace = true, features = [ "std" ] }
+sp-offchain = { workspace = true, features = [ "std" ] }
+sp-runtime = { workspace = true, features = [ "std" ] }
+sp-session = { workspace = true, features = [ "std" ] }
+sp-timestamp = { workspace = true, features = [ "std" ] }
+
+sp-transaction-pool = { workspace = true }
+substrate-frame-rpc-system = { workspace = true }
+substrate-prometheus-endpoint = { workspace = true }
+try-runtime-cli = { workspace = true, optional = true }
+
+# Polkadot
+polkadot-cli = { workspace = true }
+polkadot-primitives = { workspace = true }
+polkadot-service = { workspace = true }
+
+# Cumulus
+cumulus-client-cli = { workspace = true }
+cumulus-client-consensus-aura = { workspace = true }
+cumulus-client-consensus-common = { workspace = true }
+cumulus-client-network = { workspace = true }
+cumulus-client-service = { workspace = true }
+cumulus-primitives-core = { workspace = true }
+cumulus-primitives-parachain-inherent = { workspace = true }
+cumulus-relay-chain-interface = { workspace = true }
+
diff --git a/client/node-common/src/lib.rs b/client/node-common/src/lib.rs
new file mode 100644
index 000000000..673211af3
--- /dev/null
+++ b/client/node-common/src/lib.rs
@@ -0,0 +1,17 @@
+// Copyright (C) Moondance Labs Ltd.
+// This file is part of Tanssi.
+
+// Tanssi is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Tanssi is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
+
+pub mod service;
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
new file mode 100644
index 000000000..c7e9094c5
--- /dev/null
+++ b/client/node-common/src/service.rs
@@ -0,0 +1,78 @@
+// Copyright (C) Moondance Labs Ltd.
+// This file is part of Tanssi.
+
+// Tanssi is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Tanssi is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
+
+use {sp_api::ConstructRuntimeApi, sp_transaction_pool::runtime_api::TaggedTransactionQueue};
+
+use {
+ cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
+ sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch},
+ sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient},
+ sc_telemetry::{Telemetry, TelemetryWorkerHandle},
+ sc_transaction_pool::ChainApi,
+ std::sync::Arc,
+};
+
+pub type ParachainExecutor<ParachainNativeExecutor> =
+    NativeElseWasmExecutor<ParachainNativeExecutor>;
+pub type ParachainClient<Block, RuntimeApi, ParachainNativeExecutor> =
+    TFullClient<Block, RuntimeApi, ParachainExecutor<ParachainNativeExecutor>>;
+pub type ParachainBackend<Block> = TFullBackend<Block>;
+pub type ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor> = TParachainBlockImport<
+    Block,
+    Arc<ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>,
+    ParachainBackend<Block>,
+>;
+
+type ConstructedRuntimeApi<Block, Client, RuntimeApi> =
+    <RuntimeApi as ConstructRuntimeApi<Block, Client>>::RuntimeApi;
+
+pub trait BlockT: cumulus_primitives_core::BlockT {}
+
+pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor, SelectChain>(
+    config: &Configuration,
+) -> Result<
+    PartialComponents<
+        ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
+        ParachainBackend<Block>,
+        SelectChain,
+        sc_consensus::DefaultImportQueue<Block>,
+        sc_transaction_pool::FullPool<
+            Block,
+            ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
+        >,
+        (
+            ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor>,
+            Option<Telemetry>,
+            Option<TelemetryWorkerHandle>,
+        ),
+    >,
+    sc_service::Error,
+>
+where
+    Block: BlockT,
+    ParachainNativeExecutor: NativeExecutionDispatch + 'static,
+    RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>
+        + Sync
+        + Send
+        + 'static,
+    ConstructedRuntimeApi<
+        Block,
+        ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
+        RuntimeApi,
+    >: TaggedTransactionQueue<Block>,
+{
+    todo!()
+}
From 5dfb3238eaa5e2892ee4c095a0b89cd6e6f27a71 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 11:28:21 +0100
Subject: [PATCH 02/29] common new_partial for tanssi and simple (not frontier)
---
Cargo.lock | 2 +
client/node-common/src/service.rs | 105 ++++++++++++++++--
.../templates/simple/node/Cargo.toml | 1 +
.../templates/simple/node/src/service.rs | 78 +------------
node/Cargo.toml | 1 +
node/src/container_chain_spawner.rs | 6 +-
node/src/service.rs | 82 +-------------
7 files changed, 104 insertions(+), 171 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 7fa980140..8302fd273 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1747,6 +1747,7 @@ dependencies = [
"log",
"nimbus-consensus",
"nimbus-primitives",
+ "node-common",
"parity-scale-codec",
"polkadot-cli",
"polkadot-primitives",
@@ -14637,6 +14638,7 @@ dependencies = [
"manual-xcm-rpc",
"nimbus-consensus",
"nimbus-primitives",
+ "node-common",
"pallet-author-noting-runtime-api",
"pallet-collator-assignment-runtime-api",
"pallet-configuration",
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index c7e9094c5..0e8dc891d 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -14,14 +14,18 @@
// You should have received a copy of the GNU General Public License
// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
-use {sp_api::ConstructRuntimeApi, sp_transaction_pool::runtime_api::TaggedTransactionQueue};
+use sp_block_builder::BlockBuilder;
use {
cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
- sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch},
+ sc_executor::{
+ HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor,
+ DEFAULT_HEAP_ALLOC_STRATEGY,
+ },
sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient},
- sc_telemetry::{Telemetry, TelemetryWorkerHandle},
- sc_transaction_pool::ChainApi,
+ sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+ sp_api::ConstructRuntimeApi,
+ sp_transaction_pool::runtime_api::TaggedTransactionQueue,
std::sync::Arc,
};
@@ -35,14 +39,12 @@ pub type ParachainBlockImport = TPar
Arc>,
ParachainBackend,
>;
-
-type ConstructedRuntimeApi =
+pub type ConstructedRuntimeApi =
>::RuntimeApi;
-pub trait BlockT: cumulus_primitives_core::BlockT {}
-
pub fn new_partial(
config: &Configuration,
+ select_chain: SelectChain,
) -> Result<
PartialComponents<
ParachainClient,
@@ -62,7 +64,7 @@ pub fn new_partial(
sc_service::Error,
>
where
- Block: BlockT,
+ Block: cumulus_primitives_core::BlockT,
ParachainNativeExecutor: NativeExecutionDispatch + 'static,
RuntimeApi: ConstructRuntimeApi>
+ Sync
@@ -72,7 +74,88 @@ where
Block,
ParachainClient,
RuntimeApi,
- >: TaggedTransactionQueue,
+ >: TaggedTransactionQueue + BlockBuilder,
{
- todo!()
+ let telemetry = config
+ .telemetry_endpoints
+ .clone()
+ .filter(|x| !x.is_empty())
+ .map(|endpoints| -> Result<_, sc_telemetry::Error> {
+ let worker = TelemetryWorker::new(16)?;
+ let telemetry = worker.handle().new_telemetry(endpoints);
+ Ok((worker, telemetry))
+ })
+ .transpose()?;
+
+ let heap_pages = config
+ .default_heap_pages
+ .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
+ extra_pages: h as _,
+ });
+
+ // Default runtime_cache_size is 2
+ // For now we can work with this, but it will likely need
+ // to change once we start having runtime_cache_sizes, or
+ // run nodes with the maximum for this value
+ let wasm = WasmExecutor::builder()
+ .with_execution_method(config.wasm_method)
+ .with_onchain_heap_alloc_strategy(heap_pages)
+ .with_offchain_heap_alloc_strategy(heap_pages)
+ .with_max_runtime_instances(config.max_runtime_instances)
+ .with_runtime_cache_size(config.runtime_cache_size)
+ .build();
+
+    let executor: ParachainExecutor<ParachainNativeExecutor> =
+        ParachainExecutor::new_with_wasm_executor(wasm);
+
+    let (client, backend, keystore_container, task_manager) =
+        sc_service::new_full_parts::<Block, RuntimeApi, _>(
+ config,
+ telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+ executor,
+ )?;
+ let client = Arc::new(client);
+
+ let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
+
+ let telemetry = telemetry.map(|(worker, telemetry)| {
+ task_manager
+ .spawn_handle()
+ .spawn("telemetry", None, worker.run());
+ telemetry
+ });
+
+ let transaction_pool = sc_transaction_pool::BasicPool::new_full(
+ config.transaction_pool.clone(),
+ config.role.is_authority().into(),
+ config.prometheus_registry(),
+ task_manager.spawn_essential_handle(),
+ client.clone(),
+ );
+
+ let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+ let import_queue = nimbus_consensus::import_queue(
+ client.clone(),
+ block_import.clone(),
+ move |_, _| async move {
+ let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+ Ok((time,))
+ },
+ &task_manager.spawn_essential_handle(),
+ config.prometheus_registry(),
+ false,
+ )?;
+
+ Ok(PartialComponents {
+ backend,
+ client,
+ import_queue,
+ keystore_container,
+ task_manager,
+ transaction_pool,
+ select_chain,
+ other: (block_import, telemetry, telemetry_worker_handle),
+ })
}
diff --git a/container-chains/templates/simple/node/Cargo.toml b/container-chains/templates/simple/node/Cargo.toml
index 252ba852b..b598a54cd 100644
--- a/container-chains/templates/simple/node/Cargo.toml
+++ b/container-chains/templates/simple/node/Cargo.toml
@@ -18,6 +18,7 @@ parity-scale-codec = { workspace = true }
serde = { workspace = true, features = [ "derive" ] }
# Local
+node-common = { workspace = true }
container-chain-template-simple-runtime = { workspace = true, features = [ "std" ] }
tc-consensus = { workspace = true }
diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs
index 550bc33d9..0a8b23ac1 100644
--- a/container-chains/templates/simple/node/src/service.rs
+++ b/container-chains/templates/simple/node/src/service.rs
@@ -92,83 +92,7 @@ pub fn new_partial(
>,
sc_service::Error,
> {
- let telemetry = config
- .telemetry_endpoints
- .clone()
- .filter(|x| !x.is_empty())
- .map(|endpoints| -> Result<_, sc_telemetry::Error> {
- let worker = TelemetryWorker::new(16)?;
- let telemetry = worker.handle().new_telemetry(endpoints);
- Ok((worker, telemetry))
- })
- .transpose()?;
-
- let heap_pages = config
- .default_heap_pages
- .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
- extra_pages: h as _,
- });
-
- let wasm = WasmExecutor::builder()
- .with_execution_method(config.wasm_method)
- .with_onchain_heap_alloc_strategy(heap_pages)
- .with_offchain_heap_alloc_strategy(heap_pages)
- .with_max_runtime_instances(config.max_runtime_instances)
- .with_runtime_cache_size(config.runtime_cache_size)
- .build();
-
- let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
- let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts::(
- config,
- telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
- executor,
- )?;
- let client = Arc::new(client);
-
- let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
- let telemetry = telemetry.map(|(worker, telemetry)| {
- task_manager
- .spawn_handle()
- .spawn("telemetry", None, worker.run());
- telemetry
- });
-
- let transaction_pool = sc_transaction_pool::BasicPool::new_full(
- config.transaction_pool.clone(),
- config.role.is_authority().into(),
- config.prometheus_registry(),
- task_manager.spawn_essential_handle(),
- client.clone(),
- );
-
- let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
- let import_queue = nimbus_consensus::import_queue(
- client.clone(),
- block_import.clone(),
- move |_, _| async move {
- let time = sp_timestamp::InherentDataProvider::from_system_time();
-
- Ok((time,))
- },
- &task_manager.spawn_essential_handle(),
- config.prometheus_registry(),
- false,
- )?;
-
- Ok(PartialComponents {
- backend,
- client,
- import_queue,
- keystore_container,
- task_manager,
- transaction_pool,
- select_chain: (),
- other: (block_import, telemetry, telemetry_worker_handle),
- })
+ node_common::service::new_partial(config, ())
}
/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
diff --git a/node/Cargo.toml b/node/Cargo.toml
index 593a8e56f..2b88db0b7 100644
--- a/node/Cargo.toml
+++ b/node/Cargo.toml
@@ -22,6 +22,7 @@ serde_json = { workspace = true }
tokio = { workspace = true }
# Local
+node-common = { workspace = true }
ccp-authorities-noting-inherent = { workspace = true, features = [ "std" ] }
dancebox-runtime = { workspace = true, features = [ "std" ] }
manual-xcm-rpc = { workspace = true }
diff --git a/node/src/container_chain_spawner.rs b/node/src/container_chain_spawner.rs
index c535133ad..ba8c52f00 100644
--- a/node/src/container_chain_spawner.rs
+++ b/node/src/container_chain_spawner.rs
@@ -49,8 +49,10 @@ use {
time::Instant,
},
tc_orchestrator_chain_interface::OrchestratorChainInterface,
- tokio::sync::{mpsc, oneshot},
- tokio::time::{sleep, Duration},
+ tokio::{
+ sync::{mpsc, oneshot},
+ time::{sleep, Duration},
+ },
};
/// Struct with all the params needed to start a container chain node given the CLI arguments,
diff --git a/node/src/service.rs b/node/src/service.rs
index 4d85901b3..b6f799c05 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -137,87 +137,7 @@ pub fn new_partial(
>,
sc_service::Error,
> {
- let telemetry = config
- .telemetry_endpoints
- .clone()
- .filter(|x| !x.is_empty())
- .map(|endpoints| -> Result<_, sc_telemetry::Error> {
- let worker = TelemetryWorker::new(16)?;
- let telemetry = worker.handle().new_telemetry(endpoints);
- Ok((worker, telemetry))
- })
- .transpose()?;
-
- let heap_pages = config
- .default_heap_pages
- .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
- extra_pages: h as _,
- });
-
- let wasm = WasmExecutor::builder()
- .with_execution_method(config.wasm_method)
- .with_onchain_heap_alloc_strategy(heap_pages)
- .with_offchain_heap_alloc_strategy(heap_pages)
- .with_max_runtime_instances(config.max_runtime_instances)
- .with_runtime_cache_size(config.runtime_cache_size)
- .build();
-
- let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
- let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts::(
- config,
- telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
- executor,
- )?;
- let client = Arc::new(client);
-
- let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
- let telemetry = telemetry.map(|(worker, telemetry)| {
- task_manager
- .spawn_handle()
- .spawn("telemetry", None, worker.run());
- telemetry
- });
-
- let transaction_pool = sc_transaction_pool::BasicPool::new_full(
- config.transaction_pool.clone(),
- config.role.is_authority().into(),
- config.prometheus_registry(),
- task_manager.spawn_essential_handle(),
- client.clone(),
- );
-
- let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
- // The nimbus import queue ONLY checks the signature correctness
- // Any other checks corresponding to the author-correctness should be done
- // in the runtime
- let import_queue = nimbus_consensus::import_queue(
- client.clone(),
- block_import.clone(),
- move |_, _| async move {
- let time = sp_timestamp::InherentDataProvider::from_system_time();
-
- Ok((time,))
- },
- &task_manager.spawn_essential_handle(),
- config.prometheus_registry(),
- false,
- )?;
-
- let maybe_select_chain = None;
-
- Ok(PartialComponents {
- backend,
- client,
- import_queue,
- keystore_container,
- task_manager,
- transaction_pool,
- select_chain: maybe_select_chain,
- other: (block_import, telemetry, telemetry_worker_handle),
- })
+ node_common::service::new_partial(config, None)
}
/// Background task used to detect changes to container chain assignment,
From 80326e98f541ad58493314555f2d126d6bbad936 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 11:50:50 +0100
Subject: [PATCH 03/29] extract divergent code with frontier new_partial
---
Cargo.lock | 1 +
client/node-common/src/service.rs | 114 ++++++++++++------
.../templates/frontier/node/Cargo.toml | 1 +
.../templates/frontier/node/src/service.rs | 69 ++---------
.../templates/simple/node/src/service.rs | 48 +++++++-
node/src/service.rs | 38 +++++-
6 files changed, 170 insertions(+), 101 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 8302fd273..964c06a78 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1600,6 +1600,7 @@ dependencies = [
"manual-xcm-rpc",
"nimbus-consensus",
"nimbus-primitives",
+ "node-common",
"pallet-ethereum",
"pallet-transaction-payment-rpc",
"pallet-transaction-payment-rpc-runtime-api",
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index 0e8dc891d..c934ce736 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -14,7 +14,10 @@
// You should have received a copy of the GNU General Public License
// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
-use sp_block_builder::BlockBuilder;
+use {
+ sc_service::{KeystoreContainer, TaskManager},
+ sp_block_builder::BlockBuilder,
+};
use {
cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
@@ -22,7 +25,7 @@ use {
HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor,
DEFAULT_HEAP_ALLOC_STRATEGY,
},
- sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient},
+ sc_service::{Configuration, TFullBackend, TFullClient},
sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
sp_api::ConstructRuntimeApi,
sp_transaction_pool::runtime_api::TaggedTransactionQueue,
@@ -42,25 +45,53 @@ pub type ParachainBlockImport = TPar
pub type ConstructedRuntimeApi =
>::RuntimeApi;
-pub fn new_partial(
- config: &Configuration,
- select_chain: SelectChain,
-) -> Result<
- PartialComponents<
+pub struct NewPartial
+where
+ Block: cumulus_primitives_core::BlockT,
+ ParachainNativeExecutor: NativeExecutionDispatch + 'static,
+ RuntimeApi: ConstructRuntimeApi>
+ + Sync
+ + Send
+ + 'static,
+ ConstructedRuntimeApi<
+ Block,
ParachainClient,
- ParachainBackend,
- SelectChain,
- sc_consensus::DefaultImportQueue,
+ RuntimeApi,
+ >: TaggedTransactionQueue + BlockBuilder,
+{
+ pub client: Arc>,
+ pub backend: Arc>,
+ pub task_manager: TaskManager,
+ pub keystore_container: KeystoreContainer,
+ pub transaction_pool: Arc<
sc_transaction_pool::FullPool<
Block,
ParachainClient,
>,
- (
- ParachainBlockImport,
- Option,
- Option,
- ),
>,
+ pub telemetry: Option,
+ pub telemetry_worker_handle: Option,
+}
+
+pub fn new_partial(
+ config: &Configuration,
+) -> Result<
+ // PartialComponents<
+ // ParachainClient,
+ // ParachainBackend,
+ // SelectChain,
+ // sc_consensus::DefaultImportQueue,
+ // sc_transaction_pool::FullPool<
+ // Block,
+ // ParachainClient,
+ // >,
+ // (
+ // ParachainBlockImport,
+ // Option,
+ // Option,
+ // ),
+ // >,
+ NewPartial,
sc_service::Error,
>
where
@@ -133,29 +164,38 @@ where
client.clone(),
);
- let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
- let import_queue = nimbus_consensus::import_queue(
- client.clone(),
- block_import.clone(),
- move |_, _| async move {
- let time = sp_timestamp::InherentDataProvider::from_system_time();
-
- Ok((time,))
- },
- &task_manager.spawn_essential_handle(),
- config.prometheus_registry(),
- false,
- )?;
-
- Ok(PartialComponents {
- backend,
+ // let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+ // let import_queue = nimbus_consensus::import_queue(
+ // client.clone(),
+ // block_import.clone(),
+ // move |_, _| async move {
+ // let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+ // Ok((time,))
+ // },
+ // &task_manager.spawn_essential_handle(),
+ // config.prometheus_registry(),
+ // false,
+ // )?;
+
+ // Ok(PartialComponents {
+ // backend,
+ // client,
+ // import_queue,
+ // keystore_container,
+ // task_manager,
+ // transaction_pool,
+ // select_chain,
+ // other: (block_import, telemetry, telemetry_worker_handle),
+ // })
+ Ok(NewPartial {
client,
- import_queue,
- keystore_container,
- task_manager,
+ backend,
transaction_pool,
- select_chain,
- other: (block_import, telemetry, telemetry_worker_handle),
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
})
}
diff --git a/container-chains/templates/frontier/node/Cargo.toml b/container-chains/templates/frontier/node/Cargo.toml
index 4c868bdd5..7ab6c5189 100644
--- a/container-chains/templates/frontier/node/Cargo.toml
+++ b/container-chains/templates/frontier/node/Cargo.toml
@@ -21,6 +21,7 @@ serde = { workspace = true, features = [ "derive" ] }
url = { workspace = true }
# Local
+node-common = { workspace = true }
ccp-authorities-noting-inherent = { workspace = true }
container-chain-template-frontier-runtime = { workspace = true, features = [ "std" ] }
manual-xcm-rpc = { workspace = true }
diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs
index 9a28d6178..99d31eeae 100644
--- a/container-chains/templates/frontier/node/src/service.rs
+++ b/container-chains/templates/frontier/node/src/service.rs
@@ -18,7 +18,6 @@
use {
cumulus_client_consensus_common::ParachainBlockImport,
- sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY},
sc_network::config::FullNetworkConfiguration,
};
// std
@@ -65,9 +64,11 @@ use {
sc_executor::NativeElseWasmExecutor,
sc_network::NetworkBlock,
sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager},
- sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+ sc_telemetry::{Telemetry, TelemetryWorkerHandle},
};
+use node_common::service::NewPartial;
+
/// Native executor type.
use crate::client::TemplateRuntimeExecutor;
@@ -160,53 +161,15 @@ pub fn new_partial(
// Use ethereum style for subscription ids
config.rpc_id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider));
- let telemetry = config
- .telemetry_endpoints
- .clone()
- .filter(|x| !x.is_empty())
- .map(|endpoints| -> Result<_, sc_telemetry::Error> {
- let worker = TelemetryWorker::new(16)?;
- let telemetry = worker.handle().new_telemetry(endpoints);
- Ok((worker, telemetry))
- })
- .transpose()?;
-
- // Default runtime_cache_size is 2
- // For now we can work with this, but it will likely need
- // to change once we start having runtime_cache_sizes, or
- // run nodes with the maximum for this value
- let heap_pages = config
- .default_heap_pages
- .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
- extra_pages: h as _,
- });
-
- let wasm = WasmExecutor::builder()
- .with_execution_method(config.wasm_method)
- .with_onchain_heap_alloc_strategy(heap_pages)
- .with_offchain_heap_alloc_strategy(heap_pages)
- .with_max_runtime_instances(config.max_runtime_instances)
- .with_runtime_cache_size(config.runtime_cache_size)
- .build();
-
- let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
- let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts::(
- config,
- telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
- executor,
- )?;
- let client = Arc::new(client);
-
- let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
- let telemetry = telemetry.map(|(worker, telemetry)| {
- task_manager
- .spawn_handle()
- .spawn("telemetry", None, worker.run());
- telemetry
- });
+ let NewPartial {
+ client,
+ backend,
+ transaction_pool,
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
+ } = node_common::service::new_partial(config)?;
let maybe_select_chain = if dev_service {
Some(sc_consensus::LongestChain::new(backend.clone()))
@@ -214,14 +177,6 @@ pub fn new_partial(
None
};
- let transaction_pool = sc_transaction_pool::BasicPool::new_full(
- config.transaction_pool.clone(),
- config.role.is_authority().into(),
- config.prometheus_registry(),
- task_manager.spawn_essential_handle(),
- client.clone(),
- );
-
let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new())));
let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs
index 0a8b23ac1..6b81d696f 100644
--- a/container-chains/templates/simple/node/src/service.rs
+++ b/container-chains/templates/simple/node/src/service.rs
@@ -1,5 +1,3 @@
-//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
-
// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.
@@ -15,14 +13,18 @@
// You should have received a copy of the GNU General Public License
// along with Tanssi. If not, see .
-use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
+
+//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
// std
use std::{sync::Arc, time::Duration};
use {cumulus_client_cli::CollatorOptions, sc_network::config::FullNetworkConfiguration};
// Local Runtime Types
-use container_chain_template_simple_runtime::{opaque::Block, RuntimeApi};
+use {
+ container_chain_template_simple_runtime::{opaque::Block, RuntimeApi},
+ node_common::service::NewPartial,
+};
// Cumulus Imports
#[allow(deprecated)]
@@ -44,7 +46,7 @@ use {
sc_executor::NativeElseWasmExecutor,
sc_network::NetworkBlock,
sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager},
- sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+ sc_telemetry::{Telemetry, TelemetryWorkerHandle},
sc_transaction_pool_api::OffchainTransactionPoolFactory,
};
@@ -92,7 +94,41 @@ pub fn new_partial(
>,
sc_service::Error,
> {
- node_common::service::new_partial(config, ())
+ let NewPartial {
+ client,
+ backend,
+ transaction_pool,
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
+ } = node_common::service::new_partial(config)?;
+
+ let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+ let import_queue = nimbus_consensus::import_queue(
+ client.clone(),
+ block_import.clone(),
+ move |_, _| async move {
+ let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+ Ok((time,))
+ },
+ &task_manager.spawn_essential_handle(),
+ config.prometheus_registry(),
+ false,
+ )?;
+
+ Ok(PartialComponents {
+ backend,
+ client,
+ import_queue,
+ keystore_container,
+ task_manager,
+ transaction_pool,
+ select_chain: (),
+ other: (block_import, telemetry, telemetry_worker_handle),
+ })
}
/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
diff --git a/node/src/service.rs b/node/src/service.rs
index b6f799c05..07567c0d7 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -16,6 +16,8 @@
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
+use node_common::service::NewPartial;
+
#[allow(deprecated)]
use {
crate::{
@@ -137,7 +139,41 @@ pub fn new_partial(
>,
sc_service::Error,
> {
- node_common::service::new_partial(config, None)
+ let NewPartial {
+ client,
+ backend,
+ transaction_pool,
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
+ } = node_common::service::new_partial(config)?;
+
+ let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+ let import_queue = nimbus_consensus::import_queue(
+ client.clone(),
+ block_import.clone(),
+ move |_, _| async move {
+ let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+ Ok((time,))
+ },
+ &task_manager.spawn_essential_handle(),
+ config.prometheus_registry(),
+ false,
+ )?;
+
+ Ok(PartialComponents {
+ backend,
+ client,
+ import_queue,
+ keystore_container,
+ task_manager,
+ transaction_pool,
+ select_chain: None,
+ other: (block_import, telemetry, telemetry_worker_handle),
+ })
}
/// Background task used to detect changes to container chain assignment,
From 6b71e471db64640273b20f9afc2005bb406bc735 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 11:55:18 +0100
Subject: [PATCH 04/29] cleanup
---
client/node-common/src/service.rs | 48 ++-----------------------------
node/src/service.rs | 3 ++
2 files changed, 5 insertions(+), 46 deletions(-)
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index c934ce736..0047c6a01 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -75,25 +75,7 @@ where
pub fn new_partial(
config: &Configuration,
-) -> Result<
- // PartialComponents<
- // ParachainClient,
- // ParachainBackend,
- // SelectChain,
- // sc_consensus::DefaultImportQueue,
- // sc_transaction_pool::FullPool<
- // Block,
- // ParachainClient,
- // >,
- // (
- // ParachainBlockImport,
- // Option,
- // Option,
- // ),
- // >,
- NewPartial,
- sc_service::Error,
->
+) -> Result<NewPartial<Block, RuntimeApi, ParachainNativeExecutor>, sc_service::Error>
where
Block: cumulus_primitives_core::BlockT,
ParachainNativeExecutor: NativeExecutionDispatch + 'static,
@@ -136,8 +118,7 @@ where
.with_runtime_cache_size(config.runtime_cache_size)
.build();
- let executor: ParachainExecutor =
- ParachainExecutor::new_with_wasm_executor(wasm);
+ let executor = ParachainExecutor::new_with_wasm_executor(wasm);
let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::(
@@ -164,31 +145,6 @@ where
client.clone(),
);
- // let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
- // let import_queue = nimbus_consensus::import_queue(
- // client.clone(),
- // block_import.clone(),
- // move |_, _| async move {
- // let time = sp_timestamp::InherentDataProvider::from_system_time();
-
- // Ok((time,))
- // },
- // &task_manager.spawn_essential_handle(),
- // config.prometheus_registry(),
- // false,
- // )?;
-
- // Ok(PartialComponents {
- // backend,
- // client,
- // import_queue,
- // keystore_container,
- // task_manager,
- // transaction_pool,
- // select_chain,
- // other: (block_import, telemetry, telemetry_worker_handle),
- // })
Ok(NewPartial {
client,
backend,
diff --git a/node/src/service.rs b/node/src/service.rs
index 07567c0d7..bbf6d8132 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -151,6 +151,9 @@ pub fn new_partial(
let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+ // The nimbus import queue ONLY checks the signature correctness
+ // Any other checks corresponding to the author-correctness should be done
+ // in the runtime
let import_queue = nimbus_consensus::import_queue(
client.clone(),
block_import.clone(),
From 3bb5279b61edadd88984b12bdb9848e9bbb633a0 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 12:03:04 +0100
Subject: [PATCH 05/29] update new_partial_dev
---
node/src/service.rs | 66 ++++++++-------------------------------------
1 file changed, 11 insertions(+), 55 deletions(-)
diff --git a/node/src/service.rs b/node/src/service.rs
index bbf6d8132..8f6d89e92 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -55,16 +55,14 @@ use {
UsageProvider,
},
sc_consensus::{BlockImport, ImportQueue},
- sc_executor::{
- HeapAllocStrategy, NativeElseWasmExecutor, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY,
- },
+ sc_executor::NativeElseWasmExecutor,
sc_network::{config::FullNetworkConfiguration, NetworkBlock},
sc_network_sync::SyncingService,
sc_service::{
Configuration, Error as ServiceError, PartialComponents, TFullBackend, TFullClient,
TaskManager,
},
- sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle},
+ sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorkerHandle},
sp_api::StorageProof,
sp_consensus::SyncOracle,
sp_core::{
@@ -271,57 +269,15 @@ pub fn new_partial_dev(
>,
sc_service::Error,
> {
- let telemetry = config
- .telemetry_endpoints
- .clone()
- .filter(|x| !x.is_empty())
- .map(|endpoints| -> Result<_, sc_telemetry::Error> {
- let worker = TelemetryWorker::new(16)?;
- let telemetry = worker.handle().new_telemetry(endpoints);
- Ok((worker, telemetry))
- })
- .transpose()?;
-
- let heap_pages = config
- .default_heap_pages
- .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
- extra_pages: h as _,
- });
-
- let wasm = WasmExecutor::builder()
- .with_execution_method(config.wasm_method)
- .with_onchain_heap_alloc_strategy(heap_pages)
- .with_offchain_heap_alloc_strategy(heap_pages)
- .with_max_runtime_instances(config.max_runtime_instances)
- .with_runtime_cache_size(config.runtime_cache_size)
- .build();
-
- let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
- let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts::(
- config,
- telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
- executor,
- )?;
- let client = Arc::new(client);
-
- let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
- let telemetry = telemetry.map(|(worker, telemetry)| {
- task_manager
- .spawn_handle()
- .spawn("telemetry", None, worker.run());
- telemetry
- });
-
- let transaction_pool = sc_transaction_pool::BasicPool::new_full(
- config.transaction_pool.clone(),
- config.role.is_authority().into(),
- config.prometheus_registry(),
- task_manager.spawn_essential_handle(),
- client.clone(),
- );
+ let NewPartial {
+ client,
+ backend,
+ transaction_pool,
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
+ } = node_common::service::new_partial(config)?;
let block_import = DevParachainBlockImport::new(client.clone());
let import_queue = build_manual_seal_import_queue(
From f7840b7372fb8cb806755d5307cd6271adda3093 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 14:01:43 +0100
Subject: [PATCH 06/29] use macro to reduce generics verbosity
---
client/node-common/src/service.rs | 59 +++++++++++--------------------
1 file changed, 20 insertions(+), 39 deletions(-)
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index 0047c6a01..90f14afe8 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -32,43 +32,31 @@ use {
std::sync::Arc,
};
-pub type ParachainExecutor =
- NativeElseWasmExecutor;
-pub type ParachainClient =
- TFullClient>;
-pub type ParachainBackend = TFullBackend;
-pub type ParachainBlockImport = TParachainBlockImport<
- Block,
- Arc>,
- ParachainBackend,
->;
-pub type ConstructedRuntimeApi =
- >::RuntimeApi;
+/// Functions in this module are generic over `Block`, `RuntimeApi`, and
+/// `ParachainNativeExecutor`. Using type aliases requires them to be
+/// generic too, which makes them still verbose to use. For that reason we use
+/// a macro that expects the above types to already be in scope.
+macro_rules! T {
+    [Executor] => { NativeElseWasmExecutor<ParachainNativeExecutor> };
+    [Client] => { TFullClient<Block, RuntimeApi, T![Executor]> };
+    [Backend] => { TFullBackend<Block> };
+    [ConstructedRuntimeApi] => {
+        <RuntimeApi as ConstructRuntimeApi<Block, T![Client]>>::RuntimeApi
+    };
+}
pub struct NewPartial
where
Block: cumulus_primitives_core::BlockT,
ParachainNativeExecutor: NativeExecutionDispatch + 'static,
- RuntimeApi: ConstructRuntimeApi>
- + Sync
- + Send
- + 'static,
- ConstructedRuntimeApi<
- Block,
- ParachainClient,
- RuntimeApi,
- >: TaggedTransactionQueue + BlockBuilder,
+ RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static,
+ T![ConstructedRuntimeApi]: TaggedTransactionQueue + BlockBuilder,
{
- pub client: Arc>,
- pub backend: Arc>,
+ pub client: Arc,
+ pub backend: Arc,
pub task_manager: TaskManager,
pub keystore_container: KeystoreContainer,
- pub transaction_pool: Arc<
- sc_transaction_pool::FullPool<
- Block,
- ParachainClient,
- >,
- >,
+ pub transaction_pool: Arc>,
pub telemetry: Option,
pub telemetry_worker_handle: Option,
}
@@ -79,15 +67,8 @@ pub fn new_partial(
where
Block: cumulus_primitives_core::BlockT,
ParachainNativeExecutor: NativeExecutionDispatch + 'static,
- RuntimeApi: ConstructRuntimeApi>
- + Sync
- + Send
- + 'static,
- ConstructedRuntimeApi<
- Block,
- ParachainClient,
- RuntimeApi,
- >: TaggedTransactionQueue + BlockBuilder,
+ RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static,
+ T![ConstructedRuntimeApi]: TaggedTransactionQueue + BlockBuilder,
{
let telemetry = config
.telemetry_endpoints
@@ -118,7 +99,7 @@ where
.with_runtime_cache_size(config.runtime_cache_size)
.build();
- let executor = ParachainExecutor::new_with_wasm_executor(wasm);
+ let executor = ::new_with_wasm_executor(wasm);
let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::(
From 8cd20962468eaf9c5c2728efb89be7c868a7ee63 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Fri, 3 Nov 2023 16:41:59 +0100
Subject: [PATCH 07/29] generic build_cumulus_network based on custom
import_queue + toml-sort
---
Cargo.lock | 2 +
Cargo.toml | 32 +-
client/consensus/Cargo.toml | 2 +-
client/manual-xcm/Cargo.toml | 2 +-
client/node-common/Cargo.toml | 3 +-
client/node-common/src/service.rs | 307 +++++++++++++-----
.../templates/frontier/node/Cargo.toml | 14 +-
.../templates/frontier/node/src/service.rs | 120 +++----
.../templates/frontier/runtime/Cargo.toml | 28 +-
.../templates/simple/node/Cargo.toml | 7 +-
.../templates/simple/node/src/service.rs | 73 +++--
.../templates/simple/runtime/Cargo.toml | 20 +-
node/Cargo.toml | 8 +-
node/src/service.rs | 180 ++++++----
pallets/collator-assignment/Cargo.toml | 10 +-
pallets/pooled-staking/Cargo.toml | 2 +-
pallets/registrar/Cargo.toml | 2 +-
.../container-chain-genesis-data/Cargo.toml | 2 +-
primitives/core/Cargo.toml | 2 +-
primitives/traits/Cargo.toml | 2 +-
runtime/dancebox/Cargo.toml | 64 +---
21 files changed, 515 insertions(+), 367 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 964c06a78..1d3742b80 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6875,6 +6875,7 @@ dependencies = [
"sc-network",
"sc-network-common",
"sc-network-sync",
+ "sc-network-transactions",
"sc-offchain",
"sc-rpc",
"sc-service",
@@ -6883,6 +6884,7 @@ dependencies = [
"sc-tracing",
"sc-transaction-pool",
"sc-transaction-pool-api",
+ "sc-utils",
"serde",
"sp-api",
"sp-block-builder",
diff --git a/Cargo.toml b/Cargo.toml
index 4b08d6227..225699beb 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -45,9 +45,9 @@ pallet-cc-authorities-noting = { path = "container-chains/pallets/authorities-no
dancebox-runtime = { path = "runtime/dancebox", default-features = false }
manual-xcm-rpc = { path = "client/manual-xcm" }
+node-common = { path = "client/node-common" }
tc-consensus = { path = "client/consensus" }
tc-orchestrator-chain-interface = { path = "client/orchestrator-chain-interface" }
-node-common = { path = "client/node-common" }
test-relay-sproof-builder = { path = "test-sproof-builder", default-features = false }
tp-author-noting-inherent = { path = "primitives/author-noting-inherent", default-features = false }
tp-chain-state-snapshot = { path = "primitives/chain-state-snapshot", default-features = false }
@@ -67,7 +67,6 @@ pallet-migrations = { git = "https://github.com/moondance-labs/moonkit", branch
xcm-primitives = { git = "https://github.com/moondance-labs/moonkit", branch = "tanssi-polkadot-v1.1.0", default-features = false }
# Substrate (wasm)
-sp-consensus-beefy = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
frame-benchmarking = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
frame-executive = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
frame-support = { git = "https://github.com/moondance-labs/polkadot-sdk.git", branch = "tanssi-polkadot-v1.1.0", version = "4.0.0-dev", default-features = false }
@@ -94,6 +93,7 @@ sp-block-builder = { git = "https://github.com/moondance-labs/polkadot-sdk", bra
sp-consensus = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
sp-consensus-aura = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
sp-consensus-babe = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
+sp-consensus-beefy = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
sp-consensus-slots = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
sp-core = { git = "https://github.com/moondance-labs/polkadot-sdk.git", branch = "tanssi-polkadot-v1.1.0", version = "21.0.0", default-features = false }
sp-debug-derive = { git = "https://github.com/moondance-labs/polkadot-sdk.git", branch = "tanssi-polkadot-v1.1.0", default-features = false }
@@ -129,6 +129,7 @@ sc-network = { git = "https://github.com/moondance-labs/polkadot-sdk", branch =
sc-network-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
sc-network-sync = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
sc-network-test = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
+sc-network-transactions = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
sc-offchain = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
sc-rpc = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
sc-service = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
@@ -137,6 +138,7 @@ sc-telemetry = { git = "https://github.com/moondance-labs/polkadot-sdk", branch
sc-tracing = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
sc-transaction-pool = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
sc-transaction-pool-api = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
+sc-utils = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
sp-blockchain = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
sp-externalities = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
sp-keystore = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
@@ -157,11 +159,11 @@ pallet-xcm-benchmarks = { git = "https://github.com/moondance-labs/polkadot-sdk"
polkadot-parachain-primitives = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
polkadot-runtime-common = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
polkadot-runtime-parachains = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
-westend-runtime = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
-westend-runtime-constants = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
staging-xcm = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
staging-xcm-builder = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
staging-xcm-executor = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
+westend-runtime = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
+westend-runtime-constants = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0", default-features = false }
# Polkadot (client)
polkadot-cli = { git = "https://github.com/moondance-labs/polkadot-sdk", branch = "tanssi-polkadot-v1.1.0" }
@@ -224,9 +226,9 @@ fc-storage = { git = "https://github.com/moondance-labs/frontier", branch = "tan
bounded-collections = { version = "0.1.8", default-features = false }
hex-literal = { version = "0.3.4" }
log = { version = "0.4.17", default-features = false }
+rand_chacha = { version = "0.3.1", default-features = false }
serde = { version = "1.0.152", default-features = false }
smallvec = "1.10.0"
-rand_chacha = { version = "0.3.1", default-features = false }
# General (client)
async-io = "1.3"
@@ -249,6 +251,16 @@ tokio = { version = "1.32.0", default-features = false }
tracing = { version = "0.1.37", default-features = false }
url = "2.2.2"
+[patch.crates-io]
+jsonrpsee = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
+jsonrpsee-client-transport = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
+jsonrpsee-core = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
+jsonrpsee-http-client = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
+jsonrpsee-proc-macros = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
+jsonrpsee-server = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
+jsonrpsee-types = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
+jsonrpsee-ws-client = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
+
[profile.production]
codegen-units = 1
inherits = "release"
@@ -258,13 +270,3 @@ lto = true
[profile.release]
opt-level = 3
panic = "unwind"
-
-[patch.crates-io]
-jsonrpsee = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
-jsonrpsee-client-transport = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
-jsonrpsee-core = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
-jsonrpsee-types = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
-jsonrpsee-http-client = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
-jsonrpsee-proc-macros = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
-jsonrpsee-server = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
-jsonrpsee-ws-client = { git = "https://github.com/moondance-labs/jsonrpsee", branch = "tanssi-polkadot-v1.1.0" }
diff --git a/client/consensus/Cargo.toml b/client/consensus/Cargo.toml
index a8832a4f0..9a8acf9c1 100644
--- a/client/consensus/Cargo.toml
+++ b/client/consensus/Cargo.toml
@@ -14,7 +14,7 @@ sc-consensus-manual-seal = { workspace = true }
sc-consensus-slots = { workspace = true }
sc-telemetry = { workspace = true }
sp-api = { workspace = true }
-sp-application-crypto = { workspace = true, features = [ "std", "full_crypto"] }
+sp-application-crypto = { workspace = true, features = [ "full_crypto", "std" ] }
sp-block-builder = { workspace = true }
sp-blockchain = { workspace = true }
sp-consensus = { workspace = true }
diff --git a/client/manual-xcm/Cargo.toml b/client/manual-xcm/Cargo.toml
index 02511af71..993881404 100644
--- a/client/manual-xcm/Cargo.toml
+++ b/client/manual-xcm/Cargo.toml
@@ -12,7 +12,7 @@ futures = { workspace = true, features = [ "compat" ] }
hex-literal = { workspace = true }
jsonrpsee = { workspace = true, features = [ "macros", "server" ] }
parity-scale-codec = { workspace = true, features = [ "std" ] }
-tokio = { workspace = true, features = [ "sync", "time" ] }
staging-xcm = { workspace = true }
+tokio = { workspace = true, features = [ "sync", "time" ] }
cumulus-primitives-core = { workspace = true, features = [ "std" ] }
diff --git a/client/node-common/Cargo.toml b/client/node-common/Cargo.toml
index 8ac291e2c..a251f5143 100644
--- a/client/node-common/Cargo.toml
+++ b/client/node-common/Cargo.toml
@@ -36,6 +36,7 @@ sc-executor = { workspace = true }
sc-network = { workspace = true }
sc-network-common = { workspace = true }
sc-network-sync = { workspace = true }
+sc-network-transactions = { workspace = true }
sc-offchain = { workspace = true }
sc-rpc = { workspace = true }
sc-service = { workspace = true }
@@ -44,6 +45,7 @@ sc-telemetry = { workspace = true }
sc-tracing = { workspace = true }
sc-transaction-pool = { workspace = true }
sc-transaction-pool-api = { workspace = true }
+sc-utils = { workspace = true }
sp-api = { workspace = true, features = [ "std" ] }
sp-block-builder = { workspace = true }
sp-blockchain = { workspace = true }
@@ -77,4 +79,3 @@ cumulus-client-service = { workspace = true }
cumulus-primitives-core = { workspace = true }
cumulus-primitives-parachain-inherent = { workspace = true }
cumulus-relay-chain-interface = { workspace = true }
-
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index 90f14afe8..4c3ff78a4 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -14,20 +14,33 @@
// You should have received a copy of the GNU General Public License
// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
-use {
- sc_service::{KeystoreContainer, TaskManager},
- sp_block_builder::BlockBuilder,
-};
+use {futures::FutureExt, sp_offchain::OffchainWorkerApi};
use {
- cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
+ cumulus_client_cli::CollatorOptions,
+ cumulus_client_service::{
+ build_relay_chain_interface, prepare_node_config, CollatorSybilResistance,
+ },
+ cumulus_primitives_core::ParaId,
+ cumulus_relay_chain_interface::RelayChainInterface,
+ polkadot_primitives::CollatorPair,
+ sc_client_api::Backend,
+ sc_consensus::ImportQueue,
sc_executor::{
HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor,
DEFAULT_HEAP_ALLOC_STRATEGY,
},
- sc_service::{Configuration, TFullBackend, TFullClient},
+ sc_network::{config::FullNetworkConfiguration, NetworkService},
+ sc_network_sync::SyncingService,
+ sc_network_transactions::TransactionsHandlerController,
+ sc_service::{
+ Configuration, KeystoreContainer, NetworkStarter, TFullBackend, TFullClient, TaskManager,
+ },
sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+ sc_transaction_pool_api::OffchainTransactionPoolFactory,
+ sc_utils::mpsc::TracingUnboundedSender,
sp_api::ConstructRuntimeApi,
+ sp_block_builder::BlockBuilder,
sp_transaction_pool::runtime_api::TaggedTransactionQueue,
std::sync::Arc,
};
@@ -43,10 +56,33 @@ macro_rules! T {
[ConstructedRuntimeApi] => {
>::RuntimeApi
};
+ [Where] => {
+ Block: cumulus_primitives_core::BlockT,
+ ParachainNativeExecutor: NativeExecutionDispatch + 'static,
+ RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static,
+ T![ConstructedRuntimeApi]: TaggedTransactionQueue + BlockBuilder,
+ }
}
-pub struct NewPartial
-where
+pub struct CumulusNetwork {
+ pub network: Arc>,
+ pub system_rpc_tx: TracingUnboundedSender>,
+ pub tx_handler_controller: TransactionsHandlerController,
+ pub start_network: NetworkStarter,
+ pub sync_service: Arc>,
+}
+
+pub struct NodeBuilder<
+ Block,
+ RuntimeApi,
+ ParachainNativeExecutor,
+ // `cumulus_client_service::build_network` returns many important systems,
+ // but can only be called with an `import_queue` which can be different in
+ // each node. For that reason it is a `()` when calling `new`, then the
+ // caller creates the `import_queue` using systems contained in `NodeBuilder`,
+ // then calls `build_cumulus_network` with it to generate the cumulus systems.
+ Cumulus = (),
+> where
Block: cumulus_primitives_core::BlockT,
ParachainNativeExecutor: NativeExecutionDispatch + 'static,
RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static,
@@ -59,80 +95,203 @@ where
pub transaction_pool: Arc>,
pub telemetry: Option,
pub telemetry_worker_handle: Option,
+
+ pub relay_chain_interface: Arc,
+ pub collator_key: Option,
+
+ pub cumulus: Cumulus,
}
-pub fn new_partial(
- config: &Configuration,
-) -> Result, sc_service::Error>
+impl
+ NodeBuilder
where
Block: cumulus_primitives_core::BlockT,
ParachainNativeExecutor: NativeExecutionDispatch + 'static,
RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static,
- T![ConstructedRuntimeApi]: TaggedTransactionQueue + BlockBuilder,
+ T![ConstructedRuntimeApi]: TaggedTransactionQueue
+ + BlockBuilder
+ + cumulus_primitives_core::CollectCollationInfo,
{
- let telemetry = config
- .telemetry_endpoints
- .clone()
- .filter(|x| !x.is_empty())
- .map(|endpoints| -> Result<_, sc_telemetry::Error> {
- let worker = TelemetryWorker::new(16)?;
- let telemetry = worker.handle().new_telemetry(endpoints);
- Ok((worker, telemetry))
- })
- .transpose()?;
+ pub async fn new(
+ parachain_config: &Configuration,
+ polkadot_config: Configuration,
+ collator_options: CollatorOptions,
+ hwbench: Option,
+ ) -> Result {
+ let telemetry = parachain_config
+ .telemetry_endpoints
+ .clone()
+ .filter(|x| !x.is_empty())
+ .map(|endpoints| -> Result<_, sc_telemetry::Error> {
+ let worker = TelemetryWorker::new(16)?;
+ let telemetry = worker.handle().new_telemetry(endpoints);
+ Ok((worker, telemetry))
+ })
+ .transpose()?;
+
+ let heap_pages =
+ parachain_config
+ .default_heap_pages
+ .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
+ extra_pages: h as _,
+ });
+
+ // Default runtime_cache_size is 2
+ // For now we can work with this, but it will likely need
+ // to change once we start having runtime_cache_sizes, or
+ // run nodes with the maximum for this value
+ let wasm = WasmExecutor::builder()
+ .with_execution_method(parachain_config.wasm_method)
+ .with_onchain_heap_alloc_strategy(heap_pages)
+ .with_offchain_heap_alloc_strategy(heap_pages)
+ .with_max_runtime_instances(parachain_config.max_runtime_instances)
+ .with_runtime_cache_size(parachain_config.runtime_cache_size)
+ .build();
+
+ let executor = ::new_with_wasm_executor(wasm);
+
+ let (client, backend, keystore_container, mut task_manager) =
+ sc_service::new_full_parts::(
+ parachain_config,
+ telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+ executor,
+ )?;
+ let client = Arc::new(client);
- let heap_pages = config
- .default_heap_pages
- .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
- extra_pages: h as _,
+ let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
+
+ let telemetry = telemetry.map(|(worker, telemetry)| {
+ task_manager
+ .spawn_handle()
+ .spawn("telemetry", None, worker.run());
+ telemetry
});
- // Default runtime_cache_size is 2
- // For now we can work with this, but it will likely need
- // to change once we start having runtime_cache_sizes, or
- // run nodes with the maximum for this value
- let wasm = WasmExecutor::builder()
- .with_execution_method(config.wasm_method)
- .with_onchain_heap_alloc_strategy(heap_pages)
- .with_offchain_heap_alloc_strategy(heap_pages)
- .with_max_runtime_instances(config.max_runtime_instances)
- .with_runtime_cache_size(config.runtime_cache_size)
- .build();
-
- let executor = ::new_with_wasm_executor(wasm);
-
- let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts::(
- config,
- telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
- executor,
- )?;
- let client = Arc::new(client);
-
- let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
- let telemetry = telemetry.map(|(worker, telemetry)| {
- task_manager
- .spawn_handle()
- .spawn("telemetry", None, worker.run());
- telemetry
- });
-
- let transaction_pool = sc_transaction_pool::BasicPool::new_full(
- config.transaction_pool.clone(),
- config.role.is_authority().into(),
- config.prometheus_registry(),
- task_manager.spawn_essential_handle(),
- client.clone(),
- );
-
- Ok(NewPartial {
- client,
- backend,
- transaction_pool,
- telemetry,
- telemetry_worker_handle,
- task_manager,
- keystore_container,
- })
+ let transaction_pool = sc_transaction_pool::BasicPool::new_full(
+ parachain_config.transaction_pool.clone(),
+ parachain_config.role.is_authority().into(),
+ parachain_config.prometheus_registry(),
+ task_manager.spawn_essential_handle(),
+ client.clone(),
+ );
+
+ let (relay_chain_interface, collator_key) = build_relay_chain_interface(
+ polkadot_config,
+ ¶chain_config,
+ telemetry_worker_handle.clone(),
+ &mut task_manager,
+ collator_options.clone(),
+ hwbench.clone(),
+ )
+ .await
+ .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
+
+ Ok(Self {
+ client,
+ backend,
+ transaction_pool,
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
+ relay_chain_interface,
+ collator_key,
+ cumulus: (),
+ })
+ }
+
+ pub async fn build_cumulus_network(
+ self,
+ parachain_config: &Configuration,
+ para_id: ParaId,
+ import_queue: impl ImportQueue + 'static,
+ ) -> sc_service::error::Result<
+ NodeBuilder>,
+ > {
+ let Self {
+ client,
+ backend,
+ transaction_pool,
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
+ relay_chain_interface,
+ collator_key,
+ cumulus: (),
+ } = self;
+
+ let net_config = FullNetworkConfiguration::new(¶chain_config.network);
+
+ let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
+ cumulus_client_service::build_network(cumulus_client_service::BuildNetworkParams {
+ parachain_config: ¶chain_config,
+ client: client.clone(),
+ transaction_pool: transaction_pool.clone(),
+ spawn_handle: task_manager.spawn_handle(),
+ import_queue: import_queue,
+ para_id,
+ relay_chain_interface: relay_chain_interface.clone(),
+ net_config,
+ sybil_resistance_level: CollatorSybilResistance::Resistant,
+ })
+ .await?;
+
+ Ok(NodeBuilder {
+ client,
+ backend,
+ transaction_pool,
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
+ relay_chain_interface,
+ collator_key,
+ cumulus: CumulusNetwork {
+ network,
+ system_rpc_tx,
+ tx_handler_controller,
+ start_network,
+ sync_service,
+ },
+ })
+ }
+}
+
+impl
+ NodeBuilder>
+where
+ Block: cumulus_primitives_core::BlockT,
+ ParachainNativeExecutor: NativeExecutionDispatch + 'static,
+ RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static,
+ T![ConstructedRuntimeApi]:
+ TaggedTransactionQueue + BlockBuilder + OffchainWorkerApi,
+{
+ pub fn spawn_common_tasks(
+ &mut self,
+ parachain_config: &Configuration,
+ ) -> sc_service::error::Result<()> {
+ if parachain_config.offchain_worker.enabled {
+ self.task_manager.spawn_handle().spawn(
+ "offchain-workers-runner",
+ "offchain-work",
+ sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
+ runtime_api_provider: self.client.clone(),
+ keystore: Some(self.keystore_container.keystore()),
+ offchain_db: self.backend.offchain_storage(),
+ transaction_pool: Some(OffchainTransactionPoolFactory::new(
+ self.transaction_pool.clone(),
+ )),
+ network_provider: self.cumulus.network.clone(),
+ is_validator: parachain_config.role.is_authority(),
+ enable_http_requests: false,
+ custom_extensions: move |_| vec![],
+ })
+ .run(self.client.clone(), self.task_manager.spawn_handle())
+ .boxed(),
+ );
+ }
+
+ Ok(())
+ }
}
diff --git a/container-chains/templates/frontier/node/Cargo.toml b/container-chains/templates/frontier/node/Cargo.toml
index 7ab6c5189..ff67e4984 100644
--- a/container-chains/templates/frontier/node/Cargo.toml
+++ b/container-chains/templates/frontier/node/Cargo.toml
@@ -21,10 +21,10 @@ serde = { workspace = true, features = [ "derive" ] }
url = { workspace = true }
# Local
-node-common = { workspace = true }
ccp-authorities-noting-inherent = { workspace = true }
container-chain-template-frontier-runtime = { workspace = true, features = [ "std" ] }
manual-xcm-rpc = { workspace = true }
+node-common = { workspace = true }
tc-consensus = { workspace = true }
# Nimbus
@@ -43,7 +43,6 @@ sc-cli = { workspace = true }
sc-client-api = { workspace = true }
sc-consensus = { workspace = true }
sc-consensus-manual-seal = { workspace = true }
-sp-debug-derive = { workspace = true }
sc-executor = { workspace = true }
sc-network = { workspace = true }
sc-network-common = { workspace = true }
@@ -60,6 +59,7 @@ sp-api = { workspace = true, features = [ "std" ] }
sp-block-builder = { workspace = true }
sp-blockchain = { workspace = true }
sp-consensus = { workspace = true }
+sp-debug-derive = { workspace = true }
sp-consensus-aura = { workspace = true }
sp-core = { workspace = true, features = [ "std" ] }
@@ -97,8 +97,8 @@ fc-cli = { workspace = true }
fc-consensus = { workspace = true }
fc-db = { workspace = true, features = [ "sql" ] }
fc-mapping-sync = { workspace = true, features = [ "sql" ] }
-fc-rpc = { workspace = true, features = ["txpool"] }
-fc-rpc-core = { workspace = true, features = ["txpool"] }
+fc-rpc = { workspace = true, features = [ "txpool" ] }
+fc-rpc-core = { workspace = true, features = [ "txpool" ] }
fc-storage = { workspace = true }
fp-evm = { workspace = true }
fp-rpc = { workspace = true }
@@ -108,11 +108,7 @@ substrate-build-script-utils = { workspace = true }
[features]
default = []
-runtime-benchmarks = [
- "container-chain-template-frontier-runtime/runtime-benchmarks",
- "pallet-ethereum/runtime-benchmarks",
- "polkadot-cli/runtime-benchmarks"
-]
+runtime-benchmarks = [ "container-chain-template-frontier-runtime/runtime-benchmarks", "pallet-ethereum/runtime-benchmarks", "polkadot-cli/runtime-benchmarks" ]
try-runtime = [
"container-chain-template-frontier-runtime/try-runtime",
"try-runtime-cli/try-runtime",
diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs
index 99d31eeae..4b79a51be 100644
--- a/container-chains/templates/frontier/node/src/service.rs
+++ b/container-chains/templates/frontier/node/src/service.rs
@@ -67,7 +67,7 @@ use {
sc_telemetry::{Telemetry, TelemetryWorkerHandle},
};
-use node_common::service::NewPartial;
+use node_common::service::NodeBuilder;
/// Native executor type.
use crate::client::TemplateRuntimeExecutor;
@@ -161,64 +161,66 @@ pub fn new_partial(
// Use ethereum style for subscription ids
config.rpc_id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider));
- let NewPartial {
- client,
- backend,
- transaction_pool,
- telemetry,
- telemetry_worker_handle,
- task_manager,
- keystore_container,
- } = node_common::service::new_partial(config)?;
-
- let maybe_select_chain = if dev_service {
- Some(sc_consensus::LongestChain::new(backend.clone()))
- } else {
- None
- };
-
- let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new())));
- let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
-
- let frontier_backend = fc_db::Backend::KeyValue(open_frontier_backend(client.clone(), config)?);
-
- let frontier_block_import = FrontierBlockImport::new(client.clone(), client.clone());
-
- let parachain_block_import = cumulus_client_consensus_common::ParachainBlockImport::new(
- frontier_block_import,
- backend.clone(),
- );
-
- let import_queue = nimbus_consensus::import_queue(
- client.clone(),
- parachain_block_import.clone(),
- move |_, _| async move {
- let time = sp_timestamp::InherentDataProvider::from_system_time();
-
- Ok((time,))
- },
- &task_manager.spawn_essential_handle(),
- config.prometheus_registry(),
- !dev_service,
- )?;
-
- Ok(PartialComponents {
- backend,
- client,
- import_queue,
- keystore_container,
- task_manager,
- transaction_pool,
- select_chain: maybe_select_chain,
- other: (
- parachain_block_import,
- filter_pool,
- telemetry,
- telemetry_worker_handle,
- frontier_backend,
- fee_history_cache,
- ),
- })
+ todo!()
+
+ // let NodeBuilder {
+ // client,
+ // backend,
+ // transaction_pool,
+ // telemetry,
+ // telemetry_worker_handle,
+ // task_manager,
+ // keystore_container,
+ // } = node_common::service::new_partial(config)?;
+
+ // let maybe_select_chain = if dev_service {
+ // Some(sc_consensus::LongestChain::new(backend.clone()))
+ // } else {
+ // None
+ // };
+
+ // let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new())));
+ // let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
+
+ // let frontier_backend = fc_db::Backend::KeyValue(open_frontier_backend(client.clone(), config)?);
+
+ // let frontier_block_import = FrontierBlockImport::new(client.clone(), client.clone());
+
+ // let parachain_block_import = cumulus_client_consensus_common::ParachainBlockImport::new(
+ // frontier_block_import,
+ // backend.clone(),
+ // );
+
+ // let import_queue = nimbus_consensus::import_queue(
+ // client.clone(),
+ // parachain_block_import.clone(),
+ // move |_, _| async move {
+ // let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+ // Ok((time,))
+ // },
+ // &task_manager.spawn_essential_handle(),
+ // config.prometheus_registry(),
+ // !dev_service,
+ // )?;
+
+ // Ok(PartialComponents {
+ // backend,
+ // client,
+ // import_queue,
+ // keystore_container,
+ // task_manager,
+ // transaction_pool,
+ // select_chain: maybe_select_chain,
+ // other: (
+ // parachain_block_import,
+ // filter_pool,
+ // telemetry,
+ // telemetry_worker_handle,
+ // frontier_backend,
+ // fee_history_cache,
+ // ),
+ // })
}
/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
diff --git a/container-chains/templates/frontier/runtime/Cargo.toml b/container-chains/templates/frontier/runtime/Cargo.toml
index d87cb0325..870b2f0d5 100644
--- a/container-chains/templates/frontier/runtime/Cargo.toml
+++ b/container-chains/templates/frontier/runtime/Cargo.toml
@@ -7,14 +7,14 @@ license = "GPL-3.0-only"
version = "0.1.0"
[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
+targets = [ "x86_64-unknown-linux-gnu" ]
[dependencies]
hex-literal = { workspace = true, optional = true }
log = { workspace = true }
-parity-scale-codec = { workspace = true, features = ["derive"] }
-scale-info = { workspace = true, features = ["derive"] }
-serde = { workspace = true, optional = true, features = ["derive"] }
+parity-scale-codec = { workspace = true, features = [ "derive" ] }
+scale-info = { workspace = true, features = [ "derive" ] }
+serde = { workspace = true, optional = true, features = [ "derive" ] }
smallvec = { workspace = true }
# Local
@@ -25,7 +25,7 @@ tp-consensus = { workspace = true }
# Moonkit
nimbus-primitives = { workspace = true }
pallet-author-inherent = { workspace = true }
-pallet-maintenance-mode = { workspace = true, features = ["xcm-support"] }
+pallet-maintenance-mode = { workspace = true, features = [ "xcm-support" ] }
pallet-migrations = { workspace = true }
xcm-primitives = { workspace = true }
@@ -35,14 +35,14 @@ frame-support = { workspace = true }
frame-system = { workspace = true }
frame-system-rpc-runtime-api = { workspace = true }
frame-try-runtime = { workspace = true, optional = true }
-pallet-balances = { workspace = true, features = ["insecure_zero_ed"] }
+pallet-balances = { workspace = true, features = [ "insecure_zero_ed" ] }
+pallet-proxy = { workspace = true }
pallet-root-testing = { workspace = true }
pallet-sudo = { workspace = true }
pallet-timestamp = { workspace = true }
pallet-transaction-payment = { workspace = true }
pallet-transaction-payment-rpc-runtime-api = { workspace = true }
pallet-utility = { workspace = true }
-pallet-proxy = { workspace = true }
sp-api = { workspace = true }
sp-block-builder = { workspace = true }
sp-consensus-aura = { workspace = true }
@@ -78,10 +78,10 @@ cumulus-primitives-utility = { workspace = true }
parachain-info = { workspace = true }
# Frontier
-fp-account = { workspace = true, features = ["serde"] }
-fp-evm = { workspace = true, features = ["serde"] }
+fp-account = { workspace = true, features = [ "serde" ] }
+fp-evm = { workspace = true, features = [ "serde" ] }
fp-rpc = { workspace = true }
-fp-self-contained = { workspace = true, features = ["serde"] }
+fp-self-contained = { workspace = true, features = [ "serde" ] }
pallet-base-fee = { workspace = true }
pallet-dynamic-fee = { workspace = true }
pallet-ethereum = { workspace = true }
@@ -95,7 +95,7 @@ pallet-hotfix-sufficients = { workspace = true }
substrate-wasm-builder = { workspace = true }
[features]
-default = ["std"]
+default = [ "std" ]
std = [
"ccp-xcm/std",
"cumulus-pallet-dmp-queue/std",
@@ -151,15 +151,15 @@ std = [
"sp-std/std",
"sp-transaction-pool/std",
"sp-version/std",
- "tp-consensus/std",
"staging-xcm-builder/std",
"staging-xcm-executor/std",
"staging-xcm/std",
+ "tp-consensus/std",
"xcm-primitives/std",
]
# Allow to print logs details (no wasm:stripped)
-force-debug = ["sp-debug-derive/force-debug"]
+force-debug = [ "sp-debug-derive/force-debug" ]
runtime-benchmarks = [
"cumulus-pallet-session-benchmarking/runtime-benchmarks",
@@ -191,11 +191,11 @@ try-runtime = [
"pallet-evm/try-runtime",
"pallet-hotfix-sufficients/try-runtime",
"pallet-maintenance-mode/try-runtime",
+ "pallet-proxy/try-runtime",
"pallet-sudo/try-runtime",
"pallet-timestamp/try-runtime",
"pallet-transaction-payment/try-runtime",
"pallet-utility/try-runtime",
- "pallet-proxy/try-runtime",
"parachain-info/try-runtime",
"polkadot-runtime-common/try-runtime",
"sp-runtime/try-runtime",
diff --git a/container-chains/templates/simple/node/Cargo.toml b/container-chains/templates/simple/node/Cargo.toml
index b598a54cd..0fa7a4659 100644
--- a/container-chains/templates/simple/node/Cargo.toml
+++ b/container-chains/templates/simple/node/Cargo.toml
@@ -18,8 +18,8 @@ parity-scale-codec = { workspace = true }
serde = { workspace = true, features = [ "derive" ] }
# Local
-node-common = { workspace = true }
container-chain-template-simple-runtime = { workspace = true, features = [ "std" ] }
+node-common = { workspace = true }
tc-consensus = { workspace = true }
# Nimbus
@@ -85,10 +85,7 @@ substrate-build-script-utils = { workspace = true }
[features]
default = []
-runtime-benchmarks = [
- "container-chain-template-simple-runtime/runtime-benchmarks",
- "polkadot-cli/runtime-benchmarks"
-]
+runtime-benchmarks = [ "container-chain-template-simple-runtime/runtime-benchmarks", "polkadot-cli/runtime-benchmarks" ]
try-runtime = [
"container-chain-template-simple-runtime/try-runtime",
"try-runtime-cli/try-runtime",
diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs
index 6b81d696f..4be4239e6 100644
--- a/container-chains/templates/simple/node/src/service.rs
+++ b/container-chains/templates/simple/node/src/service.rs
@@ -23,7 +23,7 @@ use {cumulus_client_cli::CollatorOptions, sc_network::config::FullNetworkConfigu
// Local Runtime Types
use {
container_chain_template_simple_runtime::{opaque::Block, RuntimeApi},
- node_common::service::NewPartial,
+ node_common::service::NodeBuilder,
};
// Cumulus Imports
@@ -94,41 +94,42 @@ pub fn new_partial(
>,
sc_service::Error,
> {
- let NewPartial {
- client,
- backend,
- transaction_pool,
- telemetry,
- telemetry_worker_handle,
- task_manager,
- keystore_container,
- } = node_common::service::new_partial(config)?;
-
- let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
- let import_queue = nimbus_consensus::import_queue(
- client.clone(),
- block_import.clone(),
- move |_, _| async move {
- let time = sp_timestamp::InherentDataProvider::from_system_time();
-
- Ok((time,))
- },
- &task_manager.spawn_essential_handle(),
- config.prometheus_registry(),
- false,
- )?;
-
- Ok(PartialComponents {
- backend,
- client,
- import_queue,
- keystore_container,
- task_manager,
- transaction_pool,
- select_chain: (),
- other: (block_import, telemetry, telemetry_worker_handle),
- })
+ todo!()
+ // let NodeBuilder {
+ // client,
+ // backend,
+ // transaction_pool,
+ // telemetry,
+ // telemetry_worker_handle,
+ // task_manager,
+ // keystore_container,
+ // } = node_common::service::NodeBuilder::new(config)?;
+
+ // let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+ // let import_queue = nimbus_consensus::import_queue(
+ // client.clone(),
+ // block_import.clone(),
+ // move |_, _| async move {
+ // let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+ // Ok((time,))
+ // },
+ // &task_manager.spawn_essential_handle(),
+ // config.prometheus_registry(),
+ // false,
+ // )?;
+
+ // Ok(PartialComponents {
+ // backend,
+ // client,
+ // import_queue,
+ // keystore_container,
+ // task_manager,
+ // transaction_pool,
+ // select_chain: (),
+ // other: (block_import, telemetry, telemetry_worker_handle),
+ // })
}
/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
diff --git a/container-chains/templates/simple/runtime/Cargo.toml b/container-chains/templates/simple/runtime/Cargo.toml
index 91dcd0d69..d398010ec 100644
--- a/container-chains/templates/simple/runtime/Cargo.toml
+++ b/container-chains/templates/simple/runtime/Cargo.toml
@@ -7,14 +7,14 @@ license = "GPL-3.0-only"
version = "0.1.0"
[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
+targets = [ "x86_64-unknown-linux-gnu" ]
[dependencies]
hex-literal = { workspace = true, optional = true }
log = { workspace = true }
-parity-scale-codec = { workspace = true, features = ["derive"] }
-scale-info = { workspace = true, features = ["derive"] }
-serde = { workspace = true, optional = true, features = ["derive"] }
+parity-scale-codec = { workspace = true, features = [ "derive" ] }
+scale-info = { workspace = true, features = [ "derive" ] }
+serde = { workspace = true, optional = true, features = [ "derive" ] }
smallvec = { workspace = true }
# Local
@@ -24,7 +24,7 @@ tp-consensus = { workspace = true }
# Moonkit
nimbus-primitives = { workspace = true }
pallet-author-inherent = { workspace = true }
-pallet-maintenance-mode = { workspace = true, features = ["xcm-support"] }
+pallet-maintenance-mode = { workspace = true, features = [ "xcm-support" ] }
pallet-migrations = { workspace = true }
xcm-primitives = { workspace = true }
@@ -34,13 +34,13 @@ frame-support = { workspace = true }
frame-system = { workspace = true }
frame-system-rpc-runtime-api = { workspace = true }
pallet-balances = { workspace = true }
+pallet-proxy = { workspace = true }
pallet-session = { workspace = true }
pallet-sudo = { workspace = true }
pallet-timestamp = { workspace = true }
pallet-transaction-payment = { workspace = true }
pallet-transaction-payment-rpc-runtime-api = { workspace = true }
pallet-utility = { workspace = true }
-pallet-proxy = { workspace = true }
sp-api = { workspace = true }
sp-block-builder = { workspace = true }
sp-consensus-aura = { workspace = true }
@@ -83,7 +83,7 @@ frame-try-runtime = { workspace = true, optional = true }
substrate-wasm-builder = { workspace = true }
[features]
-default = ["std"]
+default = [ "std" ]
std = [
"cumulus-pallet-dmp-queue/std",
"cumulus-pallet-parachain-system/std",
@@ -127,15 +127,15 @@ std = [
"sp-std/std",
"sp-transaction-pool/std",
"sp-version/std",
- "tp-consensus/std",
"staging-xcm-builder/std",
"staging-xcm-executor/std",
"staging-xcm/std",
+ "tp-consensus/std",
"xcm-primitives/std",
]
# Allow to print logs details (no wasm:stripped)
-force-debug = ["sp-debug-derive/force-debug"]
+force-debug = [ "sp-debug-derive/force-debug" ]
runtime-benchmarks = [
"cumulus-pallet-session-benchmarking/runtime-benchmarks",
@@ -162,11 +162,11 @@ try-runtime = [
"pallet-balances/try-runtime",
"pallet-cc-authorities-noting/try-runtime",
"pallet-maintenance-mode/try-runtime",
+ "pallet-proxy/try-runtime",
"pallet-session/try-runtime",
"pallet-sudo/try-runtime",
"pallet-timestamp/try-runtime",
"pallet-utility/try-runtime",
- "pallet-proxy/try-runtime",
"parachain-info/try-runtime",
"polkadot-runtime-common/try-runtime",
"sp-runtime/try-runtime",
diff --git a/node/Cargo.toml b/node/Cargo.toml
index 2b88db0b7..fa65ae422 100644
--- a/node/Cargo.toml
+++ b/node/Cargo.toml
@@ -22,10 +22,10 @@ serde_json = { workspace = true }
tokio = { workspace = true }
# Local
-node-common = { workspace = true }
ccp-authorities-noting-inherent = { workspace = true, features = [ "std" ] }
dancebox-runtime = { workspace = true, features = [ "std" ] }
manual-xcm-rpc = { workspace = true }
+node-common = { workspace = true }
pallet-author-noting-runtime-api = { workspace = true, features = [ "std" ] }
pallet-collator-assignment-runtime-api = { workspace = true, features = [ "std" ] }
pallet-configuration = { workspace = true, features = [ "std" ] }
@@ -103,11 +103,7 @@ substrate-build-script-utils = { workspace = true }
[features]
default = []
-runtime-benchmarks = [
- "dancebox-runtime/runtime-benchmarks",
- "polkadot-cli/runtime-benchmarks",
- "polkadot-service/runtime-benchmarks"
-]
+runtime-benchmarks = [ "dancebox-runtime/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", "polkadot-service/runtime-benchmarks" ]
try-runtime = [ "dancebox-runtime/try-runtime", "nimbus-primitives/try-runtime", "pallet-configuration/try-runtime", "polkadot-cli/try-runtime", "polkadot-service/try-runtime", "sp-runtime/try-runtime", "try-runtime-cli/try-runtime" ]
fast-runtime = [ "dancebox-runtime/fast-runtime" ]
diff --git a/node/src/service.rs b/node/src/service.rs
index 8f6d89e92..ba368deb4 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -16,7 +16,7 @@
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
-use node_common::service::NewPartial;
+use node_common::service::NodeBuilder;
#[allow(deprecated)]
use {
@@ -137,44 +137,45 @@ pub fn new_partial(
>,
sc_service::Error,
> {
- let NewPartial {
- client,
- backend,
- transaction_pool,
- telemetry,
- telemetry_worker_handle,
- task_manager,
- keystore_container,
- } = node_common::service::new_partial(config)?;
-
- let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
- // The nimbus import queue ONLY checks the signature correctness
- // Any other checks corresponding to the author-correctness should be done
- // in the runtime
- let import_queue = nimbus_consensus::import_queue(
- client.clone(),
- block_import.clone(),
- move |_, _| async move {
- let time = sp_timestamp::InherentDataProvider::from_system_time();
-
- Ok((time,))
- },
- &task_manager.spawn_essential_handle(),
- config.prometheus_registry(),
- false,
- )?;
-
- Ok(PartialComponents {
- backend,
- client,
- import_queue,
- keystore_container,
- task_manager,
- transaction_pool,
- select_chain: None,
- other: (block_import, telemetry, telemetry_worker_handle),
- })
+ todo!()
+ // let NodeBuilder {
+ // client,
+ // backend,
+ // transaction_pool,
+ // telemetry,
+ // telemetry_worker_handle,
+ // task_manager,
+ // keystore_container,
+ // } = node_common::service::NodeBuilder::new(config)?;
+
+ // let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+ // // The nimbus import queue ONLY checks the signature correctness
+ // // Any other checks corresponding to the author-correctness should be done
+ // // in the runtime
+ // let import_queue = nimbus_consensus::import_queue(
+ // client.clone(),
+ // block_import.clone(),
+ // move |_, _| async move {
+ // let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+ // Ok((time,))
+ // },
+ // &task_manager.spawn_essential_handle(),
+ // config.prometheus_registry(),
+ // false,
+ // )?;
+
+ // Ok(PartialComponents {
+ // backend,
+ // client,
+ // import_queue,
+ // keystore_container,
+ // task_manager,
+ // transaction_pool,
+ // select_chain: None,
+ // other: (block_import, telemetry, telemetry_worker_handle),
+ // })
}
/// Background task used to detect changes to container chain assignment,
@@ -269,37 +270,88 @@ pub fn new_partial_dev(
>,
sc_service::Error,
> {
- let NewPartial {
- client,
- backend,
- transaction_pool,
- telemetry,
- telemetry_worker_handle,
- task_manager,
- keystore_container,
- } = node_common::service::new_partial(config)?;
+ todo!()
+ // let NodeBuilder {
+ // client,
+ // backend,
+ // transaction_pool,
+ // telemetry,
+ // telemetry_worker_handle,
+ // task_manager,
+ // keystore_container,
+ // } = node_common::service::NodeBuilder::new(config)?;
+
+ // let block_import = DevParachainBlockImport::new(client.clone());
+ // let import_queue = build_manual_seal_import_queue(
+ // client.clone(),
+ // block_import.clone(),
+ // config,
+ // telemetry.as_ref().map(|telemetry| telemetry.handle()),
+ // &task_manager,
+ // )?;
+
+ // let maybe_select_chain = Some(sc_consensus::LongestChain::new(backend.clone()));
+
+ // Ok(PartialComponents {
+ // backend,
+ // client,
+ // import_queue,
+ // keystore_container,
+ // task_manager,
+ // transaction_pool,
+ // select_chain: maybe_select_chain,
+ // other: (block_import, telemetry, telemetry_worker_handle),
+ // })
+}
- let block_import = DevParachainBlockImport::new(client.clone());
+/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
+///
+/// This is the actual implementation that is abstract over the executor and the runtime api.
+#[sc_tracing::logging::prefix_logs_with("Orchestrator")]
+async fn start_node_impl2(
+ orchestrator_config: Configuration,
+ polkadot_config: Configuration,
+ container_chain_config: Option<(ContainerChainCli, tokio::runtime::Handle)>,
+ collator_options: CollatorOptions,
+ para_id: ParaId,
+ hwbench: Option,
+) -> sc_service::error::Result<(TaskManager, Arc)> {
+ let parachain_config = prepare_node_config(orchestrator_config);
+
+ // Create a `NodeBuilder` which helps setup parachain nodes common systems.
+ let node_builder = node_common::service::NodeBuilder::new(
+ ¶chain_config,
+ polkadot_config,
+ collator_options.clone(),
+ hwbench.clone(),
+ )
+ .await?;
+
+ // This node block import.
+ let block_import = DevParachainBlockImport::new(node_builder.client.clone());
let import_queue = build_manual_seal_import_queue(
- client.clone(),
+ node_builder.client.clone(),
block_import.clone(),
- config,
- telemetry.as_ref().map(|telemetry| telemetry.handle()),
- &task_manager,
+ ¶chain_config,
+ node_builder
+ .telemetry
+ .as_ref()
+ .map(|telemetry| telemetry.handle()),
+ &node_builder.task_manager,
)?;
- let maybe_select_chain = Some(sc_consensus::LongestChain::new(backend.clone()));
+ // Upgrade the NodeBuilder with cumulus capabilities using our block import.
+ let mut node_builder = node_builder
+ .build_cumulus_network(¶chain_config, para_id, import_queue)
+ .await?;
- Ok(PartialComponents {
- backend,
- client,
- import_queue,
- keystore_container,
- task_manager,
- transaction_pool,
- select_chain: maybe_select_chain,
- other: (block_import, telemetry, telemetry_worker_handle),
- })
+ node_builder.spawn_common_tasks(¶chain_config)?;
+
+ // let maybe_select_chain = Some(sc_consensus::LongestChain::new(
+ // node_builder.backend.clone(),
+ // ));
+
+ todo!()
}
/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
diff --git a/pallets/collator-assignment/Cargo.toml b/pallets/collator-assignment/Cargo.toml
index 0e55cb101..be1c8bf7c 100644
--- a/pallets/collator-assignment/Cargo.toml
+++ b/pallets/collator-assignment/Cargo.toml
@@ -9,10 +9,13 @@ version = "0.1.0"
[package.metadata.docs.rs]
targets = [ "x86_64-unknown-linux-gnu" ]
[dependencies]
+frame-benchmarking = { workspace = true }
frame-support = { workspace = true }
frame-system = { workspace = true }
log = { workspace = true }
parity-scale-codec = { workspace = true, features = [ "derive", "max-encoded-len" ] }
+rand = { workspace = true }
+rand_chacha = { workspace = true }
scale-info = { workspace = true }
serde = { workspace = true, optional = true, features = [ "derive" ] }
sp-core = { workspace = true }
@@ -20,9 +23,6 @@ sp-runtime = { workspace = true }
sp-std = { workspace = true }
tp-collator-assignment = { workspace = true }
tp-traits = { workspace = true }
-frame-benchmarking = { workspace = true }
-rand = { workspace = true }
-rand_chacha = { workspace = true }
[dev-dependencies]
sp-io = { workspace = true }
@@ -36,10 +36,10 @@ std = [
"parity-scale-codec/std",
"scale-info/std",
"serde",
- "tp-collator-assignment/std",
- "tp-traits/std",
"sp-runtime/std",
"sp-std/std",
+ "tp-collator-assignment/std",
+ "tp-traits/std",
]
runtime-benchmarks = [
"frame-benchmarking/runtime-benchmarks",
diff --git a/pallets/pooled-staking/Cargo.toml b/pallets/pooled-staking/Cargo.toml
index 31bdfb081..4b2da6707 100644
--- a/pallets/pooled-staking/Cargo.toml
+++ b/pallets/pooled-staking/Cargo.toml
@@ -52,4 +52,4 @@ std = [
"tp-traits/std",
]
runtime-benchmarks = [ "frame-benchmarking" ]
-try-runtime = [ "frame-support/try-runtime" ]
\ No newline at end of file
+try-runtime = [ "frame-support/try-runtime" ]
diff --git a/pallets/registrar/Cargo.toml b/pallets/registrar/Cargo.toml
index c3502a480..c922f4850 100644
--- a/pallets/registrar/Cargo.toml
+++ b/pallets/registrar/Cargo.toml
@@ -31,6 +31,6 @@ sp-runtime = { workspace = true }
[features]
default = [ "std" ]
-std = [ "serde/std", "pallet-configuration/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", "pallet-balances/std", "parity-scale-codec/std", "scale-info/std", "tp-container-chain-genesis-data/std", "tp-traits/std" ]
+std = [ "frame-benchmarking/std", "frame-support/std", "frame-system/std", "pallet-balances/std", "pallet-configuration/std", "parity-scale-codec/std", "scale-info/std", "serde/std", "tp-container-chain-genesis-data/std", "tp-traits/std" ]
runtime-benchmarks = [ "frame-benchmarking", "tp-traits/runtime-benchmarks" ]
try-runtime = [ "frame-support/try-runtime" ]
diff --git a/primitives/container-chain-genesis-data/Cargo.toml b/primitives/container-chain-genesis-data/Cargo.toml
index 627c8df53..23a87b3d2 100644
--- a/primitives/container-chain-genesis-data/Cargo.toml
+++ b/primitives/container-chain-genesis-data/Cargo.toml
@@ -35,5 +35,5 @@ polkadot-primitives = { workspace = true, optional = true }
[features]
default = [ "std" ]
-std = [ "serde/std", "cumulus-primitives-core/std", "frame-support/std", "parity-scale-codec/std", "polkadot-primitives", "serde/std", "sp-core/std", "sp-runtime/std", "sp-state-machine/std", "sp-std/std", "sp-trie/std", "tp-traits/std" ]
+std = [ "cumulus-primitives-core/std", "frame-support/std", "parity-scale-codec/std", "polkadot-primitives", "serde/std", "serde/std", "sp-core/std", "sp-runtime/std", "sp-state-machine/std", "sp-std/std", "sp-trie/std", "tp-traits/std" ]
json = [ "hex", "serde_json" ]
diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml
index 996071a5c..7db631602 100644
--- a/primitives/core/Cargo.toml
+++ b/primitives/core/Cargo.toml
@@ -12,12 +12,12 @@ targets = [ "x86_64-unknown-linux-gnu" ]
hex-literal = { workspace = true }
# Substrate
+frame-support = { workspace = true }
parity-scale-codec = { workspace = true }
sp-core = { workspace = true }
sp-io = { workspace = true }
sp-runtime = { workspace = true }
sp-std = { workspace = true }
-frame-support = { workspace = true }
# Cumulus
cumulus-primitives-core = { workspace = true }
diff --git a/primitives/traits/Cargo.toml b/primitives/traits/Cargo.toml
index deb38c889..6307bcd0f 100644
--- a/primitives/traits/Cargo.toml
+++ b/primitives/traits/Cargo.toml
@@ -16,7 +16,7 @@ cumulus-primitives-core = { workspace = true }
[features]
default = [ "std" ]
std = [
- "frame-support/std",
"cumulus-primitives-core/std",
+ "frame-support/std",
]
runtime-benchmarks = []
diff --git a/runtime/dancebox/Cargo.toml b/runtime/dancebox/Cargo.toml
index 085d46251..66e7f1fc6 100644
--- a/runtime/dancebox/Cargo.toml
+++ b/runtime/dancebox/Cargo.toml
@@ -124,67 +124,7 @@ substrate-wasm-builder = { workspace = true }
default = [
"std",
]
-std = [
- "cumulus-pallet-dmp-queue/std",
- "cumulus-pallet-parachain-system/std",
- "cumulus-pallet-xcm/std",
- "cumulus-pallet-xcmp-queue/std",
- "cumulus-primitives-core/std",
- "cumulus-primitives-timestamp/std",
- "cumulus-primitives-utility/std",
- "frame-executive/std",
- "frame-support/std",
- "frame-system-rpc-runtime-api/std",
- "frame-system/std",
- "frame-try-runtime/std",
- "log/std",
- "nimbus-primitives/std",
- "pallet-author-noting-runtime-api/std",
- "pallet-author-noting/std",
- "pallet-authority-assignment/std",
- "pallet-balances/std",
- "pallet-collator-assignment-runtime-api/std",
- "pallet-configuration/std",
- "pallet-inflation-rewards/std",
- "pallet-initializer/std",
- "pallet-invulnerables/std",
- "pallet-maintenance-mode/std",
- "pallet-migrations/std",
- "pallet-proxy/std",
- "pallet-registrar-runtime-api/std",
- "pallet-registrar/std",
- "pallet-session/std",
- "pallet-sudo/std",
- "pallet-timestamp/std",
- "pallet-transaction-payment-rpc-runtime-api/std",
- "pallet-utility/std",
- "pallet-xcm/std",
- "parachain-info/std",
- "parity-scale-codec/std",
- "polkadot-parachain-primitives/std",
- "polkadot-runtime-common/std",
- "scale-info/std",
- "serde",
- "sp-api/std",
- "sp-application-crypto/std",
- "sp-application-crypto/std",
- "sp-block-builder/std",
- "sp-consensus-aura/std",
- "sp-core/std",
- "sp-inherents/std",
- "sp-offchain/std",
- "sp-runtime/std",
- "sp-std/std",
- "sp-transaction-pool/std",
- "sp-version/std",
- "tp-author-noting-inherent/std",
- "tp-consensus/std",
- "tp-traits/std",
- "staging-xcm-builder/std",
- "staging-xcm-executor/std",
- "staging-xcm/std",
- "xcm-primitives/std"
-]
+std = [ "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", "cumulus-primitives-core/std", "cumulus-primitives-timestamp/std", "cumulus-primitives-utility/std", "frame-executive/std", "frame-support/std", "frame-system-rpc-runtime-api/std", "frame-system/std", "frame-try-runtime/std", "log/std", "nimbus-primitives/std", "pallet-author-noting-runtime-api/std", "pallet-author-noting/std", "pallet-authority-assignment/std", "pallet-balances/std", "pallet-collator-assignment-runtime-api/std", "pallet-configuration/std", "pallet-inflation-rewards/std", "pallet-initializer/std", "pallet-invulnerables/std", "pallet-maintenance-mode/std", "pallet-migrations/std", "pallet-proxy/std", "pallet-registrar-runtime-api/std", "pallet-registrar/std", "pallet-session/std", "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-utility/std", "pallet-xcm/std", "parachain-info/std", "parity-scale-codec/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", "scale-info/std", "serde", "sp-api/std", "sp-application-crypto/std", "sp-application-crypto/std", "sp-block-builder/std", "sp-consensus-aura/std", "sp-core/std", "sp-inherents/std", "sp-offchain/std", "sp-runtime/std", "sp-std/std", "sp-transaction-pool/std", "sp-version/std", "staging-xcm-builder/std", "staging-xcm-executor/std", "staging-xcm/std", "tp-author-noting-inherent/std", "tp-consensus/std", "tp-traits/std", "xcm-primitives/std" ]
# Allow to print logs details (no wasm:stripped)
force-debug = [ "sp-debug-derive/force-debug" ]
@@ -203,8 +143,8 @@ runtime-benchmarks = [
"pallet-pooled-staking/runtime-benchmarks",
"pallet-registrar/runtime-benchmarks",
"pallet-timestamp/runtime-benchmarks",
- "pallet-xcm/runtime-benchmarks",
"pallet-xcm-benchmarks/runtime-benchmarks",
+ "pallet-xcm/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"staging-xcm-builder/runtime-benchmarks",
]
From df1d4f7544d6dedfabf4f3c0a3c1ab4a6e397478 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Mon, 6 Nov 2023 11:24:55 +0100
Subject: [PATCH 08/29] sc_service::spawn_tasks with custom rpc_builder
---
client/node-common/src/service.rs | 136 ++++++++++++++++++++++++------
node/src/service.rs | 19 ++++-
2 files changed, 129 insertions(+), 26 deletions(-)
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index 4c3ff78a4..04633032f 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -13,16 +13,13 @@
// You should have received a copy of the GNU General Public License
// along with Tanssi. If not, see .
-
-use {futures::FutureExt, sp_offchain::OffchainWorkerApi};
-
use {
cumulus_client_cli::CollatorOptions,
- cumulus_client_service::{
- build_relay_chain_interface, prepare_node_config, CollatorSybilResistance,
- },
+ cumulus_client_service::{build_relay_chain_interface, CollatorSybilResistance},
cumulus_primitives_core::ParaId,
cumulus_relay_chain_interface::RelayChainInterface,
+ futures::FutureExt,
+ jsonrpsee::RpcModule,
polkadot_primitives::CollatorPair,
sc_client_api::Backend,
sc_consensus::ImportQueue,
@@ -33,6 +30,7 @@ use {
sc_network::{config::FullNetworkConfiguration, NetworkService},
sc_network_sync::SyncingService,
sc_network_transactions::TransactionsHandlerController,
+ sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor},
sc_service::{
Configuration, KeystoreContainer, NetworkStarter, TFullBackend, TFullClient, TaskManager,
},
@@ -41,6 +39,7 @@ use {
sc_utils::mpsc::TracingUnboundedSender,
sp_api::ConstructRuntimeApi,
sp_block_builder::BlockBuilder,
+ sp_offchain::OffchainWorkerApi,
sp_transaction_pool::runtime_api::TaggedTransactionQueue,
std::sync::Arc,
};
@@ -67,7 +66,6 @@ macro_rules! T {
pub struct CumulusNetwork {
pub network: Arc>,
pub system_rpc_tx: TracingUnboundedSender>,
- pub tx_handler_controller: TransactionsHandlerController,
pub start_network: NetworkStarter,
pub sync_service: Arc>,
}
@@ -82,6 +80,9 @@ pub struct NodeBuilder<
// caller create the `import_queue` using systems contained in `NodeBuilder`,
// then call `build_cumulus_network` with it to generate the cumulus systems.
Cumulus = (),
+ // The `TxHandler` is constructed in `build_cumulus_network`
+ // and is then consumed when calling `spawn_common_tasks`.
+ TxHandler = (),
> where
Block: cumulus_primitives_core::BlockT,
ParachainNativeExecutor: NativeExecutionDispatch + 'static,
@@ -100,10 +101,11 @@ pub struct NodeBuilder<
pub collator_key: Option,
pub cumulus: Cumulus,
+ pub tx_handler_controller: TxHandler,
}
impl
- NodeBuilder
+ NodeBuilder
where
Block: cumulus_primitives_core::BlockT,
ParachainNativeExecutor: NativeExecutionDispatch + 'static,
@@ -112,6 +114,7 @@ where
+ BlockBuilder
+ cumulus_primitives_core::CollectCollationInfo,
{
+ // Refactor: old new_partial + build_relay_chain_interface
pub async fn new(
parachain_config: &Configuration,
polkadot_config: Configuration,
@@ -197,16 +200,25 @@ where
relay_chain_interface,
collator_key,
cumulus: (),
+ tx_handler_controller: (),
})
}
+ /// Given an import queue, calls `cumulus_client_service::build_network` and
+ /// stores the returned objects in `self.cumulus` and `self.tx_handler_controller`.
pub async fn build_cumulus_network(
self,
parachain_config: &Configuration,
para_id: ParaId,
import_queue: impl ImportQueue + 'static,
) -> sc_service::error::Result<
- NodeBuilder>,
+ NodeBuilder<
+ Block,
+ RuntimeApi,
+ ParachainNativeExecutor,
+ CumulusNetwork,
+ TransactionsHandlerController,
+ >,
> {
let Self {
client,
@@ -219,6 +231,7 @@ where
relay_chain_interface,
collator_key,
cumulus: (),
+ tx_handler_controller: (),
} = self;
let net_config = FullNetworkConfiguration::new(¶chain_config.network);
@@ -250,48 +263,121 @@ where
cumulus: CumulusNetwork {
network,
system_rpc_tx,
- tx_handler_controller,
start_network,
sync_service,
},
+ tx_handler_controller,
})
}
}
impl
- NodeBuilder>
+ NodeBuilder<
+ Block,
+ RuntimeApi,
+ ParachainNativeExecutor,
+ CumulusNetwork,
+ TransactionsHandlerController,
+ >
where
Block: cumulus_primitives_core::BlockT,
+ Block::Hash: Unpin,
+ Block::Header: Unpin,
ParachainNativeExecutor: NativeExecutionDispatch + 'static,
RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static,
- T![ConstructedRuntimeApi]:
- TaggedTransactionQueue + BlockBuilder + OffchainWorkerApi,
+ T![ConstructedRuntimeApi]: TaggedTransactionQueue
+ + BlockBuilder
+ + OffchainWorkerApi
+ + sp_api::Metadata
+ + sp_session::SessionKeys,
{
- pub fn spawn_common_tasks(
- &mut self,
- parachain_config: &Configuration,
- ) -> sc_service::error::Result<()> {
+ /// Given an `rpc_builder`, spawns the common tasks of a Substrate + Cumulus
+ /// node. It consumes `self.tx_handler_controller` in the process.
+ pub fn spawn_common_tasks(
+ self,
+ parachain_config: Configuration,
+ rpc_builder: Box<
+ dyn Fn(
+ DenyUnsafe,
+ SubscriptionTaskExecutor,
+ ) -> Result, sc_service::Error>,
+ >,
+ ) -> sc_service::error::Result<
+ NodeBuilder, ()>,
+ > {
+ let NodeBuilder {
+ client,
+ backend,
+ transaction_pool,
+ mut telemetry,
+ telemetry_worker_handle,
+ mut task_manager,
+ keystore_container,
+ relay_chain_interface,
+ collator_key,
+ cumulus:
+ CumulusNetwork {
+ network,
+ system_rpc_tx,
+ start_network,
+ sync_service,
+ },
+ tx_handler_controller,
+ } = self;
+
if parachain_config.offchain_worker.enabled {
- self.task_manager.spawn_handle().spawn(
+ task_manager.spawn_handle().spawn(
"offchain-workers-runner",
"offchain-work",
sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
- runtime_api_provider: self.client.clone(),
- keystore: Some(self.keystore_container.keystore()),
- offchain_db: self.backend.offchain_storage(),
+ runtime_api_provider: client.clone(),
+ keystore: Some(keystore_container.keystore()),
+ offchain_db: backend.offchain_storage(),
transaction_pool: Some(OffchainTransactionPoolFactory::new(
- self.transaction_pool.clone(),
+ transaction_pool.clone(),
)),
- network_provider: self.cumulus.network.clone(),
+ network_provider: network.clone(),
is_validator: parachain_config.role.is_authority(),
enable_http_requests: false,
custom_extensions: move |_| vec![],
})
- .run(self.client.clone(), self.task_manager.spawn_handle())
+ .run(client.clone(), task_manager.spawn_handle())
.boxed(),
);
}
- Ok(())
+ sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+ rpc_builder,
+ client: client.clone(),
+ transaction_pool: transaction_pool.clone(),
+ task_manager: &mut task_manager,
+ config: parachain_config,
+ keystore: keystore_container.keystore(),
+ backend: backend.clone(),
+ network: network.clone(),
+ system_rpc_tx: system_rpc_tx.clone(),
+ tx_handler_controller,
+ telemetry: telemetry.as_mut(),
+ sync_service: sync_service.clone(),
+ })?;
+
+ Ok(NodeBuilder {
+ client,
+ backend,
+ transaction_pool,
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
+ relay_chain_interface,
+ collator_key,
+ cumulus: CumulusNetwork {
+ network,
+ system_rpc_tx,
+ start_network,
+ sync_service,
+ },
+ tx_handler_controller: (),
+ })
}
}
diff --git a/node/src/service.rs b/node/src/service.rs
index ba368deb4..dc46b0d84 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -345,7 +345,24 @@ async fn start_node_impl2(
.build_cumulus_network(¶chain_config, para_id, import_queue)
.await?;
- node_builder.spawn_common_tasks(¶chain_config)?;
+ let rpc_builder = {
+ let client = node_builder.client.clone();
+ let transaction_pool = node_builder.transaction_pool.clone();
+
+ Box::new(move |deny_unsafe, _| {
+ let deps = crate::rpc::FullDeps {
+ client: client.clone(),
+ pool: transaction_pool.clone(),
+ deny_unsafe,
+ command_sink: None,
+ xcm_senders: None,
+ };
+
+ crate::rpc::create_full(deps).map_err(Into::into)
+ })
+ };
+
+ node_builder.spawn_common_tasks(parachain_config, rpc_builder)?;
// let maybe_select_chain = Some(sc_consensus::LongestChain::new(
// node_builder.backend.clone(),
From c3fca0931a4a19f8afda5a22259a109e64835e21 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Mon, 6 Nov 2023 14:30:18 +0100
Subject: [PATCH 09/29] hwbench
---
client/node-common/src/service.rs | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index 04633032f..5de2b9295 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -13,11 +13,13 @@
// You should have received a copy of the GNU General Public License
// along with Tanssi. If not, see .
+
use {
cumulus_client_cli::CollatorOptions,
cumulus_client_service::{build_relay_chain_interface, CollatorSybilResistance},
cumulus_primitives_core::ParaId,
cumulus_relay_chain_interface::RelayChainInterface,
+ frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE,
futures::FutureExt,
jsonrpsee::RpcModule,
polkadot_primitives::CollatorPair,
@@ -99,6 +101,7 @@ pub struct NodeBuilder<
pub relay_chain_interface: Arc,
pub collator_key: Option,
+ pub hwbench: Option,
pub cumulus: Cumulus,
pub tx_handler_controller: TxHandler,
@@ -199,6 +202,7 @@ where
keystore_container,
relay_chain_interface,
collator_key,
+ hwbench,
cumulus: (),
tx_handler_controller: (),
})
@@ -230,6 +234,7 @@ where
keystore_container,
relay_chain_interface,
collator_key,
+ hwbench,
cumulus: (),
tx_handler_controller: (),
} = self;
@@ -260,6 +265,7 @@ where
keystore_container,
relay_chain_interface,
collator_key,
+ hwbench,
cumulus: CumulusNetwork {
network,
system_rpc_tx,
@@ -315,6 +321,7 @@ where
keystore_container,
relay_chain_interface,
collator_key,
+ hwbench,
cumulus:
CumulusNetwork {
network,
@@ -325,6 +332,8 @@ where
tx_handler_controller,
} = self;
+ let collator = parachain_config.role.is_authority();
+
if parachain_config.offchain_worker.enabled {
task_manager.spawn_handle().spawn(
"offchain-workers-runner",
@@ -361,6 +370,27 @@ where
sync_service: sync_service.clone(),
})?;
+ if let Some(hwbench) = &hwbench {
+ sc_sysinfo::print_hwbench(&hwbench);
+ // Here you can check whether the hardware meets your chains' requirements. Putting a link
+ // in there and swapping out the requirements for your own are probably a good idea. The
+ // requirements for a para-chain are dictated by its relay-chain.
+ if collator && !SUBSTRATE_REFERENCE_HARDWARE.check_hardware(&hwbench) {
+ log::warn!(
+ "⚠️ The hardware does not meet the minimal requirements for role 'Authority'."
+ );
+ }
+
+ if let Some(ref mut telemetry) = telemetry {
+ let telemetry_handle = telemetry.handle();
+ task_manager.spawn_handle().spawn(
+ "telemetry_hwbench",
+ None,
+ sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench.clone()),
+ );
+ }
+ }
+
Ok(NodeBuilder {
client,
backend,
@@ -371,6 +401,7 @@ where
keystore_container,
relay_chain_interface,
collator_key,
+ hwbench,
cumulus: CumulusNetwork {
network,
system_rpc_tx,
From bc397ca845ea37418237c42990d13f6dbf2a3c78 Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Mon, 6 Nov 2023 15:56:31 +0100
Subject: [PATCH 10/29] use `core_extensions::TypeIdentity` to simplify code
---
Cargo.lock | 7 ++
Cargo.toml | 1 +
client/node-common/Cargo.toml | 1 +
client/node-common/src/service.rs | 119 ++++++++++++++++++------------
4 files changed, 80 insertions(+), 48 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 1d3742b80..fa368431a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1887,6 +1887,12 @@ dependencies = [
"memchr",
]
+[[package]]
+name = "core_extensions"
+version = "1.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92c71dc07c9721607e7a16108336048ee978c3a8b129294534272e8bac96c0ee"
+
[[package]]
name = "cpp_demangle"
version = "0.3.5"
@@ -6846,6 +6852,7 @@ dependencies = [
"async-io",
"async-trait",
"clap",
+ "core_extensions",
"cumulus-client-cli",
"cumulus-client-consensus-aura",
"cumulus-client-consensus-common",
diff --git a/Cargo.toml b/Cargo.toml
index 225699beb..47c9caec4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -234,6 +234,7 @@ smallvec = "1.10.0"
async-io = "1.3"
async-trait = "0.1"
clap = { version = "4.1.6", default-features = false, features = [ "derive" ] }
+core_extensions = "1.5.3"
exit-future = { version = "0.2.0" }
flume = "0.10.9"
futures = { version = "0.3.1" }
diff --git a/client/node-common/Cargo.toml b/client/node-common/Cargo.toml
index a251f5143..f31f7a275 100644
--- a/client/node-common/Cargo.toml
+++ b/client/node-common/Cargo.toml
@@ -15,6 +15,7 @@ jsonrpsee = { workspace = true, features = [ "server" ] }
log = { workspace = true }
parity-scale-codec = { workspace = true }
serde = { workspace = true, features = [ "derive" ] }
+core_extensions = { workspace = true, features = [ "type_identity" ]}
# Local
tc-consensus = { workspace = true }
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index 5de2b9295..3596b265a 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -15,6 +15,7 @@
// along with Tanssi. If not, see .
use {
+ core_extensions::TypeIdentity,
cumulus_client_cli::CollatorOptions,
cumulus_client_service::{build_relay_chain_interface, CollatorSybilResistance},
cumulus_primitives_core::ParaId,
@@ -72,6 +73,19 @@ pub struct CumulusNetwork {
pub sync_service: Arc>,
}
+// `Cumulus` and `TxHandler` are types that will change during the life of
+// a `NodeBuilder` because they are generated and consumed when calling
+// certain functions, with absence of data represented with `()`. Some
+// functions are implemented only for a given concrete type, which ensures it
+// can only be called if the required data is available (generated and not yet
+// consumed).
+//
+// While this could be implemented with multiple impl blocks with concrete types,
+// we use here `core_extensions::TypeIdentity` which allows expressing type
+// identity/equality as a trait bound on each function as it removes the
+// boilerplate of many impl blocks with duplicated trait bounds. 2 impl blocks
+// are still required since Rust can't infer the types in the `new` function
+// that doesn't take `self`.
pub struct NodeBuilder<
Block,
RuntimeApi,
@@ -107,6 +121,9 @@ pub struct NodeBuilder<
pub tx_handler_controller: TxHandler,
}
+// `new` function doesn't take self, and the Rust compiler cannot infer that
+// only one type T implements `TypeIdentity`. We thus need a separate impl
+// block with concrete types `()`.
impl
NodeBuilder
where
@@ -115,15 +132,19 @@ where
RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static,
T![ConstructedRuntimeApi]: TaggedTransactionQueue
+ BlockBuilder
- + cumulus_primitives_core::CollectCollationInfo,
{
- // Refactor: old new_partial + build_relay_chain_interface
+ /// Create a new `NodeBuilder` which prepare objects required to launch a
+ /// node. However it doesn't start anything, and doesn't provide any
+ /// cumulus-dependent objects (as it requires an import queue, which usually
+ /// is different for each node).
pub async fn new(
parachain_config: &Configuration,
polkadot_config: Configuration,
collator_options: CollatorOptions,
hwbench: Option,
) -> Result {
+ // Refactor: old new_partial + build_relay_chain_interface
+
let telemetry = parachain_config
.telemetry_endpoints
.clone()
@@ -203,13 +224,27 @@ where
relay_chain_interface,
collator_key,
hwbench,
- cumulus: (),
- tx_handler_controller: (),
+ cumulus: TypeIdentity::from_type(()),
+ tx_handler_controller: TypeIdentity::from_type(()),
})
}
+}
+impl
+ NodeBuilder
+where
+ Block: cumulus_primitives_core::BlockT,
+ ParachainNativeExecutor: NativeExecutionDispatch + 'static,
+ RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static,
+ T![ConstructedRuntimeApi]: TaggedTransactionQueue
+ + BlockBuilder
+ + cumulus_primitives_core::CollectCollationInfo,
+{
/// Given an import queue, calls `cumulus_client_service::build_network` and
/// stores the returned objects in `self.cumulus` and `self.tx_handler_controller`.
+ ///
+ /// Can only be called once on a `NodeBuilder` that doesn't yet have cumulus
+ /// data.
pub async fn build_cumulus_network(
self,
parachain_config: &Configuration,
@@ -223,7 +258,11 @@ where
CumulusNetwork,
TransactionsHandlerController,
>,
- > {
+ >
+ where
+ Cumulus: TypeIdentity,
+ TxHandler: TypeIdentity,
+ {
let Self {
client,
backend,
@@ -235,8 +274,8 @@ where
relay_chain_interface,
collator_key,
hwbench,
- cumulus: (),
- tx_handler_controller: (),
+ cumulus: _,
+ tx_handler_controller: _,
} = self;
let net_config = FullNetworkConfiguration::new(¶chain_config.network);
@@ -275,30 +314,11 @@ where
tx_handler_controller,
})
}
-}
-impl
- NodeBuilder<
- Block,
- RuntimeApi,
- ParachainNativeExecutor,
- CumulusNetwork,
- TransactionsHandlerController,
- >
-where
- Block: cumulus_primitives_core::BlockT,
- Block::Hash: Unpin,
- Block::Header: Unpin,
- ParachainNativeExecutor: NativeExecutionDispatch + 'static,
- RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static,
- T![ConstructedRuntimeApi]: TaggedTransactionQueue
- + BlockBuilder
- + OffchainWorkerApi
- + sp_api::Metadata
- + sp_session::SessionKeys,
-{
/// Given an `rpc_builder`, spawns the common tasks of a Substrate + Cumulus
- /// node. It consumes `self.tx_handler_controller` in the process.
+ /// node. It consumes `self.tx_handler_controller` in the process, which means
+ /// it can only be called once, and any other code that would need this
+ /// controller should interact with it before calling this function.
pub fn spawn_common_tasks(
self,
parachain_config: Configuration,
@@ -310,7 +330,18 @@ where
>,
) -> sc_service::error::Result<
NodeBuilder, ()>,
- > {
+ >
+ where
+ Cumulus: TypeIdentity>,
+ TxHandler: TypeIdentity>,
+ Block::Hash: Unpin,
+ Block::Header: Unpin,
+ T![ConstructedRuntimeApi]: TaggedTransactionQueue
+ + BlockBuilder
+ + OffchainWorkerApi
+ + sp_api::Metadata
+ + sp_session::SessionKeys,
+ {
let NodeBuilder {
client,
backend,
@@ -322,16 +353,13 @@ where
relay_chain_interface,
collator_key,
hwbench,
- cumulus:
- CumulusNetwork {
- network,
- system_rpc_tx,
- start_network,
- sync_service,
- },
+ cumulus,
tx_handler_controller,
} = self;
+ let cumulus = TypeIdentity::into_type(cumulus);
+ let tx_handler_controller = TypeIdentity::into_type(tx_handler_controller);
+
let collator = parachain_config.role.is_authority();
if parachain_config.offchain_worker.enabled {
@@ -345,7 +373,7 @@ where
transaction_pool: Some(OffchainTransactionPoolFactory::new(
transaction_pool.clone(),
)),
- network_provider: network.clone(),
+ network_provider: cumulus.network.clone(),
is_validator: parachain_config.role.is_authority(),
enable_http_requests: false,
custom_extensions: move |_| vec![],
@@ -363,11 +391,11 @@ where
config: parachain_config,
keystore: keystore_container.keystore(),
backend: backend.clone(),
- network: network.clone(),
- system_rpc_tx: system_rpc_tx.clone(),
+ network: cumulus.network.clone(),
+ system_rpc_tx: cumulus.system_rpc_tx.clone(),
tx_handler_controller,
telemetry: telemetry.as_mut(),
- sync_service: sync_service.clone(),
+ sync_service: cumulus.sync_service.clone(),
})?;
if let Some(hwbench) = &hwbench {
@@ -402,13 +430,8 @@ where
relay_chain_interface,
collator_key,
hwbench,
- cumulus: CumulusNetwork {
- network,
- system_rpc_tx,
- start_network,
- sync_service,
- },
- tx_handler_controller: (),
+ cumulus: TypeIdentity::from_type(cumulus),
+ tx_handler_controller: TypeIdentity::from_type(()),
})
}
}
From 5df2e955da65cf167f0b82ab7032cd1959f3730e Mon Sep 17 00:00:00 2001
From: nanocryk <6422796+nanocryk@users.noreply.github.com>
Date: Tue, 7 Nov 2023 11:22:44 +0100
Subject: [PATCH 11/29] support substrate/cumulus network + manual_seal
---
Cargo.lock | 1 +
client/node-common/Cargo.toml | 3 +-
client/node-common/src/service.rs | 315 +++++++++++++++---
.../templates/frontier/node/src/service.rs | 10 +-
node/src/cli.rs | 3 +-
node/src/service.rs | 136 +++++---
6 files changed, 357 insertions(+), 111 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index fa368431a..e24a199ae 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6861,6 +6861,7 @@ dependencies = [
"cumulus-primitives-core",
"cumulus-primitives-parachain-inherent",
"cumulus-relay-chain-interface",
+ "flume",
"frame-benchmarking",
"frame-benchmarking-cli",
"futures 0.3.28",
diff --git a/client/node-common/Cargo.toml b/client/node-common/Cargo.toml
index f31f7a275..78b7c44d5 100644
--- a/client/node-common/Cargo.toml
+++ b/client/node-common/Cargo.toml
@@ -10,12 +10,13 @@ version = "0.1.0"
async-io = { workspace = true }
async-trait = { workspace = true }
clap = { workspace = true, features = [ "derive" ] }
+core_extensions = { workspace = true, features = [ "type_identity" ] }
+flume = { workspace = true }
futures = { workspace = true }
jsonrpsee = { workspace = true, features = [ "server" ] }
log = { workspace = true }
parity-scale-codec = { workspace = true }
serde = { workspace = true, features = [ "derive" ] }
-core_extensions = { workspace = true, features = [ "type_identity" ]}
# Local
tc-consensus = { workspace = true }
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index 3596b265a..832c3c0b3 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -15,17 +15,22 @@
// along with Tanssi. If not, see .
use {
+ async_io::Timer,
+ core::time::Duration,
core_extensions::TypeIdentity,
cumulus_client_cli::CollatorOptions,
cumulus_client_service::{build_relay_chain_interface, CollatorSybilResistance},
cumulus_primitives_core::ParaId,
cumulus_relay_chain_interface::RelayChainInterface,
frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE,
- futures::FutureExt,
+ futures::{channel::mpsc, FutureExt, Stream, StreamExt},
jsonrpsee::RpcModule,
polkadot_primitives::CollatorPair,
sc_client_api::Backend,
- sc_consensus::ImportQueue,
+ sc_consensus::{block_import, BlockImport, ImportQueue},
+ sc_consensus_manual_seal::{
+ run_manual_seal, ConsensusDataProvider, EngineCommand, ManualSealParams,
+ },
sc_executor::{
HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor,
DEFAULT_HEAP_ALLOC_STRATEGY,
@@ -42,9 +47,13 @@ use {
sc_utils::mpsc::TracingUnboundedSender,
sp_api::ConstructRuntimeApi,
sp_block_builder::BlockBuilder,
+ sp_consensus::{EnableProofRecording, SelectChain},
+ sp_core::H256,
+ sp_inherents::CreateInherentDataProviders,
sp_offchain::OffchainWorkerApi,
+ sp_runtime::Percent,
sp_transaction_pool::runtime_api::TaggedTransactionQueue,
- std::sync::Arc,
+ std::{str::FromStr, sync::Arc},
};
/// Functions in this module are generic over `Block`, `RuntimeApi`, and
@@ -66,7 +75,7 @@ macro_rules! T {
}
}
-pub struct CumulusNetwork {
+pub struct Network {
pub network: Arc>,
pub system_rpc_tx: TracingUnboundedSender>,
pub start_network: NetworkStarter,
@@ -90,13 +99,13 @@ pub struct NodeBuilder<
Block,
RuntimeApi,
ParachainNativeExecutor,
- // `cumulus_client_service::build_network` returns many important systems,
+ // `(cumulus_client_service/sc_service)::build_network` returns many important systems,
// but can only be called with an `import_queue` which can be different in
// each node. For that reason it is a `()` when calling `new`, then the
// caller create the `import_queue` using systems contained in `NodeBuilder`,
// then call `build_cumulus_network` with it to generate the cumulus systems.
- Cumulus = (),
- // The `TxHandler` is constructed in `build_cumulus_network`
+ Network = (),
+ // The `TxHandler` is constructed in `build_X_network`
// and is then consumed when calling `spawn_common_tasks`.
TxHandler = (),
> where
@@ -113,11 +122,10 @@ pub struct NodeBuilder<
pub telemetry: Option,
pub telemetry_worker_handle: Option,
- pub relay_chain_interface: Arc,
- pub collator_key: Option,
pub hwbench: Option,
+ pub prometheus_registry: Option,
- pub cumulus: Cumulus,
+ pub network: Network,
pub tx_handler_controller: TxHandler,
}
@@ -130,8 +138,7 @@ where
Block: cumulus_primitives_core::BlockT,
ParachainNativeExecutor: NativeExecutionDispatch + 'static,
RuntimeApi: ConstructRuntimeApi + Sync + Send + 'static,
- T![ConstructedRuntimeApi]: TaggedTransactionQueue
- + BlockBuilder
+ T![ConstructedRuntimeApi]: TaggedTransactionQueue + BlockBuilder