diff --git a/Cargo.lock b/Cargo.lock
index 7fa980140..8302fd273 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1747,6 +1747,7 @@ dependencies = [
"log",
"nimbus-consensus",
"nimbus-primitives",
+ "node-common",
"parity-scale-codec",
"polkadot-cli",
"polkadot-primitives",
@@ -14637,6 +14638,7 @@ dependencies = [
"manual-xcm-rpc",
"nimbus-consensus",
"nimbus-primitives",
+ "node-common",
"pallet-author-noting-runtime-api",
"pallet-collator-assignment-runtime-api",
"pallet-configuration",
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index c7e9094c5..0e8dc891d 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -14,14 +14,18 @@
// You should have received a copy of the GNU General Public License
// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
-use {sp_api::ConstructRuntimeApi, sp_transaction_pool::runtime_api::TaggedTransactionQueue};
+use sp_block_builder::BlockBuilder;
use {
cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
- sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch},
+ sc_executor::{
+ HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor,
+ DEFAULT_HEAP_ALLOC_STRATEGY,
+ },
sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient},
- sc_telemetry::{Telemetry, TelemetryWorkerHandle},
- sc_transaction_pool::ChainApi,
+ sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+ sp_api::ConstructRuntimeApi,
+ sp_transaction_pool::runtime_api::TaggedTransactionQueue,
std::sync::Arc,
};
@@ -35,14 +39,12 @@ pub type ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor> = TParachainBlockImport<
Arc<ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>,
ParachainBackend<Block>,
>;
-
-type ConstructedRuntimeApi<Block, Client, RuntimeApi> =
+pub type ConstructedRuntimeApi<Block, Client, RuntimeApi> =
<RuntimeApi as ConstructRuntimeApi<Block, Client>>::RuntimeApi;
-pub trait BlockT: cumulus_primitives_core::BlockT {}
-
pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor, SelectChain>(
config: &Configuration,
+ select_chain: SelectChain,
) -> Result<
PartialComponents<
ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
@@ -62,7 +64,7 @@ pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor, SelectChain>(
sc_service::Error,
>
where
- Block: BlockT,
+ Block: cumulus_primitives_core::BlockT,
ParachainNativeExecutor: NativeExecutionDispatch + 'static,
RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>
+ Sync
@@ -72,7 +74,88 @@ where
Block,
ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
RuntimeApi,
- >: TaggedTransactionQueue<Block>,
+ >: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
{
- todo!()
+ let telemetry = config
+ .telemetry_endpoints
+ .clone()
+ .filter(|x| !x.is_empty())
+ .map(|endpoints| -> Result<_, sc_telemetry::Error> {
+ let worker = TelemetryWorker::new(16)?;
+ let telemetry = worker.handle().new_telemetry(endpoints);
+ Ok((worker, telemetry))
+ })
+ .transpose()?;
+
+ let heap_pages = config
+ .default_heap_pages
+ .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
+ extra_pages: h as _,
+ });
+
+ // Default runtime_cache_size is 2
+ // For now we can work with this, but it will likely need
+ // to change once we start configuring runtime_cache_size, or
+ // run nodes with the maximum for this value
+ let wasm = WasmExecutor::builder()
+ .with_execution_method(config.wasm_method)
+ .with_onchain_heap_alloc_strategy(heap_pages)
+ .with_offchain_heap_alloc_strategy(heap_pages)
+ .with_max_runtime_instances(config.max_runtime_instances)
+ .with_runtime_cache_size(config.runtime_cache_size)
+ .build();
+
+ let executor: ParachainExecutor<ParachainNativeExecutor> =
+ ParachainExecutor::new_with_wasm_executor(wasm);
+
+ let (client, backend, keystore_container, task_manager) =
+ sc_service::new_full_parts::<Block, RuntimeApi, _>(
+ config,
+ telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+ executor,
+ )?;
+ let client = Arc::new(client);
+
+ let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
+
+ let telemetry = telemetry.map(|(worker, telemetry)| {
+ task_manager
+ .spawn_handle()
+ .spawn("telemetry", None, worker.run());
+ telemetry
+ });
+
+ let transaction_pool = sc_transaction_pool::BasicPool::new_full(
+ config.transaction_pool.clone(),
+ config.role.is_authority().into(),
+ config.prometheus_registry(),
+ task_manager.spawn_essential_handle(),
+ client.clone(),
+ );
+
+ let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+ let import_queue = nimbus_consensus::import_queue(
+ client.clone(),
+ block_import.clone(),
+ move |_, _| async move {
+ let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+ Ok((time,))
+ },
+ &task_manager.spawn_essential_handle(),
+ config.prometheus_registry(),
+ false,
+ )?;
+
+ Ok(PartialComponents {
+ backend,
+ client,
+ import_queue,
+ keystore_container,
+ task_manager,
+ transaction_pool,
+ select_chain,
+ other: (block_import, telemetry, telemetry_worker_handle),
+ })
}
diff --git a/container-chains/templates/simple/node/Cargo.toml b/container-chains/templates/simple/node/Cargo.toml
index 252ba852b..b598a54cd 100644
--- a/container-chains/templates/simple/node/Cargo.toml
+++ b/container-chains/templates/simple/node/Cargo.toml
@@ -18,6 +18,7 @@ parity-scale-codec = { workspace = true }
serde = { workspace = true, features = [ "derive" ] }
# Local
+node-common = { workspace = true }
container-chain-template-simple-runtime = { workspace = true, features = [ "std" ] }
tc-consensus = { workspace = true }
diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs
index 550bc33d9..0a8b23ac1 100644
--- a/container-chains/templates/simple/node/src/service.rs
+++ b/container-chains/templates/simple/node/src/service.rs
@@ -92,83 +92,7 @@ pub fn new_partial(
>,
sc_service::Error,
> {
- let telemetry = config
- .telemetry_endpoints
- .clone()
- .filter(|x| !x.is_empty())
- .map(|endpoints| -> Result<_, sc_telemetry::Error> {
- let worker = TelemetryWorker::new(16)?;
- let telemetry = worker.handle().new_telemetry(endpoints);
- Ok((worker, telemetry))
- })
- .transpose()?;
-
- let heap_pages = config
- .default_heap_pages
- .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
- extra_pages: h as _,
- });
-
- let wasm = WasmExecutor::builder()
- .with_execution_method(config.wasm_method)
- .with_onchain_heap_alloc_strategy(heap_pages)
- .with_offchain_heap_alloc_strategy(heap_pages)
- .with_max_runtime_instances(config.max_runtime_instances)
- .with_runtime_cache_size(config.runtime_cache_size)
- .build();
-
- let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
- let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts::<Block, RuntimeApi, _>(
- config,
- telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
- executor,
- )?;
- let client = Arc::new(client);
-
- let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
- let telemetry = telemetry.map(|(worker, telemetry)| {
- task_manager
- .spawn_handle()
- .spawn("telemetry", None, worker.run());
- telemetry
- });
-
- let transaction_pool = sc_transaction_pool::BasicPool::new_full(
- config.transaction_pool.clone(),
- config.role.is_authority().into(),
- config.prometheus_registry(),
- task_manager.spawn_essential_handle(),
- client.clone(),
- );
-
- let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
- let import_queue = nimbus_consensus::import_queue(
- client.clone(),
- block_import.clone(),
- move |_, _| async move {
- let time = sp_timestamp::InherentDataProvider::from_system_time();
-
- Ok((time,))
- },
- &task_manager.spawn_essential_handle(),
- config.prometheus_registry(),
- false,
- )?;
-
- Ok(PartialComponents {
- backend,
- client,
- import_queue,
- keystore_container,
- task_manager,
- transaction_pool,
- select_chain: (),
- other: (block_import, telemetry, telemetry_worker_handle),
- })
+ node_common::service::new_partial(config, ())
}
/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
diff --git a/node/Cargo.toml b/node/Cargo.toml
index 593a8e56f..2b88db0b7 100644
--- a/node/Cargo.toml
+++ b/node/Cargo.toml
@@ -22,6 +22,7 @@ serde_json = { workspace = true }
tokio = { workspace = true }
# Local
+node-common = { workspace = true }
ccp-authorities-noting-inherent = { workspace = true, features = [ "std" ] }
dancebox-runtime = { workspace = true, features = [ "std" ] }
manual-xcm-rpc = { workspace = true }
diff --git a/node/src/container_chain_spawner.rs b/node/src/container_chain_spawner.rs
index c535133ad..ba8c52f00 100644
--- a/node/src/container_chain_spawner.rs
+++ b/node/src/container_chain_spawner.rs
@@ -49,8 +49,10 @@ use {
time::Instant,
},
tc_orchestrator_chain_interface::OrchestratorChainInterface,
- tokio::sync::{mpsc, oneshot},
- tokio::time::{sleep, Duration},
+ tokio::{
+ sync::{mpsc, oneshot},
+ time::{sleep, Duration},
+ },
};
/// Struct with all the params needed to start a container chain node given the CLI arguments,
diff --git a/node/src/service.rs b/node/src/service.rs
index 4d85901b3..b6f799c05 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -137,87 +137,7 @@ pub fn new_partial(
>,
sc_service::Error,
> {
- let telemetry = config
- .telemetry_endpoints
- .clone()
- .filter(|x| !x.is_empty())
- .map(|endpoints| -> Result<_, sc_telemetry::Error> {
- let worker = TelemetryWorker::new(16)?;
- let telemetry = worker.handle().new_telemetry(endpoints);
- Ok((worker, telemetry))
- })
- .transpose()?;
-
- let heap_pages = config
- .default_heap_pages
- .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
- extra_pages: h as _,
- });
-
- let wasm = WasmExecutor::builder()
- .with_execution_method(config.wasm_method)
- .with_onchain_heap_alloc_strategy(heap_pages)
- .with_offchain_heap_alloc_strategy(heap_pages)
- .with_max_runtime_instances(config.max_runtime_instances)
- .with_runtime_cache_size(config.runtime_cache_size)
- .build();
-
- let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
- let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts::<Block, RuntimeApi, _>(
- config,
- telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
- executor,
- )?;
- let client = Arc::new(client);
-
- let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
- let telemetry = telemetry.map(|(worker, telemetry)| {
- task_manager
- .spawn_handle()
- .spawn("telemetry", None, worker.run());
- telemetry
- });
-
- let transaction_pool = sc_transaction_pool::BasicPool::new_full(
- config.transaction_pool.clone(),
- config.role.is_authority().into(),
- config.prometheus_registry(),
- task_manager.spawn_essential_handle(),
- client.clone(),
- );
-
- let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
- // The nimbus import queue ONLY checks the signature correctness
- // Any other checks corresponding to the author-correctness should be done
- // in the runtime
- let import_queue = nimbus_consensus::import_queue(
- client.clone(),
- block_import.clone(),
- move |_, _| async move {
- let time = sp_timestamp::InherentDataProvider::from_system_time();
-
- Ok((time,))
- },
- &task_manager.spawn_essential_handle(),
- config.prometheus_registry(),
- false,
- )?;
-
- let maybe_select_chain = None;
-
- Ok(PartialComponents {
- backend,
- client,
- import_queue,
- keystore_container,
- task_manager,
- transaction_pool,
- select_chain: maybe_select_chain,
- other: (block_import, telemetry, telemetry_worker_handle),
- })
+ node_common::service::new_partial(config, None)
}
/// Background task used to detect changes to container chain assignment,