diff --git a/Cargo.lock b/Cargo.lock
index 8302fd273..964c06a78 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1600,6 +1600,7 @@ dependencies = [
"manual-xcm-rpc",
"nimbus-consensus",
"nimbus-primitives",
+ "node-common",
"pallet-ethereum",
"pallet-transaction-payment-rpc",
"pallet-transaction-payment-rpc-runtime-api",
diff --git a/client/node-common/src/service.rs b/client/node-common/src/service.rs
index 0e8dc891d..c934ce736 100644
--- a/client/node-common/src/service.rs
+++ b/client/node-common/src/service.rs
@@ -14,7 +14,10 @@
// You should have received a copy of the GNU General Public License
// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
-use sp_block_builder::BlockBuilder;
+use {
+ sc_service::{KeystoreContainer, TaskManager},
+ sp_block_builder::BlockBuilder,
+};
use {
cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
@@ -22,7 +25,7 @@ use {
HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor,
DEFAULT_HEAP_ALLOC_STRATEGY,
},
- sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient},
+ sc_service::{Configuration, TFullBackend, TFullClient},
sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
sp_api::ConstructRuntimeApi,
sp_transaction_pool::runtime_api::TaggedTransactionQueue,
@@ -42,25 +45,53 @@ pub type ParachainBlockImport = TPar
pub type ConstructedRuntimeApi =
>::RuntimeApi;
-pub fn new_partial(
- config: &Configuration,
- select_chain: SelectChain,
-) -> Result<
- PartialComponents<
+pub struct NewPartial<Block, RuntimeApi, ParachainNativeExecutor>
+where
+ Block: cumulus_primitives_core::BlockT,
+ ParachainNativeExecutor: NativeExecutionDispatch + 'static,
+ RuntimeApi: ConstructRuntimeApi>
+ + Sync
+ + Send
+ + 'static,
+ ConstructedRuntimeApi<
+ Block,
ParachainClient,
- ParachainBackend,
- SelectChain,
- sc_consensus::DefaultImportQueue,
+ RuntimeApi,
+ >: TaggedTransactionQueue + BlockBuilder,
+{
+ pub client: Arc>,
+ pub backend: Arc>,
+ pub task_manager: TaskManager,
+ pub keystore_container: KeystoreContainer,
+ pub transaction_pool: Arc<
sc_transaction_pool::FullPool<
Block,
ParachainClient,
>,
- (
- ParachainBlockImport,
- Option,
- Option,
- ),
>,
+ pub telemetry: Option,
+ pub telemetry_worker_handle: Option,
+}
+
+pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor>(
+ config: &Configuration,
+) -> Result<
+ // PartialComponents<
+ // ParachainClient,
+ // ParachainBackend,
+ // SelectChain,
+ // sc_consensus::DefaultImportQueue,
+ // sc_transaction_pool::FullPool<
+ // Block,
+ // ParachainClient,
+ // >,
+ // (
+ // ParachainBlockImport,
+ // Option,
+ // Option,
+ // ),
+ // >,
+    NewPartial<Block, RuntimeApi, ParachainNativeExecutor>,
sc_service::Error,
>
where
@@ -133,29 +164,38 @@ where
client.clone(),
);
- let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
- let import_queue = nimbus_consensus::import_queue(
- client.clone(),
- block_import.clone(),
- move |_, _| async move {
- let time = sp_timestamp::InherentDataProvider::from_system_time();
-
- Ok((time,))
- },
- &task_manager.spawn_essential_handle(),
- config.prometheus_registry(),
- false,
- )?;
-
- Ok(PartialComponents {
- backend,
+ // let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+ // let import_queue = nimbus_consensus::import_queue(
+ // client.clone(),
+ // block_import.clone(),
+ // move |_, _| async move {
+ // let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+ // Ok((time,))
+ // },
+ // &task_manager.spawn_essential_handle(),
+ // config.prometheus_registry(),
+ // false,
+ // )?;
+
+ // Ok(PartialComponents {
+ // backend,
+ // client,
+ // import_queue,
+ // keystore_container,
+ // task_manager,
+ // transaction_pool,
+ // select_chain,
+ // other: (block_import, telemetry, telemetry_worker_handle),
+ // })
+ Ok(NewPartial {
client,
- import_queue,
- keystore_container,
- task_manager,
+ backend,
transaction_pool,
- select_chain,
- other: (block_import, telemetry, telemetry_worker_handle),
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
})
}
diff --git a/container-chains/templates/frontier/node/Cargo.toml b/container-chains/templates/frontier/node/Cargo.toml
index 4c868bdd5..7ab6c5189 100644
--- a/container-chains/templates/frontier/node/Cargo.toml
+++ b/container-chains/templates/frontier/node/Cargo.toml
@@ -21,6 +21,7 @@ serde = { workspace = true, features = [ "derive" ] }
url = { workspace = true }
# Local
+node-common = { workspace = true }
ccp-authorities-noting-inherent = { workspace = true }
container-chain-template-frontier-runtime = { workspace = true, features = [ "std" ] }
manual-xcm-rpc = { workspace = true }
diff --git a/container-chains/templates/frontier/node/src/service.rs b/container-chains/templates/frontier/node/src/service.rs
index 9a28d6178..99d31eeae 100644
--- a/container-chains/templates/frontier/node/src/service.rs
+++ b/container-chains/templates/frontier/node/src/service.rs
@@ -18,7 +18,6 @@
use {
cumulus_client_consensus_common::ParachainBlockImport,
- sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY},
sc_network::config::FullNetworkConfiguration,
};
// std
@@ -65,9 +64,11 @@ use {
sc_executor::NativeElseWasmExecutor,
sc_network::NetworkBlock,
sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager},
- sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+ sc_telemetry::{Telemetry, TelemetryWorkerHandle},
};
+use node_common::service::NewPartial;
+
/// Native executor type.
use crate::client::TemplateRuntimeExecutor;
@@ -160,53 +161,15 @@ pub fn new_partial(
// Use ethereum style for subscription ids
config.rpc_id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider));
- let telemetry = config
- .telemetry_endpoints
- .clone()
- .filter(|x| !x.is_empty())
- .map(|endpoints| -> Result<_, sc_telemetry::Error> {
- let worker = TelemetryWorker::new(16)?;
- let telemetry = worker.handle().new_telemetry(endpoints);
- Ok((worker, telemetry))
- })
- .transpose()?;
-
- // Default runtime_cache_size is 2
- // For now we can work with this, but it will likely need
- // to change once we start having runtime_cache_sizes, or
- // run nodes with the maximum for this value
- let heap_pages = config
- .default_heap_pages
- .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
- extra_pages: h as _,
- });
-
- let wasm = WasmExecutor::builder()
- .with_execution_method(config.wasm_method)
- .with_onchain_heap_alloc_strategy(heap_pages)
- .with_offchain_heap_alloc_strategy(heap_pages)
- .with_max_runtime_instances(config.max_runtime_instances)
- .with_runtime_cache_size(config.runtime_cache_size)
- .build();
-
- let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
- let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts::(
- config,
- telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
- executor,
- )?;
- let client = Arc::new(client);
-
- let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
- let telemetry = telemetry.map(|(worker, telemetry)| {
- task_manager
- .spawn_handle()
- .spawn("telemetry", None, worker.run());
- telemetry
- });
+ let NewPartial {
+ client,
+ backend,
+ transaction_pool,
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
+ } = node_common::service::new_partial(config)?;
let maybe_select_chain = if dev_service {
Some(sc_consensus::LongestChain::new(backend.clone()))
@@ -214,14 +177,6 @@ pub fn new_partial(
None
};
- let transaction_pool = sc_transaction_pool::BasicPool::new_full(
- config.transaction_pool.clone(),
- config.role.is_authority().into(),
- config.prometheus_registry(),
- task_manager.spawn_essential_handle(),
- client.clone(),
- );
-
let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new())));
let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
diff --git a/container-chains/templates/simple/node/src/service.rs b/container-chains/templates/simple/node/src/service.rs
index 0a8b23ac1..6b81d696f 100644
--- a/container-chains/templates/simple/node/src/service.rs
+++ b/container-chains/templates/simple/node/src/service.rs
@@ -1,5 +1,3 @@
-//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
-
// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.
@@ -15,14 +13,18 @@
// You should have received a copy of the GNU General Public License
// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
-use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
+
+//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
// std
use std::{sync::Arc, time::Duration};
use {cumulus_client_cli::CollatorOptions, sc_network::config::FullNetworkConfiguration};
// Local Runtime Types
-use container_chain_template_simple_runtime::{opaque::Block, RuntimeApi};
+use {
+ container_chain_template_simple_runtime::{opaque::Block, RuntimeApi},
+ node_common::service::NewPartial,
+};
// Cumulus Imports
#[allow(deprecated)]
@@ -44,7 +46,7 @@ use {
sc_executor::NativeElseWasmExecutor,
sc_network::NetworkBlock,
sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager},
- sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+ sc_telemetry::{Telemetry, TelemetryWorkerHandle},
sc_transaction_pool_api::OffchainTransactionPoolFactory,
};
@@ -92,7 +94,41 @@ pub fn new_partial(
>,
sc_service::Error,
> {
- node_common::service::new_partial(config, ())
+ let NewPartial {
+ client,
+ backend,
+ transaction_pool,
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
+ } = node_common::service::new_partial(config)?;
+
+ let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+ let import_queue = nimbus_consensus::import_queue(
+ client.clone(),
+ block_import.clone(),
+ move |_, _| async move {
+ let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+ Ok((time,))
+ },
+ &task_manager.spawn_essential_handle(),
+ config.prometheus_registry(),
+ false,
+ )?;
+
+ Ok(PartialComponents {
+ backend,
+ client,
+ import_queue,
+ keystore_container,
+ task_manager,
+ transaction_pool,
+ select_chain: (),
+ other: (block_import, telemetry, telemetry_worker_handle),
+ })
}
/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
diff --git a/node/src/service.rs b/node/src/service.rs
index b6f799c05..07567c0d7 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -16,6 +16,8 @@
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
+use node_common::service::NewPartial;
+
#[allow(deprecated)]
use {
crate::{
@@ -137,7 +139,41 @@ pub fn new_partial(
>,
sc_service::Error,
> {
- node_common::service::new_partial(config, None)
+ let NewPartial {
+ client,
+ backend,
+ transaction_pool,
+ telemetry,
+ telemetry_worker_handle,
+ task_manager,
+ keystore_container,
+ } = node_common::service::new_partial(config)?;
+
+ let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+ let import_queue = nimbus_consensus::import_queue(
+ client.clone(),
+ block_import.clone(),
+ move |_, _| async move {
+ let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+ Ok((time,))
+ },
+ &task_manager.spawn_essential_handle(),
+ config.prometheus_registry(),
+ false,
+ )?;
+
+ Ok(PartialComponents {
+ backend,
+ client,
+ import_queue,
+ keystore_container,
+ task_manager,
+ transaction_pool,
+ select_chain: None,
+ other: (block_import, telemetry, telemetry_worker_handle),
+ })
}
/// Background task used to detect changes to container chain assignment,