extract divergent code with frontier new_partial
nanocryk committed Nov 3, 2023
1 parent 5dfb323 commit 80326e9
Showing 6 changed files with 170 additions and 101 deletions.
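
Note: the shared part of partial-service setup (executor, client, backend, task manager, keystore, transaction pool, telemetry) moves behind node_common::service::new_partial, which now returns the new NewPartial struct instead of a full PartialComponents; the pieces that diverge per node (block import, import queue, select chain, and the frontier template's Ethereum-specific state such as the filter pool and fee history cache) stay in each node crate. A minimal sketch of the resulting call shape, condensed from the diffs below (surrounding types and imports elided):

    // Shared setup, provided by node-common:
    let NewPartial {
        client,
        backend,
        transaction_pool,
        telemetry,
        telemetry_worker_handle,
        task_manager,
        keystore_container,
    } = node_common::service::new_partial(config)?;

    // Divergent, per-node setup (the simple template and the tanssi node,
    // shown below, rebuild their own block import and import queue here):
    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());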
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

114 changes: 77 additions & 37 deletions client/node-common/src/service.rs
@@ -14,15 +14,18 @@
// You should have received a copy of the GNU General Public License
// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.

-use sp_block_builder::BlockBuilder;
+use {
+    sc_service::{KeystoreContainer, TaskManager},
+    sp_block_builder::BlockBuilder,
+};

use {
    cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport,
    sc_executor::{
        HeapAllocStrategy, NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutor,
        DEFAULT_HEAP_ALLOC_STRATEGY,
    },
-    sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient},
+    sc_service::{Configuration, TFullBackend, TFullClient},
    sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
    sp_api::ConstructRuntimeApi,
    sp_transaction_pool::runtime_api::TaggedTransactionQueue,
@@ -42,25 +45,53 @@ pub type ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor> = TPar
pub type ConstructedRuntimeApi<Block, Client, RuntimeApi> =
<RuntimeApi as ConstructRuntimeApi<Block, Client>>::RuntimeApi;

-pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor, SelectChain>(
-    config: &Configuration,
-    select_chain: SelectChain,
-) -> Result<
-    PartialComponents<
-        ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
-        ParachainBackend<Block>,
-        SelectChain,
-        sc_consensus::DefaultImportQueue<Block>,
-        sc_transaction_pool::FullPool<
-            Block,
-            ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
-        >,
-        (
-            ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor>,
-            Option<Telemetry>,
-            Option<TelemetryWorkerHandle>,
-        ),
-    >,
+pub struct NewPartial<Block, RuntimeApi, ParachainNativeExecutor>
+where
+    Block: cumulus_primitives_core::BlockT,
+    ParachainNativeExecutor: NativeExecutionDispatch + 'static,
+    RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>
+        + Sync
+        + Send
+        + 'static,
+    ConstructedRuntimeApi<
+        Block,
+        ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
+        RuntimeApi,
+    >: TaggedTransactionQueue<Block> + BlockBuilder<Block>,
+{
+    pub client: Arc<ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>>,
+    pub backend: Arc<ParachainBackend<Block>>,
+    pub task_manager: TaskManager,
+    pub keystore_container: KeystoreContainer,
+    pub transaction_pool: Arc<
+        sc_transaction_pool::FullPool<
+            Block,
+            ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
+        >,
+    >,
+    pub telemetry: Option<Telemetry>,
+    pub telemetry_worker_handle: Option<TelemetryWorkerHandle>,
+}
+
+pub fn new_partial<Block, RuntimeApi, ParachainNativeExecutor>(
+    config: &Configuration,
+) -> Result<
+    // PartialComponents<
+    //     ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
+    //     ParachainBackend<Block>,
+    //     SelectChain,
+    //     sc_consensus::DefaultImportQueue<Block>,
+    //     sc_transaction_pool::FullPool<
+    //         Block,
+    //         ParachainClient<Block, RuntimeApi, ParachainNativeExecutor>,
+    //     >,
+    //     (
+    //         ParachainBlockImport<Block, RuntimeApi, ParachainNativeExecutor>,
+    //         Option<Telemetry>,
+    //         Option<TelemetryWorkerHandle>,
+    //     ),
+    // >,
+    NewPartial<Block, RuntimeApi, ParachainNativeExecutor>,
    sc_service::Error,
>
where
@@ -133,29 +164,38 @@ where
        client.clone(),
    );

-    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
-    let import_queue = nimbus_consensus::import_queue(
-        client.clone(),
-        block_import.clone(),
-        move |_, _| async move {
-            let time = sp_timestamp::InherentDataProvider::from_system_time();
-
-            Ok((time,))
-        },
-        &task_manager.spawn_essential_handle(),
-        config.prometheus_registry(),
-        false,
-    )?;
-
-    Ok(PartialComponents {
-        backend,
-        client,
-        import_queue,
-        keystore_container,
-        task_manager,
-        transaction_pool,
-        select_chain,
-        other: (block_import, telemetry, telemetry_worker_handle),
-    })
+    // let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+    // let import_queue = nimbus_consensus::import_queue(
+    //     client.clone(),
+    //     block_import.clone(),
+    //     move |_, _| async move {
+    //         let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+    //         Ok((time,))
+    //     },
+    //     &task_manager.spawn_essential_handle(),
+    //     config.prometheus_registry(),
+    //     false,
+    // )?;
+
+    // Ok(PartialComponents {
+    //     backend,
+    //     client,
+    //     import_queue,
+    //     keystore_container,
+    //     task_manager,
+    //     transaction_pool,
+    //     select_chain,
+    //     other: (block_import, telemetry, telemetry_worker_handle),
+    // })
+    Ok(NewPartial {
+        client,
+        backend,
+        transaction_pool,
+        telemetry,
+        telemetry_worker_handle,
+        task_manager,
+        keystore_container,
+    })
}
1 change: 1 addition & 0 deletions container-chains/templates/frontier/node/Cargo.toml
@@ -21,6 +21,7 @@ serde = { workspace = true, features = [ "derive" ] }
url = { workspace = true }

# Local
+node-common = { workspace = true }
ccp-authorities-noting-inherent = { workspace = true }
container-chain-template-frontier-runtime = { workspace = true, features = [ "std" ] }
manual-xcm-rpc = { workspace = true }
69 changes: 12 additions & 57 deletions container-chains/templates/frontier/node/src/service.rs
@@ -18,7 +18,6 @@

use {
    cumulus_client_consensus_common::ParachainBlockImport,
-    sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY},
    sc_network::config::FullNetworkConfiguration,
};
// std
@@ -65,9 +64,11 @@ use {
    sc_executor::NativeElseWasmExecutor,
    sc_network::NetworkBlock,
    sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager},
-    sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+    sc_telemetry::{Telemetry, TelemetryWorkerHandle},
};

+use node_common::service::NewPartial;

/// Native executor type.
use crate::client::TemplateRuntimeExecutor;

@@ -160,68 +161,22 @@ pub fn new_partial(
    // Use ethereum style for subscription ids
    config.rpc_id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider));

-    let telemetry = config
-        .telemetry_endpoints
-        .clone()
-        .filter(|x| !x.is_empty())
-        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
-            let worker = TelemetryWorker::new(16)?;
-            let telemetry = worker.handle().new_telemetry(endpoints);
-            Ok((worker, telemetry))
-        })
-        .transpose()?;
-
-    // Default runtime_cache_size is 2
-    // For now we can work with this, but it will likely need
-    // to change once we start having runtime_cache_sizes, or
-    // run nodes with the maximum for this value
-    let heap_pages = config
-        .default_heap_pages
-        .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static {
-            extra_pages: h as _,
-        });
-
-    let wasm = WasmExecutor::builder()
-        .with_execution_method(config.wasm_method)
-        .with_onchain_heap_alloc_strategy(heap_pages)
-        .with_offchain_heap_alloc_strategy(heap_pages)
-        .with_max_runtime_instances(config.max_runtime_instances)
-        .with_runtime_cache_size(config.runtime_cache_size)
-        .build();
-
-    let executor = ParachainExecutor::new_with_wasm_executor(wasm);
-
-    let (client, backend, keystore_container, task_manager) =
-        sc_service::new_full_parts::<Block, RuntimeApi, _>(
-            config,
-            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-            executor,
-        )?;
-    let client = Arc::new(client);
-
-    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
-    let telemetry = telemetry.map(|(worker, telemetry)| {
-        task_manager
-            .spawn_handle()
-            .spawn("telemetry", None, worker.run());
-        telemetry
-    });
+    let NewPartial {
+        client,
+        backend,
+        transaction_pool,
+        telemetry,
+        telemetry_worker_handle,
+        task_manager,
+        keystore_container,
+    } = node_common::service::new_partial(config)?;

    let maybe_select_chain = if dev_service {
        Some(sc_consensus::LongestChain::new(backend.clone()))
    } else {
        None
    };

-    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
-        config.transaction_pool.clone(),
-        config.role.is_authority().into(),
-        config.prometheus_registry(),
-        task_manager.spawn_essential_handle(),
-        client.clone(),
-    );
-
    let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
    let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));

48 changes: 42 additions & 6 deletions container-chains/templates/simple/node/src/service.rs
@@ -1,5 +1,3 @@
-//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
-
// Copyright (C) Moondance Labs Ltd.
// This file is part of Tanssi.

@@ -15,14 +13,18 @@

// You should have received a copy of the GNU General Public License
// along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
-use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
+
+//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.

// std
use std::{sync::Arc, time::Duration};

use {cumulus_client_cli::CollatorOptions, sc_network::config::FullNetworkConfiguration};
// Local Runtime Types
-use container_chain_template_simple_runtime::{opaque::Block, RuntimeApi};
+use {
+    container_chain_template_simple_runtime::{opaque::Block, RuntimeApi},
+    node_common::service::NewPartial,
+};

// Cumulus Imports
#[allow(deprecated)]
@@ -44,7 +46,7 @@ use {
    sc_executor::NativeElseWasmExecutor,
    sc_network::NetworkBlock,
    sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager},
-    sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle},
+    sc_telemetry::{Telemetry, TelemetryWorkerHandle},
    sc_transaction_pool_api::OffchainTransactionPoolFactory,
};

@@ -92,7 +94,41 @@ pub fn new_partial(
    >,
    sc_service::Error,
> {
-    node_common::service::new_partial(config, ())
+    let NewPartial {
+        client,
+        backend,
+        transaction_pool,
+        telemetry,
+        telemetry_worker_handle,
+        task_manager,
+        keystore_container,
+    } = node_common::service::new_partial(config)?;
+
+    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+    let import_queue = nimbus_consensus::import_queue(
+        client.clone(),
+        block_import.clone(),
+        move |_, _| async move {
+            let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+            Ok((time,))
+        },
+        &task_manager.spawn_essential_handle(),
+        config.prometheus_registry(),
+        false,
+    )?;
+
+    Ok(PartialComponents {
+        backend,
+        client,
+        import_queue,
+        keystore_container,
+        task_manager,
+        transaction_pool,
+        select_chain: (),
+        other: (block_import, telemetry, telemetry_worker_handle),
+    })
}

/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
38 changes: 37 additions & 1 deletion node/src/service.rs
@@ -16,6 +16,8 @@

//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.

+use node_common::service::NewPartial;
+
#[allow(deprecated)]
use {
    crate::{
@@ -137,7 +139,41 @@ pub fn new_partial(
    >,
    sc_service::Error,
> {
-    node_common::service::new_partial(config, None)
+    let NewPartial {
+        client,
+        backend,
+        transaction_pool,
+        telemetry,
+        telemetry_worker_handle,
+        task_manager,
+        keystore_container,
+    } = node_common::service::new_partial(config)?;
+
+    let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
+
+    let import_queue = nimbus_consensus::import_queue(
+        client.clone(),
+        block_import.clone(),
+        move |_, _| async move {
+            let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+            Ok((time,))
+        },
+        &task_manager.spawn_essential_handle(),
+        config.prometheus_registry(),
+        false,
+    )?;
+
+    Ok(PartialComponents {
+        backend,
+        client,
+        import_queue,
+        keystore_container,
+        task_manager,
+        transaction_pool,
+        select_chain: None,
+        other: (block_import, telemetry, telemetry_worker_handle),
+    })
}

/// Background task used to detect changes to container chain assignment,
